diff --git "a/6077.jsonl" "b/6077.jsonl" new file mode 100644 --- /dev/null +++ "b/6077.jsonl" @@ -0,0 +1,443 @@ +{"seq_id":"27087238981","text":"class Solution:\n def minOperations(self, nums: List[int], x: int) -> int:\n total, left, rslt = sum(nums), 0, len(nums)+1\n if total < x:\n return -1\n for right, num in enumerate(nums):\n total -= num\n while total < x:\n total += nums[left]\n left += 1\n if total == x:\n rslt = min(rslt, len(nums)-right-1+left)\n return rslt if rslt < len(nums)+1 else -1\n","repo_name":"Mela2014/lc_punch","sub_path":"lc1658_twopointer.py","file_name":"lc1658_twopointer.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44668748110","text":"import argparse\nimport os\nimport logging\nimport time\nimport pickle\nfrom tqdm import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\nfrom pytorch_lightning import seed_everything\n\nfrom transformers import AdamW, T5ForConditionalGeneration, T5Tokenizer\nfrom transformers import get_linear_schedule_with_warmup\n\nfrom data_utils import ABSADataset\nfrom data_utils import write_results_to_log, read_line_examples_from_file\nfrom eval_utils import compute_scores\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef init_args():\n parser = argparse.ArgumentParser()\n # basic settings\n parser.add_argument(\"--task\", default='uabsa', type=str, required=True,\n help=\"The name of the task, selected from: [uabsa, aste, tasd, aope]\")\n parser.add_argument(\"--dataset\", default='rest14', type=str, required=True,\n help=\"The name of the dataset, selected from: [laptop14, rest14, rest15, rest16]\")\n parser.add_argument(\"--model_name_or_path\", default='t5-base', type=str,\n help=\"Path to pre-trained model or shortcut name\")\n parser.add_argument(\"--paradigm\", default='annotation', type=str, required=True,\n help=\"The way to construct target sentence, selected from: [annotation, extraction]\")\n parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval on the dev/test set.\")\n parser.add_argument(\"--do_direct_eval\", action='store_true', \n help=\"Whether to run direct eval on the dev/test set.\")\n\n # Other parameters\n parser.add_argument(\"--max_seq_length\", default=128, type=int)\n parser.add_argument(\"--n_gpu\", default=0)\n parser.add_argument(\"--train_batch_size\", default=16, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--eval_batch_size\", default=16, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=3e-4, type=float)\n parser.add_argument(\"--num_train_epochs\", default=20, type=int, \n help=\"Total number of training epochs to perform.\")\n parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\")\n\n # training details\n parser.add_argument(\"--weight_decay\", default=0.0, type=float)\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float)\n parser.add_argument(\"--warmup_steps\", default=0.0, type=float)\n\n args = parser.parse_args()\n\n # set up output dir which looks like './aste/rest14/extraction/'\n if not 
os.path.exists('./outputs'):\n os.mkdir('./outputs')\n\n task_dir = f\"./outputs/{args.task}\"\n if not os.path.exists(task_dir):\n os.mkdir(task_dir)\n\n task_dataset_dir = f\"{task_dir}/{args.dataset}\"\n if not os.path.exists(task_dataset_dir):\n os.mkdir(task_dataset_dir)\n\n output_dir = f\"{task_dataset_dir}/{args.paradigm}\"\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n args.output_dir = output_dir\n\n return args\n\n\ndef get_dataset(tokenizer, type_path, args):\n return ABSADataset(tokenizer=tokenizer, data_dir=args.dataset, data_type=type_path, \n paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)\n\n\nclass T5FineTuner(pl.LightningModule):\n def __init__(self, hparams):\n super(T5FineTuner, self).__init__()\n self.hparams = hparams\n\n self.model = T5ForConditionalGeneration.from_pretrained(hparams.model_name_or_path)\n self.tokenizer = T5Tokenizer.from_pretrained(hparams.model_name_or_path)\n\n def is_logger(self):\n return True\n\n def forward(self, input_ids, attention_mask=None, decoder_input_ids=None, \n decoder_attention_mask=None, labels=None):\n return self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n labels=labels,\n )\n\n def _step(self, batch):\n lm_labels = batch[\"target_ids\"]\n lm_labels[lm_labels[:, :] == self.tokenizer.pad_token_id] = -100\n\n outputs = self(\n input_ids=batch[\"source_ids\"],\n attention_mask=batch[\"source_mask\"],\n labels=lm_labels,\n decoder_attention_mask=batch['target_mask']\n )\n\n loss = outputs[0]\n return loss\n\n def training_step(self, batch, batch_idx):\n loss = self._step(batch)\n\n tensorboard_logs = {\"train_loss\": loss}\n return {\"loss\": loss, \"log\": tensorboard_logs}\n\n def training_epoch_end(self, outputs):\n avg_train_loss = torch.stack([x[\"loss\"] for x in outputs]).mean()\n tensorboard_logs = {\"avg_train_loss\": avg_train_loss}\n return {\"avg_train_loss\": avg_train_loss, \"log\": tensorboard_logs, 'progress_bar': tensorboard_logs}\n\n def validation_step(self, batch, batch_idx):\n loss = self._step(batch)\n return {\"val_loss\": loss}\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n tensorboard_logs = {\"val_loss\": avg_loss}\n return {\"avg_val_loss\": avg_loss, \"log\": tensorboard_logs, 'progress_bar': tensorboard_logs}\n\n def configure_optimizers(self):\n '''Prepare optimizer and schedule (linear warmup and decay)'''\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n self.opt = optimizer\n return [optimizer]\n\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):\n if self.trainer.use_tpu:\n xm.optimizer_step(optimizer)\n else:\n optimizer.step()\n optimizer.zero_grad()\n self.lr_scheduler.step()\n\n def get_tqdm_dict(self):\n tqdm_dict = {\"loss\": \"{:.4f}\".format(self.trainer.avg_loss), \"lr\": self.lr_scheduler.get_last_lr()[-1]}\n return tqdm_dict\n\n def train_dataloader(self):\n train_dataset = 
get_dataset(tokenizer=self.tokenizer, type_path=\"train\", args=self.hparams)\n dataloader = DataLoader(train_dataset, batch_size=self.hparams.train_batch_size, drop_last=True, shuffle=True, num_workers=4)\n t_total = (\n (len(dataloader.dataset) // (self.hparams.train_batch_size * max(1, len(self.hparams.n_gpu))))\n // self.hparams.gradient_accumulation_steps\n * float(self.hparams.num_train_epochs)\n )\n scheduler = get_linear_schedule_with_warmup(\n self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total\n )\n self.lr_scheduler = scheduler\n return dataloader\n\n def val_dataloader(self):\n val_dataset = get_dataset(tokenizer=self.tokenizer, type_path=\"dev\", args=self.hparams)\n return DataLoader(val_dataset, batch_size=self.hparams.eval_batch_size, num_workers=4)\n\n\nclass LoggingCallback(pl.Callback):\n def on_validation_end(self, trainer, pl_module):\n logger.info(\"***** Validation results *****\")\n if pl_module.is_logger():\n metrics = trainer.callback_metrics\n # Log results\n for key in sorted(metrics):\n if key not in [\"log\", \"progress_bar\"]:\n logger.info(\"{} = {}\\n\".format(key, str(metrics[key])))\n\n def on_test_end(self, trainer, pl_module):\n logger.info(\"***** Test results *****\")\n\n if pl_module.is_logger():\n metrics = trainer.callback_metrics\n\n # Log and save results to file\n output_test_results_file = os.path.join(pl_module.hparams.output_dir, \"test_results.txt\")\n with open(output_test_results_file, \"w\") as writer:\n for key in sorted(metrics):\n if key not in [\"log\", \"progress_bar\"]:\n logger.info(\"{} = {}\\n\".format(key, str(metrics[key])))\n writer.write(\"{} = {}\\n\".format(key, str(metrics[key])))\n\n\ndef evaluate(data_loader, model, paradigm, task, sents):\n \"\"\"\n Compute scores given the predictions and gold labels\n \"\"\"\n device = torch.device(f'cuda:{args.n_gpu}')\n model.model.to(device)\n \n model.model.eval()\n outputs, targets = [], []\n for batch in tqdm(data_loader):\n # need to push the data to device\n outs = model.model.generate(input_ids=batch['source_ids'].to(device), \n attention_mask=batch['source_mask'].to(device), \n max_length=128)\n\n dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]\n target = [tokenizer.decode(ids, skip_special_tokens=True) for ids in batch[\"target_ids\"]]\n\n outputs.extend(dec)\n targets.extend(target)\n\n raw_scores, fixed_scores, all_labels, all_preds, all_preds_fixed = compute_scores(outputs, targets, sents, paradigm, task)\n results = {'raw_scores': raw_scores, 'fixed_scores': fixed_scores, 'labels': all_labels,\n 'preds': all_preds, 'preds_fixed': all_preds_fixed}\n # pickle.dump(results, open(f\"{args.output_dir}/results-{args.task}-{args.dataset}-{args.paradigm}.pickle\", 'wb'))\n\n return raw_scores, fixed_scores\n\n\n# initialization\nargs = init_args()\nprint(\"\\n\", \"=\"*30, f\"NEW EXP: {args.task.upper()} on {args.dataset}\", \"=\"*30, \"\\n\")\n\nseed_everything(args.seed)\n\ntokenizer = T5Tokenizer.from_pretrained(args.model_name_or_path)\n\n# show one sample to check the sanity of the code and the expected output\nprint(f\"Here is an example (from dev set) under `{args.paradigm}` paradigm:\")\ndataset = ABSADataset(tokenizer=tokenizer, data_dir=args.dataset, data_type='dev', \n paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)\ndata_sample = dataset[2] # a random data sample\nprint('Input :', tokenizer.decode(data_sample['source_ids'], skip_special_tokens=True))\nprint('Output:', 
tokenizer.decode(data_sample['target_ids'], skip_special_tokens=True))\n\n\n# training process\nif args.do_train:\n print(\"\\n****** Conduct Training ******\")\n model = T5FineTuner(args)\n\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n filepath=args.output_dir, prefix=\"ckt\", monitor='val_loss', mode='min', save_top_k=3\n )\n\n # prepare for trainer\n train_params = dict(\n default_root_dir=args.output_dir,\n accumulate_grad_batches=args.gradient_accumulation_steps,\n gpus=args.n_gpu,\n gradient_clip_val=1.0,\n #amp_level='O1',\n max_epochs=args.num_train_epochs,\n checkpoint_callback=checkpoint_callback,\n callbacks=[LoggingCallback()],\n )\n\n trainer = pl.Trainer(**train_params)\n trainer.fit(model)\n\n # save the final model\n # model.model.save_pretrained(args.output_dir)\n\n print(\"Finish training and saving the model!\")\n\n\nif args.do_eval:\n\n print(\"\\n****** Conduct Evaluating ******\")\n\n # model = T5FineTuner(args)\n dev_results, test_results = {}, {}\n best_f1, best_checkpoint, best_epoch = -999999.0, None, None\n all_checkpoints, all_epochs = [], []\n\n # retrieve all the saved checkpoints for model selection\n saved_model_dir = args.output_dir\n for f in os.listdir(saved_model_dir):\n file_name = os.path.join(saved_model_dir, f)\n if 'cktepoch' in file_name:\n all_checkpoints.append(file_name)\n\n # conduct some selection (or not)\n print(f\"We will perform validation on the following checkpoints: {all_checkpoints}\")\n\n # load dev and test datasets\n dev_dataset = ABSADataset(tokenizer, data_dir=args.dataset, data_type='dev',\n paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)\n dev_loader = DataLoader(dev_dataset, batch_size=32, num_workers=4)\n\n test_dataset = ABSADataset(tokenizer, data_dir=args.dataset, data_type='test', \n paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)\n test_loader = DataLoader(test_dataset, batch_size=32, num_workers=4)\n \n for checkpoint in all_checkpoints:\n epoch = checkpoint.split('=')[-1][:-5] if len(checkpoint) > 1 else \"\"\n # only perform evaluation at the specific epochs (\"15-19\")\n # eval_begin, eval_end = args.eval_begin_end.split('-')\n if 0 <= int(epoch) < 100:\n all_epochs.append(epoch)\n\n # reload the model and conduct inference\n print(f\"\\nLoad the trained model from {checkpoint}...\")\n model_ckpt = torch.load(checkpoint)\n model = T5FineTuner(model_ckpt['hyper_parameters'])\n model.load_state_dict(model_ckpt['state_dict'])\n \n dev_result = evaluate(dev_loader, model, args.paradigm, args.task)\n if dev_result['f1'] > best_f1:\n best_f1 = dev_result['f1']\n best_checkpoint = checkpoint\n best_epoch = epoch\n\n # add the global step to the name of these metrics for recording\n # 'f1' --> 'f1_1000'\n dev_result = dict((k + '_{}'.format(epoch), v) for k, v in dev_result.items())\n dev_results.update(dev_result)\n\n test_result = evaluate(test_loader, model, args.paradigm, args.task)\n test_result = dict((k + '_{}'.format(epoch), v) for k, v in test_result.items())\n test_results.update(test_result)\n\n # print test results over last few steps\n print(f\"\\n\\nThe best checkpoint is {best_checkpoint}\")\n best_step_metric = f\"f1_{best_epoch}\"\n print(f\"F1 scores on test set: {test_results[best_step_metric]:.4f}\")\n\n print(\"\\n* Results *: Dev / Test \\n\")\n metric_names = ['f1', 'precision', 'recall']\n for epoch in all_epochs:\n print(f\"Epoch-{epoch}:\")\n for name in metric_names:\n name_step = f'{name}_{epoch}'\n print(f\"{name:<10}: 
{dev_results[name_step]:.4f} / {test_results[name_step]:.4f}\", sep=' ')\n print()\n\n results_log_dir = './results_log'\n if not os.path.exists(results_log_dir):\n os.mkdir(results_log_dir)\n log_file_path = f\"{results_log_dir}/{args.task}-{args.dataset}.txt\"\n write_results_to_log(log_file_path, test_results[best_step_metric], args, dev_results, test_results, all_epochs)\n\n\n# evaluation process\nif args.do_direct_eval:\n print(\"\\n****** Conduct Evaluating with the last state ******\")\n\n # model = T5FineTuner(args)\n\n # print(\"Reload the model\")\n # model.model.from_pretrained(args.output_dir)\n\n sents, _ = read_line_examples_from_file(f'data/{args.task}/{args.dataset}/test.txt')\n\n print()\n test_dataset = ABSADataset(tokenizer, data_dir=args.dataset, data_type='test', \n paradigm=args.paradigm, task=args.task, max_len=args.max_seq_length)\n test_loader = DataLoader(test_dataset, batch_size=32, num_workers=4)\n # print(test_loader.device)\n raw_scores, fixed_scores = evaluate(test_loader, model, args.paradigm, args.task, sents)\n # print(scores)\n\n # write to file\n log_file_path = f\"results_log/{args.task}-{args.dataset}.txt\"\n local_time = time.asctime(time.localtime(time.time()))\n exp_settings = f\"{args.task} on {args.dataset} under {args.paradigm}; Train bs={args.train_batch_size}, num_epochs = {args.num_train_epochs}\"\n exp_results = f\"Raw F1 = {raw_scores['f1']:.4f}, Fixed F1 = {fixed_scores['f1']:.4f}\"\n log_str = f'============================================================\\n'\n log_str += f\"{local_time}\\n{exp_settings}\\n{exp_results}\\n\\n\"\n with open(log_file_path, \"a+\") as f:\n f.write(log_str)\n","repo_name":"IsakZhang/Generative-ABSA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16400,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"61"} +{"seq_id":"6593264289","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom openapi_server.models.base_model_ import Model\nfrom openapi_server import util\n\n\nclass Population(Model):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, province=None, population=None): # noqa: E501\n \"\"\"Population - a model defined in OpenAPI\n\n :param province: The province of this Population. # noqa: E501\n :type province: str\n :param population: The population of this Population. # noqa: E501\n :type population: int\n \"\"\"\n self.openapi_types = {\n 'province': str,\n 'population': int\n }\n\n self.attribute_map = {\n 'province': 'province',\n 'population': 'population'\n }\n\n self._province = province\n self._population = population\n\n @classmethod\n def from_dict(cls, dikt) -> 'Population':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The Population of this Population. 
# noqa: E501\n :rtype: Population\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def province(self):\n \"\"\"Gets the province of this Population.\n\n\n :return: The province of this Population.\n :rtype: str\n \"\"\"\n return self._province\n\n @province.setter\n def province(self, province):\n \"\"\"Sets the province of this Population.\n\n\n :param province: The province of this Population.\n :type province: str\n \"\"\"\n\n self._province = province\n\n @property\n def population(self):\n \"\"\"Gets the population of this Population.\n\n\n :return: The population of this Population.\n :rtype: int\n \"\"\"\n return self._population\n\n @population.setter\n def population(self, population):\n \"\"\"Sets the population of this Population.\n\n\n :param population: The population of this Population.\n :type population: int\n \"\"\"\n\n self._population = population\n","repo_name":"jeanyjean/locatitude-api","sub_path":"autogen/openapi_server/models/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39911154924","text":"from elasticsearch import Elasticsearch\nfrom flask import request, render_template, abort, jsonify, redirect, url_for, session\nfrom opencampus.server import app\nfrom opencampus.common import modulemanager\nfrom opencampus.common.models import Application, ApplicationEmbedded\n\n\n@modulemanager.campus_route('app', '/apps/search', menu='Apps')\ndef app_search():\n query = request.args.get('q', '')\n page = int(request.args.get('page', 1)) - 1\n size = 20\n results = []\n\n if query == '':\n results = [Application.objects(id=embedded.application_id).get() for embedded in ApplicationEmbedded.objects(\n campus_ids__in=[request.campus.id]).skip(page*size).limit(size)]\n else:\n es = Elasticsearch(hosts=app.config.get('ELASTICSEARCH_HOSTS'))\n\n search_body = {\n 'from': page*size,\n 'size': size,\n 'sort': ['_score'],\n 'query': {\n 'bool': {\n 'must': [\n {\n 'term': {\n 'embedded_app.campus_ids': str(request.campus.id)\n }\n },\n {\n 'query_string': {\n \"default_field\": \"_all\",\n \"query\": query\n }\n }\n ]\n }\n }\n }\n\n res = es.search(index='embedded_app_list', body=search_body)\n\n for result_app in res.get('hits').get('hits'):\n results.append({\n 'id': result_app.get('_source').get('app_id'),\n 'name': result_app.get('_source').get('name'),\n 'description': result_app.get('_source').get('description'),\n })\n\n if page > 1 and len(results) < 1:\n return abort(404)\n\n return render_template('module/app/search.html', results=results, next_page=page+2, query=query)\n\n\n@modulemanager.campus_route('app', '/apps/')\ndef app_run(app_id):\n try:\n embeded = ApplicationEmbedded.objects(application_id=app_id).get()\n except ApplicationEmbedded.DostNotFound:\n return abort(404)\n\n return render_template('module/app/run.html', embeded=embeded)\n","repo_name":"shlee322/opencampus","sub_path":"opencampus/module/app/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"39076587934","text":"# capture.py\nimport numpy as np\nimport cv2\n\n# Capture video from camera\n# cap = cv2.VideoCapture(0)\n\n# Capture video from file\ncap = cv2.VideoCapture('yolov7-pose/shortvideo.m4v')\n\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nout = 
cv2.VideoWriter('yolov7-pose/shortvideo_hsv.mp4',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (frame_width,frame_height))\n\n\n\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n out.write(hsv)\n # Display the resulting frame\n cv2.imshow('frame',hsv)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n\n# When everything done, release the capture\ncap.release()\n\ncv2.destroyAllWindows()","repo_name":"paula22on/Video_Processing","sub_path":"change_colorspace.py","file_name":"change_colorspace.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7252838709","text":"import discord\nimport os\nimport sys\nimport logging\nimport random\nfrom yt_dlp import YoutubeDL\nfrom dotenv import load_dotenv\nfrom pathlib import Path\nfrom discord import Intents, Embed\nfrom typing import Any\nfrom ytmusicapi import YTMusic\nfrom dataclasses import dataclass\nfrom collections import deque\n\ndef pp_size_function():\n size = random.randrange(20)\n sizestr = \"smol\" if size >= 0 and size <= 10 else \"yuge\"\n return (size, sizestr)\n\ndef checkSearchInput(msg):\n return len(msg.content) > 0 and isinstance(int(msg.content), int) and int(msg.content) <= 5 and int(msg.content) > 0\n\n\nclass Config():\n def __init__(self, path: str) -> None:\n env_path = Path(path)\n load_dotenv(dotenv_path=env_path)\n self.token = os.getenv(\"TOKEN\") or \"\"\n self.guild_name = os.getenv(\"GUILD\") or \"\"\n self.loghandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\n\n def __str__(self) -> str:\n return f\"Token: {self.token}\"\n\n@dataclass\nclass ytApiResult:\n titre: str\n duration: str\n videoId: str\n artist: str\n\nclass SteveBot(discord.Client):\n __guild_count = 0\n\n def __init__(self, *, intents: Intents, **options: Any) -> None:\n super().__init__(intents=intents, **options)\n self.emoji_list: list = ['🍆', '🤏']\n self.voice_channel: Any = None\n self.ytdl_options = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }]}\n\n self.ffmpeg_before_options = '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5'\n self.ffmpeg_options = '-vn'\n self.ytmusic = YTMusic(\"oauth.json\", language='fr')\n\n self.song_queue = deque(maxlen=20)\n\n async def on_ready(self):\n for _ in self.guilds:\n self.__guild_count = self.__guild_count + 1\n print(f\"Logged as {self.user}, member of {self.__guild_count} guilds(s)\")\n\n async def on_message(self, message):\n if message.author == self.user:\n return\n if message.content.startswith(\"#ppsize\"):\n size, sizestr = pp_size_function()\n msg = await message.reply(f\"{size} cm , {sizestr}\")\n if sizestr == 'smol':\n await msg.add_reaction(self.emoji_list[1])\n else:\n await msg.add_reaction(self.emoji_list[0])\n if message.content.startswith(\"#join\"):\n if message.author.voice and message.author.voice.channel:\n channel = message.author.voice.channel\n self.voice_channel = await channel.connect()\n else:\n await message.reply(\"You are not connected to a voice channel, retard !\")\n if message.content.startswith(\"#leave\"):\n if self.voice_channel:\n self.song_queue.clear()\n await self.voice_channel.disconnect()\n self.voice_channel = None\n else:\n await message.reply(\"You are not connected to a voice channel, retard !\")\n\n if 
message.content.startswith(\"#qsize\"):\n await message.reply(f\"Queue size: {len(self.song_queue)}\")\n\n if message.content.startswith(\"#stop\"):\n if self.voice_channel.is_playing() or self.voice_channel.is_paused():\n self.voice_channel.stop()\n\n if message.content.startswith(\"#resume\"):\n if self.voice_channel.is_paused():\n self.voice_channel.resume()\n\n if message.content.startswith(\"#pause\"):\n if self.voice_channel.is_playing():\n self.voice_channel.pause()\n\n if message.content.startswith(\"#skip\"):\n if self.voice_channel.is_playing():\n self.voice_channel.stop()\n await message.reply(\"Skipping this dogshit ass song...\")\n self.__check_queue()\n\n if message.content.startswith(\"#search\"):\n assert self.ytmusic\n if message.author.voice and message.author.voice.channel:\n if not self.voice_channel:\n channel = message.author.voice.channel\n self.voice_channel = await channel.connect()\n else:\n await message.reply(\"You are not connected to a voice channel, retard !\")\n\n tokens = message.content.split(\" \")\n\n if len(tokens) == 1:\n return await message.reply(\"My brother in christ, the fuck i'm i supposed to search !\")\n\n query = \" \".join(tokens[1:])\n searchlist = self.ytmusic.search(query,filter=\"songs\")\n\n #TODO: add multiple artists names\n marshalled_list = list(map(lambda x: ytApiResult(x[\"title\"], x[\"duration\"],x[\"videoId\"], x[\"artists\"][0][\"name\"]), searchlist))[:5]\n\n embed = Embed(\n title=\"Search results\",\n description= \"Select the song that should be played: \",\n )\n\n for pos, i in enumerate(marshalled_list):\n embed.add_field(name=pos + 1, value=f\"{i.artist} - {i.titre} - {i.duration}\", inline=False)\n\n await message.channel.send(embed=embed)\n\n msg: Any = None\n\n try:\n msg = await self.wait_for('message',check=checkSearchInput)\n except TimeoutError as e:\n print(e)\n\n\n selected_song= marshalled_list[int(msg.content) - 1 ]\n selected_song_id = selected_song.videoId\n\n\n if self.voice_channel.is_playing():\n self.song_queue.append(selected_song_id)\n await message.reply(f\"Enqueued: {selected_song.artist} - {selected_song.titre} - {selected_song.duration}\")\n else:\n await message.reply(f\"Now playing: {selected_song.artist} - {selected_song.titre} - {selected_song.duration}\")\n\n self.__play_song(selected_song_id)\n\n if message.content.startswith(\"#play\"):\n if message.author.voice and message.author.voice.channel:\n if not self.voice_channel:\n channel = message.author.voice.channel\n self.voice_channel = await channel.connect()\n else:\n await message.reply(\"You are not connected to a voice channel, retard !\")\n\n tokens = message.content.split(\" \")\n\n if len(tokens) == 1:\n return await message.reply(\"My brother in christ, the fuck i'm i supposed to search !\")\n\n query = \" \".join(tokens[1:])\n searchlist = self.ytmusic.search(query,filter=\"songs\")\n\n marshalled_list = list(map(lambda x: ytApiResult(x[\"title\"], x[\"duration\"],x[\"videoId\"], x[\"artists\"][0][\"name\"]), searchlist))[0]\n\n selected_song= marshalled_list\n selected_song_id = selected_song.videoId\n\n if self.voice_channel.is_playing():\n self.song_queue.append(selected_song_id)\n await message.reply(f\"Enqueued: {selected_song.artist} - {selected_song.titre} - {selected_song.duration}\")\n else:\n await message.reply(f\"Now playing: {selected_song.artist} - {selected_song.titre} - {selected_song.duration}\")\n\n self.__play_song(selected_song_id)\n\n def __play_song(self, song_id: str):\n with YoutubeDL(self.ytdl_options) as 
yt:\n youtube_str = f'https://www.youtube.com/watch?v={song_id}'\n info = yt.extract_info(youtube_str, download=False)\n\n if info:\n try:\n self.voice_channel.play(discord.FFmpegOpusAudio(info[\"url\"],options=self.ffmpeg_options, before_options=self.ffmpeg_before_options), after=lambda x=None: self.__check_queue())\n except TypeError as e:\n print(e)\n else:\n return\n\n def __check_queue(self):\n if len(self.song_queue) > 0:\n current_id = self.song_queue.popleft()\n self.__play_song(current_id)\n\nif __name__ == \"__main__\":\n # Load configuration\n conf = Config(sys.argv[1])\n intents = discord.Intents.default()\n intents.message_content = True\n\n client = SteveBot(intents=intents)\n client.run(conf.token, log_handler=conf.loghandler)\n\n","repo_name":"Khalimouh/SbeveBot","sub_path":"sbevebot/steve.py","file_name":"steve.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23641437871","text":"import sys\r\n\r\ng_to_e = '''\r\na\ty\r\nb\th\r\nc\te\r\nd\ts\r\ne\to\r\nf\tc\r\ng\tv\r\nh\tx\r\ni\td\r\nj\tu\r\nk\ti\r\nl\tg\r\nm\tl\r\nn\tb\r\no\tk\r\np\tr\r\nq\tz\r\nr\tt\r\ns\tn\r\nt\tw\r\nu\tj\r\nv\tp\r\nw\tf\r\nx\tm\r\ny\ta\r\nz\tq\r\n'''\r\n\r\n# read k elements separated by sep, and convert them to type typ\r\nclass Reader:\r\n def __init__(self, filename, sep = ' ', typ = int):\r\n self.sep = sep\r\n self.typ = typ\r\n self.in_file = open(filename)\r\n \r\n def get_line(self):\r\n return next(self.in_file).rstrip()\r\n\r\n def next(self, k = 1, sep = None, typ = None):\r\n line = self.get_line()\r\n if typ is None:\r\n typ = self.typ\r\n if k == 1:\r\n return typ(line)\r\n if sep is None:\r\n sep = self.sep\r\n fields = line.split(sep)\r\n if k != 0 and len(fields) != k:\r\n raise Exception('expected {}, found {} fields in {}'.format(k, len(fields), line))\r\n return [typ(f) for f in fields]\r\n\r\n\r\ndef problem1():\r\n it = iter(g_to_e.splitlines())\r\n d = {}\r\n for line in it:\r\n line = line.rstrip()\r\n if line == '':\r\n continue\r\n g, e = line.split('\\t')\r\n d[g] = e\r\n d[' '] = ' '\r\n r = Reader(sys.argv[1], sep = None)\r\n n = r.next()\r\n for t in range(1, n+1):\r\n s = r.next(typ = str)\r\n result = ''.join(d[c] for c in s)\r\n print('Case #{}: {}'.format(t, result))\r\n\r\n\r\nFeasible = 0\r\nFeasibleButSurprising = 1\r\nInfeasible = 2\r\n\r\ndef feasible(s, threshold):\r\n if threshold + max(0, threshold-1) * 2 <= s:\r\n return Feasible\r\n if threshold + max(0, threshold-2) * 2 <= s:\r\n return FeasibleButSurprising\r\n return Infeasible\r\n\r\ndef problem2():\r\n r = Reader(sys.argv[1])\r\n n_tests = r.next()\r\n for t in range(1, n_tests+1):\r\n n_googlers, n_surprising, threshold, *scores = r.next(k = 0)\r\n result = 0\r\n for s in scores:\r\n assessment = feasible(s, threshold)\r\n if assessment == Feasible:\r\n result += 1\r\n continue\r\n if assessment == FeasibleButSurprising:\r\n if n_surprising > 0:\r\n n_surprising -= 1\r\n result += 1\r\n print('Case #{}: {}'.format(t, result))\r\n\r\n\r\ndef main():\r\n problem2()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/738.py","file_name":"738.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30371403611","text":"import math\n\nclass Vec2:\n\n def __init__(self,*args):\n if isinstance(args[0],Vec2):\n self.x = 
args[0].x\n self.y = args[0].y\n else:\n self.x = args[0]\n self.y = args[1]\n\n def cloneFrom(self,v):\n self.x = v.x\n self.y = v.y\n\n def cloneInto(self,v):\n v.x = self.x\n v.y = self.y\n\n def clone(self):\n return Vec2(self)\n\n def __str__(self):\n return \"(\" + str(self.x) + \", \" + str(self.y) + \")\"\n\n def __add__(self,q):\n x = self.x + q.x\n y = self.y + q.y\n return Vec2(x,y)\n\n def __radd__(self,q):\n x = self.x + q.x\n y = self.y + q.y\n return Vec2(x,y)\n \n \n def __sub__(self,q):\n x = self.x - q.x\n y = self.y - q.y\n return Vec2(x,y)\n\n def __rsub__(self,q):\n x = q.x - self.x\n y = q.y - self.y\n return Vec2(x,y)\n\n def __mul__(self,q):\n if isinstance(q,Vec2):\n x = self.x * q.x - self.y * q.y\n y = self.x * q.y + self.y * q.x\n else:\n x = self.x * q\n y = self.y * q\n return Vec2(x,y)\n\n def __rmul__(self,q):\n if isinstance(q,Vec2):\n x = self.x * q.x - self.y * q.y\n y = self.x * q.y + self.y * q.x\n else:\n x = self.x * q\n y = self.y * q\n return Vec2(x,y)\n\n def __truediv__(self,q):\n if isinstance(q,Vec2):\n l = q.lenSqr()\n x = (self.x * q.x + self.y * q.y)/l\n y = (self.y * q.x - self.x * q.y)/l\n else:\n x = self.x / q\n y = self.y / q\n return Vec2(x,y)\n\n def __rtruediv__(self,q):\n if isinstance(q,Vec2):\n l = self.lenSqr()\n x = (q.x * self.x + q.y * self.y)/l\n y = (q.y * self.x - q.x * self.y)/l\n else:\n l = self.lenSqr()\n x = q * self.x/l\n y = - q * self.y/l\n return Vec2(x,y)\n\n def __neg__(self):\n return Vec2(-self.x,-self.y)\n\n def __pos__(self):\n return Vec2(self)\n\n def __abs__(self):\n return math.sqrt(self.x*self.x + self.y*self.y)\n\n def __invert__(self):\n return Vec2(self.x,-self.y)\n\n def conjugate(self):\n return Vec2(self.x,-self.y)\n\n def __eq__(self,q):\n if self.x != q.x:\n return False\n if self.y != q.y:\n return False\n return True\n \n def __ne__(self,q):\n if self.x != q.x:\n return True\n if self.y != q.y:\n return True\n return False\n\n def __copy__(self):\n return Vec2(self)\n \n def lenSqr(self):\n return self.x*self.x + self.y*self.y\n\n def len(self):\n return math.sqrt(self.x*self.x + self.y*self.y)\n\n def dist(self,q):\n x = self.x - q.x\n y = self.y - q.y\n return math.sqrt(x*x + y*y)\n \n def distSqr(self,q):\n x = self.x - q.x\n y = self.y - q.y\n return x*x + y*y\n\n def dot(self,q):\n return self.x * q.x + self.y * q.y\n\n def cross(self,q):\n return self.x * q.y - self.y * q.x\n \n def normalise(self):\n l = self.len()\n if l != 0:\n return Vec2(self.x/l,self.y/l)\n else:\n return Vec2(1,0)\n\n def rotate90(self):\n return Vec2(-self.y,self.x)\n \n def rotate(self,a):\n return Vec2(math.cos(a)*self.x - math.sin(a)*self.y,math.sin(a)*self.x + math.cos(a)*self.y)\n\n# Tests\nif __name__ == \"__main__\":\n q = Quaternion(1,-2,0,0)\n qq = Quaternion(q)\n i = Quaternion(0,1,0,0)\n j = Quaternion(0,0,1,0)\n k = Quaternion(0,0,0,1)\n u = Vec3(1,-2,3)\n v = Vec3(u)\n print(u)\n print(v)\n print(u + v)\n print(3 * u)\n print(v * 3)\n print(u != v)\n print(u.toQuaternion() * j)\n print(j * u.toQuaternion())\n print(u ** i)\n print(u ** j)\n print(u ** k)\n u = Vec3(0.8,0.6,0)\n v = Vec3(0,0.6,0.8)\n print(u)\n print(u ** k)\n print(v)\n w = u ** u.rotateTo(v)\n q = u.rotateTo(v)\n print(u.rotateTo(v))\n print(w)\n print(u.len())\n print(w.len())\n print(qRotation(math.pi/2,1,0,0))\n '''\n print(i * u.toQuaternion())\n print(u.applyQuaternion(i))\n print(u ** j)\n print(u ** k)\n print(v)\n print(q)\n print(qq)\n print(q + qq)\n print(q + 3)\n print(q * qq)\n print(q * qq / q)\n print(3 - q)\n print(i * 
j)\n print(j * k)\n print(k * i)\n print(3 * q)\n print(q * 3)\n print(1/i)\n print(q * ~q)\n print(q != qq)\n print(q ** 2 / q)\n '''\n","repo_name":"loopspace/Python-OpenGL","sub_path":"lib/vectors/vec2.py","file_name":"vec2.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2348059489","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 16 17:23:07 2018\n\n@author: YY\n\"\"\"\n\nimport turtle as t\n\nt.setup(800,400)\nt.ondrag(t.goto)\nt.colormode(255)\nt.shape('turtle')\nt.speed(3)\nt.pendown()\nt.begin_fill#t.fill(True)\nfor i in range(3):\n t.forward(100)\n t.left(120)\nt.end_fill\nt.penup()\nt.done()","repo_name":"fibonacciyys/mypy","sub_path":"pylearn/turtlee/pre_turtle.py","file_name":"pre_turtle.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9220068054","text":"from fileinput import filename\r\nfrom flask import Flask, json, request, send_file\r\nfrom elasticsearch import Elasticsearch\r\nfrom datetime import datetime\r\nimport os\r\nimport csv \r\n\r\napi = Flask(__name__)\r\n\r\n@api.route('/read', methods=['GET'])\r\ndef get_read():\r\n # Retrieve parameters from the request\r\n index = request.args.get('index')\r\n size = request.args.get('size')\r\n try:\r\n # Connect to Elasticsearch\r\n elastic_client = Elasticsearch([{'host': 'localhost', 'port': 9200}])\r\n # Search from elasticsearch by index and size\r\n resp_search = elastic_client.search(index=index, query={\"match_all\": {}}, size=size)\r\n result = {\r\n 'size': len(resp_search[\"hits\"][\"hits\"])\r\n }\r\n result['data'] = []\r\n # Append all found data to a list\r\n for hit in resp_search['hits']['hits']:\r\n result['data'].append(hit)\r\n return json.dumps(result)\r\n except Exception as err:\r\n return 'Error index not found'\r\n\r\n@api.route('/start_read_all', methods=['GET'])\r\ndef get_start_read_all():\r\n # Retrieve parameter from the request\r\n index = request.args.get('index')\r\n try:\r\n # Connect to Elasticsearch\r\n elastic_client = Elasticsearch([{'host': 'localhost', 'port': 9200}])\r\n # Search from elasticsearch by index\r\n resp_search = elastic_client.search(index=index, query={\"match_all\": {}})\r\n # Set CSV filename based on datetime and given index\r\n csv_dir = './csv/'\r\n csv_filename = '{timestamp}_{filename}'.format(filename=index, timestamp=datetime.now().strftime(\"%Y%m%d%H%M%S\"))\r\n csv_file = csv_filename + '.csv'\r\n csv_path = os.path.join(csv_dir, csv_file)\r\n csv_column = resp_search['hits']['hits'][0].keys()\r\n # Write data to a csv file\r\n with open(csv_path, 'w+') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=csv_column)\r\n writer.writeheader()\r\n for data in resp_search['hits']['hits']:\r\n writer.writerow(data)\r\n # Set response data\r\n result = {\r\n 'job_id': csv_filename\r\n }\r\n return json.dumps(result)\r\n except Exception as err:\r\n return 'Error index not found'\r\n\r\n@api.route('/read_all', methods=['GET'])\r\ndef get_read_all():\r\n # Retrieve parameter from the request\r\n job_id = request.args.get('job_id')\r\n try:\r\n # Set CSV filename based on job_id parameter\r\n csv_dir = './csv'\r\n csv_file = '{filename}.csv'.format(filename=job_id)\r\n csv_path = os.path.join(csv_dir, csv_file)\r\n if not os.path.isfile(csv_path):\r\n return 'File not found: file {filename} was not found on the 
server'.format(filename=csv_file)\r\n return send_file(csv_path, as_attachment=True, attachment_filename=csv_file)\r\n except Exception as err:\r\n return 'Error job_id not found'\r\n\r\nif __name__ == '__main__':\r\n api.run() ","repo_name":"enjinette/elasticsearch","sub_path":"flaskelastic.py","file_name":"flaskelastic.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28302338944","text":"import flask\nfrom flask import Flask, request\nfrom flask_cors import CORS, cross_origin\nimport time\nfrom flask_pymongo import PyMongo\nimport pymongo\nimport urllib\nfrom PIL import Image\nimport requests\nimport os\nimport openai\n\ndb_key = os.getenv(\"DB_KEY\")\n\napp = Flask(__name__)\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nclient = pymongo.MongoClient(\"mongodb+srv://pramit25:kTkHK4d1NxposPwP@app.wzujqrt.mongodb.net/\")\ndb = client[\"app\"]\n\ncollection = db[\"customers\"]\n\n\n@app.route(\"/\", methods=['GET'])\n@cross_origin()\ndef hello(name):\n return flask.render_template(\"index.html\", name=name)\n\n@app.route(\"/\")\n@cross_origin()\ndef home():\n return flask.render_template(\"home.html\")\n\n@app.route(\"/leaderboard/\", methods=[\"GET\", \"POST\"])\n@cross_origin()\ndef leaderboard():\n cursor = collection.find({})\n all = []\n for document in cursor:\n all.append(document)\n if request.method == 'POST':\n name = request.json['name']\n score = request.json['score']\n date = request.json['date']\n deaths = request.json['deaths']\n new_score = {\"Name\" : name, \"Score\" : score, 'Date': date, \"Deaths\": deaths}\n collection.insert_one(new_score)\n print(\"POSTED NEW ENTRY\")\n all = sorted(all, key=lambda i: (i['Score'], i['Deaths'], i['Date'], i['Name']))\n # all = [{'Name': \"pr\", 'Score': \"23\", 'Date':'May 2222', 'Name': 'Pr'}]\n return flask.render_template(\"leaderboard.html\", all=all)\n if(request.method == 'GET'):\n all = sorted(all, key=lambda i: (i['Score'], i['Deaths'], i['Date'], i['Name']))\n # all = [{'Name': \"pr\", 'Score': \"23\", 'Deaths': 3, 'Date':'May 2222'}]\n return flask.render_template(\"leaderboard.html\", all=all)","repo_name":"pramitbhatia25/GameJam","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39575077154","text":"'''\nCreated on Jun 20, 2018\n\n@author: grovesr\n'''\n\n#from mts_app import app, db, auth\nfrom mts_app.models import db\nfrom mts_app.admin import bp as admin_bp\nfrom flask import current_app\nfrom mts_app.models import User\nfrom flask import make_response\nfrom flask import jsonify\nfrom flask import request\nfrom flask_httpauth import HTTPBasicAuth\nfrom werkzeug.exceptions import NotFound\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.exceptions import Forbidden\nfrom werkzeug.exceptions import MethodNotAllowed\nfrom werkzeug.security import check_password_hash\n\nauth = HTTPBasicAuth()\n\n@admin_bp.errorhandler(404)\ndef notFound(error):\n return make_response(jsonify({'error': error.description}), 404)\n\n@admin_bp.errorhandler(405)\ndef methodNotAllowed(error):\n return make_response(jsonify({'error': error.description}), 405)\n\n@admin_bp.errorhandler(400)\ndef invalidFormat(error):\n return make_response(jsonify({'error': error.description}), 400)\n\n@admin_bp.errorhandler(403)\ndef forbidden(error):\n return make_response(jsonify({'error': 
error.description}), 403)\n\n@admin_bp.errorhandler(500)\ndef serverError(error):\n return make_response(jsonify({'error': error.description}), 500)\n\n@auth.verify_password\ndef verify_password(username, password):\n userQuery=User.query.filter_by(username=username)\n if userQuery.count() > 0:\n return check_password_hash(userQuery.first().password_hash, password)\n return False\n\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n\ndef validateUser():\n queryUser = User.query.filter_by(username=auth.username())\n if queryUser.count() == 0:\n raise Forbidden('Permission denied. Request API access.')\n\ndef validateAdmin():\n validateUser()\n queryUser = User.query.filter_by(username=auth.username())\n if queryUser.count() > 0 and not queryUser.first().isAdmin:\n raise Forbidden('User is not admin')\n\ndef validateEditor():\n validateUser()\n queryUser = User.query.filter_by(username=auth.username())\n if queryUser.count() > 0 and not queryUser.first().canEdit:\n raise Forbidden('User doesn''t have edit permission')\n \n@admin_bp.route('/add/user', methods=['POST'])\n@auth.login_required\ndef addUser():\n validateAdmin()\n if not request.json or not 'username' in request.json or not 'password' in request.json:\n raise BadRequest('No username and/or pasword specified in add/user request')\n try:\n user = User(username=request.json['username'])\n except AssertionError as e:\n raise BadRequest('username error: ' + str(e))\n user.set_password(request.json['password'])\n if User.query.filter_by(username=request.json['username']).count():\n raise BadRequest('User already exists')\n if 'email' in request.json:\n try:\n user.email = request.json['email']\n except AssertionError as e:\n raise BadRequest('bad email format' + str(e))\n if 'isAdmin' in request.json:\n try:\n user.isAdmin = request.json['isAdmin']\n except AssertionError as e:\n raise BadRequest('isAdmin error: ' + str(e))\n if 'canEdit' in request.json:\n try:\n user.canEdit = request.json['canEdit']\n except AssertionError as e:\n raise BadRequest('canEdit error: ' + str(e))\n db.session.add(user)\n db.session.commit()\n return jsonify(user.buildPublicJson()), 201\n\n@admin_bp.route('/check/user/', methods=['GET'])\ndef checkUser(username):\n userQuery = User.query.filter_by(username=username)\n if userQuery.count() == 0:\n raise NotFound('User not found')\n user = userQuery.first()\n returnJson = {'id': user.id}\n return jsonify(returnJson)\n\n@admin_bp.route('/user/', methods=['GET'])\n@auth.login_required\ndef getUser(username):\n validateUser()\n userQuery = User.query.filter_by(username=username)\n if not userQuery.count():\n raise NotFound('User not found')\n user = userQuery.first()\n return jsonify(user.buildPublicJson())\n\n@admin_bp.route('/users', methods=['GET'])\n@auth.login_required\ndef getUsers():\n validateAdmin()\n users = User.query.all()\n if len(users) == 0:\n raise NotFound('No users found')\n usersJson = {'users':[]}\n usersJson['userCount'] = len(users)\n for user in users:\n usersJson['users'].append(user.buildPublicJson())\n return jsonify(usersJson)\n\n@admin_bp.route('/user/', methods=['DELETE'])\n@auth.login_required\ndef deleteUser(username):\n validateAdmin()\n userQuery = User.query.filter_by(username=username)\n if not userQuery.count():\n raise NotFound('User not found')\n db.session.delete(userQuery.first())\n db.session.commit()\n return jsonify({'result': userQuery.count() == 0})\n\n@admin_bp.route('/user/', 
methods=['PUT'])\n@auth.login_required\ndef updateUser(username):\n validateAdmin()\n userQuery = User.query.filter_by(username=username)\n if not userQuery.count():\n raise NotFound('User not found')\n if not request.json:\n raise BadRequest('Not json data')\n user = userQuery.first()\n if 'username' in request.json:\n try:\n user.username = request.json['username']\n except AssertionError as e:\n raise BadRequest('username error: ' + str(e))\n if 'email' in request.json:\n try:\n user.email = request.json['email']\n except AssertionError as e:\n raise BadRequest(str(e))\n if 'isAdmin' in request.json:\n try:\n user.isAdmin = request.json['isAdmin']\n except AssertionError as e:\n raise BadRequest('isAdmin error: ' + str(e))\n if 'canEdit' in request.json:\n try:\n user.canEdit = request.json['canEdit']\n except AssertionError as e:\n raise BadRequest('canEdit error: ' + str(e))\n db.session.add(user)\n db.session.commit()\n return jsonify(user.buildPublicJson())","repo_name":"grovesr/my-things-server","sub_path":"mts_app/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9308028302","text":"import os\nimport sys\nsys.path.append(os.path.abspath('..'))\n\nfrom PIL import Image\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_datasets as tfd\n\nfrom Lesson_3.plotImages import dim\n\ndef format_image(image, label):\n image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))/255.0\n return image, label\n\n# Check the minimum dimensions\n# dim('C:\\\\Users\\\\lukas.rimkus\\\\.keras\\\\datasets\\\\flower_photos\\\\validation')\n# Download Flower dataset\n# Already in C:\\Users\\lukas.rimkus\\.keras\\datasets\n# Actualy following the example couase doesn't really work like that\n# This is tensorflow_dataset that contains not IMAGES but labels.txt and tf_flowers-train.tfrecord-00000-of-00002 files\n(training_set, validation_set), info = tfd.load(\n 'tf_flowers',\n split=['train[:80%]', 'train[70%:]'],\n with_info=True,\n as_supervised=True,\n)\nprint('training_set: ', training_set)\nprint('validation_set: ', validation_set)\nprint('dataset_info: ', info)\n\n# Prepare the data\nIMG_SIZE = 224\nBATCH_SIZE = 32\n\nnum_classes = info.features['label'].num_classes\n\nnum_training_examples = 0\nnum_validation_examples = 0\n\nfor example in training_set:\n num_training_examples += 1\n\nfor example in validation_set:\n num_validation_examples += 1\n\nprint('Total Number of Classes: {}'.format(num_classes))\nprint('Total Number of Training Images: {}'.format(num_training_examples))\nprint('Total Number of Validation Images: {} \\n'.format(num_validation_examples))\n\n\n# CREATE BATCHES\ntrain_batches = training_set.shuffle(num_training_examples//4).map(format_image).batch(BATCH_SIZE).prefetch(1)\n\nvalidation_batches = validation_set.map(format_image).batch(BATCH_SIZE).prefetch(1)\n\n# CREATE FEATURE EXTRACTOR\nURL = \"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4\"\nfeature_extractor = hub.KerasLayer(URL,\n input_shape=(IMG_SIZE, IMG_SIZE, 3))\n\nfeature_extractor.trainable = False\n\nmodel = tf.keras.Sequential([\n feature_extractor,\n tf.keras.layers.Dense(num_classes)\n])\n\nmodel.summary()\n\nmodel.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nEPOCHS = 6\n\nhistory = model.fit(train_batches,\n epochs=EPOCHS,\n 
validation_data=validation_batches)\n\n\n# Plot Training and Validation Graphs\n\nimport matplotlib.pyplot as plt\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(EPOCHS)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.show()\n\n# Check Predictions\nimport numpy as np\n\nclass_names = np.array(info.features['label'].names)\n\nprint(class_names)\n\nimage_batch, label_batch = next(iter(train_batches))\n\n\nimage_batch = image_batch.numpy()\nlabel_batch = label_batch.numpy()\n\npredicted_batch = model.predict(image_batch)\npredicted_batch = tf.squeeze(predicted_batch).numpy()\n\npredicted_ids = np.argmax(predicted_batch, axis=-1)\npredicted_class_names = class_names[predicted_ids]\n\nprint(predicted_class_names)\n\n# Perform Transfer Learning with the Inception Model\n\nIMAGE_RES = 299\n\n(training_set, validation_set), dataset_info = tfd.load(\n 'tf_flowers',\n with_info=True,\n as_supervised=True,\n split=['train[:70%]', 'train[70%:]'],\n)\ntrain_batches = training_set.shuffle(num_training_examples//4).map(format_image).batch(BATCH_SIZE).prefetch(1)\nvalidation_batches = validation_set.map(format_image).batch(BATCH_SIZE).prefetch(1)\n\nURL = \"https://tfhub.dev/google/tf2-preview/inception_v3/feature_vector/4\"\nfeature_extractor = hub.KerasLayer(URL,\n input_shape=(IMAGE_RES, IMAGE_RES, 3),\n trainable=False)\n\nmodel_inception = tf.keras.Sequential([\n feature_extractor,\n tf.keras.layers.Dense(num_classes)\n])\n\nmodel_inception.summary()\n\nmodel_inception.compile(\n optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nEPOCHS = 6\n\nhistory = model_inception.fit(train_batches,\n epochs=EPOCHS,\n validation_data=validation_batches)\n\n\n\n","repo_name":"lukasijus/ItMLu","sub_path":"Lesson_6/FLOWERS_TRANSFER_LEARNING.py","file_name":"FLOWERS_TRANSFER_LEARNING.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9161907561","text":"#! /usr/bin/env python\n\n# standard imports\nimport argparse\nimport os\n\nfrom AegeanTools import BANE, __citation__\n\n__author__ = 'Paul Hancock'\n\n\ndef main(argv=()):\n parser = argparse.ArgumentParser(prog='BANE', prefix_chars='-')\n parser.add_argument('image', nargs='?', default=None)\n group1 = parser.add_argument_group('Configuration Options')\n group1.add_argument(\"--out\", dest='out_base', type=str,\n help=\"Basename for output images default: \"\n \"FileName_{bkg,rms}.fits\")\n group1.add_argument('--grid', dest='step_size', type=int, nargs=2,\n help='The [x,y] size of the grid to use. '\n 'Default = ~4* beam size square.')\n group1.add_argument('--box', dest='box_size', type=int, nargs=2,\n help='The [x,y] size of the box over which the '\n 'rms/bkg is calculated. Default = 5*grid.')\n group1.add_argument('--cores', dest='cores', type=int,\n help='Number of cores to use. 
'\n 'Default = all available.')\n group1.add_argument('--stripes', dest='stripes', type=int, default=None,\n help='Number of slices.')\n group1.add_argument('--slice', dest='cube_index', type=int, default=0,\n help='If the input data is a cube, then this slice '\n 'will determine the array index of the image '\n 'which will be processed by BANE')\n group1.add_argument('--nomask', dest='mask', action='store_false',\n default=True,\n help=\"Don't mask the output array [default = mask]\")\n group1.add_argument('--noclobber', dest='clobber', action='store_false',\n default=True,\n help=\"Don't run if output files already exist. \"\n \"Default is to run+overwrite.\")\n group1.add_argument('--debug', dest='debug',\n action='store_true', help='debug mode, default=False')\n group1.add_argument('--compress', dest='compress', action='store_true',\n default=False,\n help='Produce a compressed output file.')\n group1.add_argument('--cite', dest='cite', action=\"store_true\",\n default=False,\n help='Show citation information.')\n\n parser.set_defaults(out_base=None, step_size=None, box_size=None,\n twopass=True, cores=None, usescipy=False, debug=False)\n\n options = parser.parse_args(args=argv)\n\n if options.cite:\n print(__citation__)\n return 0\n\n if options.image is None:\n parser.print_help()\n return 0\n\n # Get the BANE logger.\n logging = BANE.logging\n logging_level = logging.DEBUG if options.debug else logging.INFO\n logging.basicConfig(level=logging_level,\n format=\"%(process)d:%(levelname)s %(message)s\")\n logging.info(\n \"This is BANE {0}-({1})\".format(BANE.__version__, BANE.__date__))\n\n if not os.path.exists(options.image):\n logging.error(\"File not found: {0} \".format(options.image))\n return 1\n\n if options.out_base is None:\n options.out_base = os.path.splitext(options.image)[0]\n\n if not options.clobber:\n bkgout = options.out_base + '_bkg.fits'\n rmsout = options.out_base + '_rms.fits'\n if os.path.exists(bkgout) and os.path.exists(rmsout):\n logging.error(\"{0} and {1} exist and you said noclobber\"\n \"\".format(bkgout, rmsout))\n logging.error(\"Not running\")\n return 1\n\n BANE.filter_image(im_name=options.image, out_base=options.out_base,\n step_size=options.step_size,\n box_size=options.box_size, cores=options.cores,\n mask=options.mask, compressed=options.compress,\n nslice=options.stripes,\n cube_index=options.cube_index)\n return 0\n","repo_name":"PaulHancock/Aegean","sub_path":"AegeanTools/CLI/BANE.py","file_name":"BANE.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"73292310595","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QDockWidget, QPushButton\nimport PyQt5.QtCore\n\nclass DockTest(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n self.floating = False\n self.closable = True\n\n def init(self):\n self.setGeometry(300, 300, 300, 250)\n self.setStyleSheet('background-color:white;')\n self.setWindowTitle('Dock Test')\n\n self.dock = QDockWidget('Dock Thing', self)\n self.dock.setFloating(self.floating)\n self.dock.setFeatures(self.dock.DockWidgetClosable|self.dock.DockWidgetMovable)\n self.dock.move(20, 160)\n\n self.dock2 = QDockWidget('Second Dock', self)\n self.dock2.setFloating(True)\n self.dock2.move(20, 80)\n\n self.btn1 = QPushButton('Change Dock Closable', self)\n self.btn1.move(20, 20)\n self.btn1.clicked.connect(self.changeClosable)\n\n self.btn2 = QPushButton('This is on the QDock', self)\n 
self.btn2.move(20, 20)\n self.btn2.clicked.connect(self.changeFloat)\n self.dock.setWidget(self.btn2)\n\n self.show()\n\n def changeFloat(self):\n self.floating = not self.floating\n if not self.floating:\n self.dock.move(20, 160)\n self.dock.setFloating(self.floating)\n\n def changeClosable(self):\n self.closable = not self.closable\n if self.closable:\n self.dock.setFeatures(self.dock.DockWidgetClosable|self.dock.DockWidgetMovable)\n self.dock2.setFeatures(self.dock.DockWidgetClosable|self.dock.DockWidgetFloatable)\n else:\n self.dock.setFeatures(self.dock.DockWidgetMovable)\n self.dock2.setFeatures(self.dock.DockWidgetFloatable)\n\napp = QApplication(sys.argv)\n\nw = DockTest()\nw.init()\n\nexit(app.exec_())\n","repo_name":"DarrenChiang/Python-2016","sub_path":"GUI Files/QDock Tutorial.py","file_name":"QDock Tutorial.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19069901234","text":"import sys\nimport unittest\n\nfrom magnum import *\nfrom magnum import scenegraph\nfrom magnum.scenegraph.matrix import Object3D, Scene3D\n\nclass Object(unittest.TestCase):\n def test_hierarchy(self):\n scene = Scene3D()\n scene_refcount = sys.getrefcount(scene)\n\n a = Object3D()\n a_refcount = sys.getrefcount(a)\n self.assertIs(a.scene, None)\n\n b = Object3D(parent=scene)\n b_refcount = sys.getrefcount(b)\n self.assertIs(b.scene, scene)\n self.assertIs(b.parent, scene)\n\n # B should be referenced by the scene, but not cyclically\n self.assertEqual(sys.getrefcount(b), scene_refcount + 1)\n self.assertEqual(sys.getrefcount(scene), scene_refcount)\n\n c = Object3D(parent=b)\n c_refcount = sys.getrefcount(c)\n self.assertIs(c.scene, scene)\n self.assertIs(c.parent, b)\n\n # C should be referenced by B\n self.assertEqual(sys.getrefcount(b), scene_refcount + 1)\n self.assertEqual(sys.getrefcount(c), scene_refcount + 1)\n self.assertEqual(sys.getrefcount(scene), scene_refcount)\n\n # Delete B. Because B has a parent as well, it's not deleted yet\n del b\n self.assertIsNotNone(c.parent)\n self.assertEqual(sys.getrefcount(c.parent), b_refcount - 1)\n self.assertEqual(sys.getrefcount(c), scene_refcount + 1)\n\n # Delete a scene. That also makes B deleted and C is then orphaned\n del scene\n self.assertIsNone(c.parent)\n self.assertEqual(sys.getrefcount(c), c_refcount - 1)\n\n def test_hierarchy_set_parent(self):\n # Same as test_hierarchy, but setting the parent later\n\n scene = Scene3D()\n scene_refcount = sys.getrefcount(scene)\n\n a = Object3D()\n a_refcount = sys.getrefcount(a)\n self.assertIs(a.scene, None)\n\n b = Object3D()\n b.parent = scene\n b_refcount = sys.getrefcount(b)\n self.assertIs(b.scene, scene)\n self.assertIs(b.parent, scene)\n\n # B should be referenced by the scene, but not cyclically\n self.assertEqual(sys.getrefcount(b), scene_refcount + 1)\n self.assertEqual(sys.getrefcount(scene), scene_refcount)\n\n c = Object3D()\n c.parent = b\n c_refcount = sys.getrefcount(c)\n self.assertIs(c.scene, scene)\n self.assertIs(c.parent, b)\n\n # C should be referenced by B\n self.assertEqual(sys.getrefcount(b), scene_refcount + 1)\n self.assertEqual(sys.getrefcount(c), scene_refcount + 1)\n self.assertEqual(sys.getrefcount(scene), scene_refcount)\n\n # Delete B. 
Because B has a parent as well, it's not deleted yet\n del b\n self.assertIsNotNone(c.parent)\n self.assertEqual(sys.getrefcount(c.parent), b_refcount - 1)\n self.assertEqual(sys.getrefcount(c), scene_refcount + 1)\n\n # Delete a scene. That also makes B deleted and C is then orphaned\n del scene\n self.assertIsNone(c.parent)\n self.assertEqual(sys.getrefcount(c), c_refcount - 1)\n\n def test_set_parent_invalid(self):\n a = Object3D()\n with self.assertRaisesRegex(TypeError, \"expected Scene, Object or None, got \"):\n a.parent = \"noo\"\n\n def test_drawable(self):\n object = Object3D()\n object_refcount = sys.getrefcount(object)\n\n a = scenegraph.Drawable3D(object)\n a_refcount = sys.getrefcount(a)\n self.assertIs(a.object, object)\n\n b = scenegraph.Drawable3D(object)\n b_refcount = sys.getrefcount(b)\n self.assertIs(b.object, object)\n\n # Drawables should be referenced by the object, but not cyclically\n self.assertEqual(sys.getrefcount(object), object_refcount)\n self.assertEqual(sys.getrefcount(a), object_refcount + 1)\n self.assertEqual(sys.getrefcount(b), object_refcount + 1)\n\n # Delete the object. The drawable should be still alive, but\n # disconnected from the object (and thus useless).\n del object\n self.assertIsNone(a.object)\n self.assertIsNone(b.object)\n self.assertEqual(sys.getrefcount(a), a_refcount - 1)\n self.assertEqual(sys.getrefcount(b), b_refcount - 1)\n\n def test_drawable_group(self):\n object = Object3D()\n drawables = scenegraph.DrawableGroup3D()\n\n deleted = 0\n class MyDrawable(scenegraph.Drawable3D):\n def __del__(self):\n nonlocal deleted\n deleted += 1\n\n a = MyDrawable(object, drawables)\n b = MyDrawable(object, drawables)\n\n # The drawable group should have these listed\n self.assertEqual([i for i in drawables], [a, b])\n\n # Deleting each of them should do nothing, since they're still\n # referenced by the object\n del a, b\n self.assertEqual(deleted, 0)\n self.assertEqual(len(drawables), 2)\n\n # Deleting the holder object will, tho\n del object\n self.assertEqual(deleted, 2)\n self.assertEqual(len(drawables), 0)\n\n def test_camera(self):\n object = Object3D()\n object.translate(Vector3.z_axis(5.0))\n object_refcount = sys.getrefcount(object)\n\n a = scenegraph.Camera3D(object)\n a.viewport = (400, 300)\n a.projection_matrix = Matrix4.perspective_projection(\n fov=Deg(45.0), near=0.01, far=100.0, aspect_ratio=1.0)\n a.aspect_ratio_policy = scenegraph.AspectRatioPolicy.EXTEND\n a_refcount = sys.getrefcount(a)\n self.assertEqual(a.viewport, Vector2i(400, 300))\n self.assertEqual(a.projection_matrix, Matrix4.perspective_projection(\n fov=Deg(57.82240), near=0.01, far=100.0, aspect_ratio=1.33333333))\n self.assertEqual(a.camera_matrix, Matrix4.translation(-Vector3.z_axis(5.0)))\n self.assertEqual(a.aspect_ratio_policy, scenegraph.AspectRatioPolicy.EXTEND)\n self.assertIs(a.object, object)\n\n # Camera should be referenced by the object, but not cyclically\n self.assertEqual(sys.getrefcount(object), object_refcount)\n self.assertEqual(sys.getrefcount(a), object_refcount + 1)\n\n # Delete the object. 
The camera should be still alive, but disconnected\n # from the object (and thus useless).\n del object\n self.assertIsNone(a.object)\n self.assertEqual(sys.getrefcount(a), a_refcount - 1)\n\n def test_camera_draw(self):\n scene = Scene3D()\n drawables = scenegraph.DrawableGroup3D()\n\n camera_object = Object3D(scene)\n camera_object.translate((0.0, 1.0, 5.0))\n\n camera = scenegraph.Camera3D(camera_object)\n\n rendered = None, None\n deleted = \"no :)\"\n class MyDrawable(scenegraph.Drawable3D):\n def draw(self, transformation_matrix: Matrix4, camera: scenegraph.Camera3D):\n nonlocal rendered\n rendered = (transformation_matrix, camera)\n\n def __del__(self):\n nonlocal deleted\n deleted = \"yes :(\"\n\n class MySilentDrawable(scenegraph.Drawable3D):\n def draw(self, transformation_matrix: Matrix4, camera: scenegraph.Camera3D):\n pass\n\n object = Object3D(scene)\n object.translate(Vector3.x_axis(5.0))\n a = MyDrawable(object, drawables)\n b = MySilentDrawable(object, drawables)\n\n # The drawable group should have these listed\n self.assertEqual([i for i in drawables], [a, b])\n\n # Deleting the object, the camera holder and drawable does nothing\n del camera_object, object, a, b\n\n camera.draw(drawables)\n self.assertEqual(rendered[0],\n Matrix4.translation(Vector3.x_axis(5.0))@\n Matrix4.translation((0.0, -1.0, -5.0)))\n self.assertIs(rendered[1], camera)\n\n # Deleting the scene will delete A and the drawable as well\n del scene\n self.assertEqual(deleted, \"yes :(\")\n self.assertIsNone(camera.object)\n self.assertIs(len(drawables), 0)\n\nclass Feature(unittest.TestCase):\n def test(self):\n class MyFeature(scenegraph.AbstractFeature3D):\n def __init__(self, object: Object3D):\n scenegraph.AbstractFeature3D.__init__(self, object)\n\n object = Object3D()\n feature = MyFeature(object)\n self.assertIs(feature.object, object)\n","repo_name":"mosra/magnum-bindings","sub_path":"src/python/magnum/test/test_scenegraph.py","file_name":"test_scenegraph.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"3506166488","text":"# DESAFIO 101 - Crie um programa que tenha uma FUNÇÃO chamada voto() que vai receber como PARÂMETRO o ANO DE NASCIMENTO de\n#uma pessoa, RETORNANDO um valor LITERAL indicando se uma pessoa tem voto NEGADO, OPCIONAL ou OBRIGATÓRIO nas eleições.\n\ndef voto(n):\n from datetime import date\n i = date.today().year - n\n if 16 <= i < 18 or i >= 65:\n print('Voto opcional!')\n elif 18 <= i < 65:\n print('Voto obrigatório!')\n else:\n print('Não vota!')\n\nprint('--------------------------------')\nnasc = int(input('Em que ano você nasceu? 
'))\nvoto(nasc)","repo_name":"pedr0diniz/cevpython","sub_path":"PythonExercícios/ex101 - Funções para votação.py","file_name":"ex101 - Funções para votação.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23542596511","text":"from sys import stdin\r\ndic = {}\r\ndic[\"-\"] = \"+\"\r\ndic[\"+\"] = \"-\"\r\n\r\ndef solution(m,y,n):\r\n cont = 0\r\n for i in range(len(m)):\r\n if m[i] == \"-\":\r\n if i+y <= len(m):\r\n for j in range(i,i+y):\r\n m[j] = dic[m[j]]\r\n cont += 1\r\n else:\r\n print(\"Case #\"+str(n+1)+\": IMPOSSIBLE\")\r\n return\r\n print(\"Case #\"+str(n+1)+\":\",cont)\r\n return\r\n \r\n\r\ndef main():\r\n n = int(stdin.readline())\r\n for i in range(n):\r\n m = list(stdin.readline().split())\r\n p = list(m[0])\r\n y = int(m[1])\r\n conta = 0\r\n t = True\r\n if \"-\" not in p:\r\n print(\"Case #\"+str(i+1)+\":\",0)\r\n continue\r\n else:\r\n solution(p,y,i)\r\n \r\n \r\n\r\nmain()\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2155.py","file_name":"2155.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32758342703","text":"import random\nimport string\nfrom .utils import Pair, Letter\n\n\nclass Plugboard:\n def __init__(self, pairs: str) -> None:\n pairs = pairs.split(\" \")\n self.pairs = [Pair(*p) for p in pairs if p]\n\n def encode(self, x: Letter):\n for pair in self.pairs:\n ret = pair.get(x)\n if ret:\n return ret\n return x\n\n def active_len(self):\n return len(self.pairs)\n\n def randomize(self, pairs=10):\n alphabet = string.ascii_lowercase\n while self.active_len() < pairs:\n a = random.choice(alphabet)\n alphabet = alphabet.replace(a, \"\")\n b = random.choice(alphabet)\n alphabet = alphabet.replace(b, \"\")\n self.pairs.append(Pair(a, b))\n\n def __str__(self):\n return \" \".join(map(lambda p: str(p), self.pairs))\n","repo_name":"raymas/enigma-cipher-machine","sub_path":"enigma/plugboard.py","file_name":"plugboard.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40501841269","text":"import matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport nvidia_smi\nimport time\nimport argparse\nimport sys\n\n#matplotlib.use('Qt5Agg')\n\n\ndef get_temperature_and_update_axes(\n i, plt, num_gpus, start, time_list, gpu_temp_list, fan_speed_list, axis_gpu_temp_list, axis_fan_speed_list\n ):\n plt.tight_layout(w_pad=1, h_pad=1)\n\n for gpu_id in range(num_gpus):\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(gpu_id)\n gpu_temp = nvidia_smi.nvmlDeviceGetTemperature(handle, 0)\n fan_speed = nvidia_smi.nvmlDeviceGetFanSpeed(handle)\n now = time.time()\n time_list[gpu_id].append(now - start)\n gpu_temp_list[gpu_id].append(int(gpu_temp))\n fan_speed_list[gpu_id].append(int(fan_speed))\n axis_gpu_temp_list[gpu_id].clear()\n axis_fan_speed_list[gpu_id].clear()\n axis_gpu_temp_list[gpu_id].set_xlabel('Time in seconds')\n axis_gpu_temp_list[gpu_id].set_ylabel('Temperature in °C', color='tab:red')\n axis_gpu_temp_list[gpu_id].set_title(f'GPU {gpu_id}')\n axis_fan_speed_list[gpu_id].set_ylabel('Fan Speed in %', color='tab:blue')\n axis_fan_speed_list[gpu_id].set_ylim(0, 100)\n axis_gpu_temp_list[gpu_id].yaxis.set_major_locator(MaxNLocator(integer=True))\n 
axis_fan_speed_list[gpu_id].yaxis.set_major_locator(MaxNLocator(integer=True))\n axis_gpu_temp_list[gpu_id].grid(axis='y', color='grey', linestyle='--', linewidth=0.5)\n axis_gpu_temp_list[gpu_id].plot(time_list[gpu_id], gpu_temp_list[gpu_id], color='tab:red')\n axis_fan_speed_list[gpu_id].plot(time_list[gpu_id], fan_speed_list[gpu_id], color='tab:blue')\n plt.savefig('temp_fan_speed_log.png')\n\n\ndef make_live_plot_gpu_temp_and_fan_speed(num_gpus, refresh_interval):\n fig = plt.figure(figsize=(6, 4*num_gpus))\n axis_gpu_temp_list = []\n axis_fan_speed_list = []\n\n for gpu_id in range(num_gpus):\n axis_gpu_temp = fig.add_subplot(num_gpus, 1, gpu_id + 1)\n axis_gpu_temp.set_title(f'GPU {gpu_id}')\n axis_fan_speed = axis_gpu_temp.twinx()\n axis_gpu_temp.yaxis.set_major_locator(MaxNLocator(integer=True))\n axis_fan_speed.yaxis.set_major_locator(MaxNLocator(integer=True))\n axis_gpu_temp_list.append(axis_gpu_temp)\n axis_fan_speed_list.append(axis_fan_speed)\n axis_gpu_temp.set_xlabel('Time in seconds')\n axis_gpu_temp.set_ylabel('Temperature in °C', color='tab:red')\n axis_fan_speed.set_ylabel('Fan Speed in %', color='tab:blue')\n axis_fan_speed.set_ylim(0, 100)\n time_list = [[] for gpu_id in range(num_gpus)]\n gpu_temp_list = [[] for gpu_id in range(num_gpus)]\n fan_speed_list = [[] for gpu_id in range(num_gpus)]\n plt.grid(axis='y', color='grey', linestyle='--', linewidth=0.5)\n plt.tight_layout(w_pad=1, h_pad=1)\n start = time.time()\n _ = matplotlib.animation.FuncAnimation(\n fig, get_temperature_and_update_axes, interval=refresh_interval,\n fargs=(plt, num_gpus, start, time_list, gpu_temp_list, fan_speed_list, axis_gpu_temp_list, axis_fan_speed_list)\n )\n plt.show()\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description='GPU temp and fan speed live plot', formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-ng', '--num_gpus', type=int, default=1, required=False,\n help='Number of gpus.'\n )\n parser.add_argument(\n '-ri', '--refresh_interval', type=int, default=500, required=False,\n help='Change live plot refresh interval in ms.'\n )\n args = parser.parse_args()\n nvidia_smi.nvmlInit()\n make_live_plot_gpu_temp_and_fan_speed(args.num_gpus, args.refresh_interval)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(\"plot interrupt\")\n","repo_name":"aime-team/pytorch-benchmarks","sub_path":"utils/gpu_live_plot.py","file_name":"gpu_live_plot.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"23111004716","text":"import unittest\r\n\r\nPosition = {'high': 'h', 'low': 'l'} # don't change this!\r\n\r\nclass Warrior:\r\n def __init__(self, name):\r\n # each warrior should be created with a name and 100 health points\r\n self.name = name\r\n self.health = 100\r\n # default guard is \"\", that is unguarded\r\n self.block = \"\"\r\n self.deceased = False\r\n self.zombie = False\r\n\r\n def attack(self, enemy, position):\r\n damage = 0\r\n # attacking high deals 10 damage, low 5\r\n # 0 damage if the enemy blocks in the same position\r\n if enemy.block != position: damage += 10 if position == Position['high'] else 5\r\n # and even more damage if the enemy is not blocking at all\r\n if enemy.block == \"\": damage += 5\r\n enemy.set_health(enemy.health - damage)\r\n\r\n\r\n def set_health(self, new_health):\r\n # health cannot have negative values\r\n self.health = max(0, new_health)\r\n # if a warrior is set to 
0 health he is dead\r\n if self.health == 0:\r\n self.deceased = True\r\n\r\n\r\nclass Tests(unittest.TestCase):\r\n def test_1(self):\r\n ninja = Warrior('Hanzo Hattori')\r\n samurai = Warrior('Ryoma Sakamoto')\r\n samurai.block = 'l'\r\n ninja.attack(samurai, 'h')\r\n self.assertEqual(samurai.health, 90,\r\n 'Expected samurai health to equal 90, instead got ' + str(samurai.health))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","repo_name":"HoggTruman/Code-Wars","sub_path":"5 kyu/Ninja vs Samurai.py","file_name":"Ninja vs Samurai.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43821452035","text":"import requests\nfrom datetime import datetime\nfrom flask import request\nimport sqlite3\n\n\nweather_dict = {}\n\n\ndef get_weather(lat, long):\n print('get_weather')\n print(lat, long)\n # weather.com\n weather_api = requests.get('https://api.weather.gov/points/'+str(lat)+','+str(long))\n weather_api_dict = weather_api.json()\n forecast_url = weather_api_dict['properties']['forecast']\n forecast_request = requests.get(forecast_url)\n forecast_dict = forecast_request.json()\n weather_list = []\n for forecast in forecast_dict['properties']['periods']:\n weather_list.append(forecast['name'] + ': ' + forecast['detailedForecast'])\n\n weather_dict['weather.com'] = weather_list\n # pprint(forecast_dict)\n\n # dark skies\n dark_skies_r = requests.get('https://api.darksky.net/forecast/ffa912ad1451869fa8e5b73869e4e31b/'+str(lat)+','+str(long))\n\n dark_skies = dark_skies_r.json()\n\n # pprint(dark_skies['daily'])\n weather_list = []\n for f in dark_skies['daily']['data']:\n d = datetime.utcfromtimestamp(f['time']).strftime('%A')\n weather_str = d + ' -- '\n weather_str += 'High: ' + str(f['apparentTemperatureHigh']) + ', Low: ' + str(f['apparentTemperatureLow']) + ', '\n weather_str += f['summary']\n if 'precipType' in f.keys():\n weather_str += ' with a ' + str(f['precipProbability']) + ' probability of ' + f['precipType']\n weather_list.append(weather_str)\n weather_dict['Dark Skies'] = weather_list\n\n return weather_dict\n\n\ndef get_ip():\n print('get_ip')\n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n ip = request.environ['REMOTE_ADDR']\n else:\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n\n req = requests.get('http://api.ipstack.com/'+str(ip)+'?access_key=c3bc8b0532a448ed5b30b561715d428c')\n r = req.json()\n if r['latitude'] is None:\n req = requests.get('http://api.ipstack.com/'+'192.104.183.109'+'?access_key=c3bc8b0532a448ed5b30b561715d428c')\n r = req.json()\n\n return r\n\n\ndef zip_to_coords(user):\n print('zip_to_coords')\n conn = sqlite3.connect('simple_db.sqlite')\n c = conn.cursor()\n c.execute(\"select lat, lng, major_city from simple_zipcode where zipcode = \" + user)\n result = c.fetchall()[0]\n conn.close()\n coords = (result[0], result[1])\n city = result[2]\n return coords, city\n\n\n","repo_name":"jkruszynski/weatherapp","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"46854566790","text":"from networkx.classes import graph\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nfrom scipy import sparse\n\ndef preprocess_nxgraph(graph):\n '''\n 获取节点的字典和列表\n '''\n node2idx = {} # 字典{key=节点:value=索引}\n idx2node = [] # list[node],index是索引\n node_size = 0\n for node in graph.nodes(): # 
遍历节点\n node2idx[node] = node_size\n idx2node.append(node)\n node_size += 1\n return idx2node, node2idx\n\n\ndef partition_dict(vertices, workers):\n batch_size = (len(vertices) - 1) // workers + 1\n part_list = []\n part = []\n count = 0\n for v1, nbs in vertices.items():\n part.append((v1, nbs))\n count += 1\n if count % batch_size == 0:\n part_list.append(part)\n part = []\n if len(part) > 0:\n part_list.append(part)\n return part_list\n\n\ndef partition_list(vertices, workers):\n batch_size = (len(vertices) - 1) // workers + 1\n part_list = []\n part = []\n count = 0\n for v1, nbs in enumerate(vertices):\n part.append((v1, nbs))\n count += 1\n if count % batch_size == 0:\n part_list.append(part)\n part = []\n if len(part) > 0:\n part_list.append(part)\n return part_list\n\n\ndef partition_num(num, workers):\n if num % workers == 0:\n return [num//workers]*workers\n else:\n return [num//workers]*workers + [num % workers]\n\ndef read_graph(edge_path):\n \"\"\"\n 方法读取图并创建目标矩阵。\n :param edge_path: 到边列表的路径。\n :return A: 目标矩阵。\n \"\"\"\n edges = pd.read_table(edge_path, header=None, sep= \" \").values.tolist()\n A = normalize_adjacency(edges)\n graph = nx.from_edgelist(edges)\n return A, graph.number_of_nodes()\n\ndef normalize_adjacency(edges):\n \"\"\"\n 计算稀疏度归一化邻接矩阵的方法。——稀疏矩阵解法\n :param edges: 图的边列表。\n :return A: 规范化邻接矩阵。\n \"\"\"\n # 从边列表创建一个逆矩阵。\n D_1 = create_inverse_degree_matrix(edges)\n\n index_1 = [int(edge[0]) for edge in edges] + [int(edge[1]) for edge in edges] # 因为是无向图,a->b肯定有b->a。所以先拼接起来\n index_2 = [int(edge[1]) for edge in edges] + [int(edge[0]) for edge in edges] # 同上\n values = [1.0 for edge in edges] + [1.0 for edge in edges]\n \n A = sparse.coo_matrix((values, (index_1, index_2)),\n shape=D_1.shape, dtype=np.float32)\n A = A.dot(D_1)\n return A\n\ndef create_inverse_degree_matrix(edges):\n \"\"\"\n 从边列表创建一个逆矩阵。\n :param edges: 边列表.\n :return D_1: 逆度矩阵.\n \"\"\"\n graph = nx.from_edgelist(edges)\n ind = range(len(graph.nodes()))\n \n # degs = [1.0/graph.degree(node) for node in range(1, graph.number_of_nodes()+1)]\n degs = [1.0/graph.degree(node) for node in range(graph.number_of_nodes())]\n\n print(len(degs))\n\n D_1 = sparse.coo_matrix((degs, (ind, ind)), # 稀疏矩阵\n shape=(graph.number_of_nodes(),\n graph.number_of_nodes()),\n dtype=np.float32)\n return D_1\n","repo_name":"NightGlory/DHONE","sub_path":"comparison/ge/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5093261107","text":"n = int(input())\n\nall_together = {}\nmax_int = -999999\nfor el in range(n):\n first_number = set([])\n second_number = set([])\n first_start, second_start= input().split('-')\n new_one = str(first_start)\n k = first_start.split(',')\n l = second_start.split(',')\n first_start_number_one = int(k[0])\n first_start_number_two = int(k[1])\n second_start_number_one = int(l[0])\n second_start_number_two = int(l[1])\n for f in range(first_start_number_one, first_start_number_two + 1):\n first_number.add(f)\n for s in range(second_start_number_one, second_start_number_two + 1):\n second_number.add(s)\n\n longest_interaction = list(first_number.intersection(second_number))\n if len(longest_interaction) > max_int:\n max_int = len(longest_interaction)\n all_together['name'] = longest_interaction\n\nfor key, value in all_together.items():\n print(f'Longest intersection is {value} with length {max_int}')\n\n","repo_name":"vmakksimov/Python_Advanced2","sub_path":"6. 
Longest Intersection.py","file_name":"6. Longest Intersection.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36402214020","text":"from bs4 import BeautifulSoup\nimport requests\nwhile True:\n\tmost_relevent = []\n\tless_relevent = []\n\tleast_relevent = []\n\tsynonym_list = []\n\tlist_size = 5\n\n\titem = input('')\n\n\tpage = requests.get('https://www.thesaurus.com/browse/{}'.format(item))\n\tsoup = BeautifulSoup(page.content,'html.parser')\n\n\tmost = soup.find_all('a', class_='css-r5sw71-ItemAnchor etbu2a31',href=True)\n\tless = soup.find_all('a', class_='css-1k3kgmb-ItemAnchor etbu2a31',href=True)\n\tleast = soup.find_all('a', class_='css-8umlxa-ItemAnchor etbu2a31',href=True)\n\n\tfor i in range(len(most)):\n\t\tsynonym = list(most[i])\n\t\tmost_relevent.append(synonym[0])\n\t\t\n\tfor i in range(len(less)):\n\t\tsynonym = list(less[i])\n\t\tless_relevent.append(synonym[0])\n\t\t\n\tfor i in range(len(least)):\n\t\tsynonym = list(least[i])\n\t\tleast_relevent.append(synonym[0])\n\n\tif len(most_relevent) <= list_size:\n\t\tfor i in range(len(most_relevent)):\n\t\t\tsynonym_list.append(most_relevent[i])\n\t\t\t\n\t\tif len(less_relevent) <= list_size-int(len(most_relevent)):\n\t\t\tfor i in range(len(less_relevent)):\n\t\t\t\tsynonym_list.append(less_relevent[i])\n\t\t\t\t\n\t\t\tfor i in range(list_size-int(len(most_relevent))-int(len(less_relevent))):\n\t\t\t\tsynonym_list.append(least_relevent[i])\n\t\telse:\n\t\t\tfor i in range(list_size-int(len(most_relevent))):\n\t\t\t\tsynonym_list.append(less_relevent[i])\n\telse:\n\t\tfor i in range(list_size):\n\t\t\tsynonym_list.append(most_relevent[i])\n\n\tprint('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n')\n\tfor i in range(len(synonym_list)):\n\t\tprint('\\n{}\\n'.format(synonym_list[i]))\n","repo_name":"PythonOrC/PythonCodeArchive","sub_path":"Creative/synonym search/old synoynm search.py","file_name":"old synoynm search.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"51525788","text":"# File: printfile.py\n# This program prints a file to the screen.\n\ndef main():\n    filename = input(\"Enter the full file name: \")\n    infile = open(filename, \"r\")\n    data = infile.read()\n    print(data)\n\nmain()","repo_name":"7H3-5H4D0W/CSI-31-Codes","sub_path":"printfile.py","file_name":"printfile.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23575392991","text":"\r\nT = int(input())\r\nfor case in range(T):\r\n    vals = input().split()\r\n    C,R = int(vals[0]),int(vals[1])\r\n    \r\n    cake = []\r\n    for r in range(C):\r\n        cake.append(list(input()))\r\n    \r\n    letpostions = [(c,r) for r in range(R) for c in range(C) if cake[c][r] != \"?\"]\r\n    for letp in letpostions:\r\n        initc = letp[0]\r\n        initr = letp[1]\r\n        endc = C-1\r\n        endr = R-1\r\n        startc = 0\r\n        startr = 0\r\n\r\n        for c in range(initc+1,C):\r\n            if cake[c][initr] != \"?\":\r\n                endc = c-1\r\n                break\r\n        \r\n        for c in range(initc-1,-1,-1):\r\n            if cake[c][initr] != \"?\":\r\n                startc = c+1\r\n                break\r\n        \r\n        def check_col(r):\r\n            for c in range(startc,endc+1):\r\n                if cake[c][r] != \"?\":\r\n                    return False\r\n            return True\r\n        \r\n        for r in range(initr+1,R):\r\n            if not check_col(r):\r\n                endr = r-1\r\n                break\r\n        \r\n        for r in range(initr-1,-1,-1):\r\n            if not 
check_col(r):\r\n startr = r+1\r\n break\r\n \r\n for c in range(startc,endc+1):\r\n for r in range(startr,endr+1):\r\n cake[c][r] = cake[initc][initr]\r\n \r\n cakestr = \"\\n\".join(\"\".join(l) for l in cake)\r\n print(\"Case #{}:\\n{}\".format(case+1,cakestr))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_203/378.py","file_name":"378.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23618096741","text":"# -*- coding: utf-8 -*-\nimport sys\nnfh = sys.argv[0][:-3];\nnfhin = nfh + \".in\"; nfhout= nfh + \".out\";\nfin = open(nfhin, \"r\"); fout= open(nfhout,\"w\");\ndef rl(): global fin; return fin.readline()[:-1];\ndef out(cnum, outs): global fout; fout.write(\"\".join([\"Case #\",str(cnum),\": \",str(outs),\"\\n\"]));\ndef end(): global fout; fout.close();\n\ndef analyse(l, opposed, transform):\n if len(l) > 1:\n x = l[-1]\n a = l[-2]\n b = l[-1]\n if a+b in transform:\n l = l[:-2]\n l.append(transform[a+b])\n elif b+a in transform:\n l = l[:-2]\n l.append(transform[b+a])\n elif (x in opposed) and len(opposed[x].intersection(set(l[:-1])))>0:\n l = []\n\n return l\n\n# --- program ---\ndef main(p):\n cels = []\n dels = []\n i = 0\n c = int(p[i])\n for cc in xrange(c):\n i+=1\n cels.append(p[i])\n i+=1\n d = int(p[i])\n for dd in xrange(d):\n i+=1\n dels.append(p[i])\n els = p[-1]\n\n #prepare\n opposed = dict()\n for x in dels:\n a = x[0]\n b = x[1]\n if a not in opposed:\n opposed[a] = set()\n if b not in opposed:\n opposed[b] = set()\n opposed[b].add(a)\n opposed[a].add(b)\n\n transform = dict()\n for x in cels:\n a = x[0]\n b = x[1]\n c = x[2]\n transform[a+b] = c\n transform[b+a] = c\n\n l = []\n for x in els:\n l.append(x)\n oldl = list(l)\n while True:\n l = analyse(l, opposed, transform)\n if l == oldl:\n break\n oldl = list(l)\n\n return repr(l).replace(\"'\",\"\")\n\n# --- test configuration ---\nNT = int(rl());\nfor T in xrange(1, NT+1):\n param = rl().split(\" \")\n res = main(param)\n out(T,res)\nend()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/537.py","file_name":"537.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71071967876","text":"# Import libraries\nimport pandas as pd\nimport numpy as np\nimport datetime\n\n# Import listings csv\ndata = pd.read_csv('listings.csv')\n\n# Drop uneeded columns\ndata = data.drop(['host_name','neighbourhood_group'], axis=1)\n\n# Make object variables categorical\ncols = ['neighbourhood','room_type']\nfor col in cols:\n data[col] = data[col].astype('category')\n\n# Change last review to date type\ndata.last_review = pd.to_datetime(data.last_review)\n\n# Make a column to know what day the data was downloaded - probably a better way to track this\ndata['date_of_data_download'] = datetime.date.today()\ndata.date_of_data_download = pd.to_datetime(data.date_of_data_download)\n\n# Make a column to track days since last review\ndata['days_since_last_review'] = data.date_of_data_download - data.last_review\n\n# Export cleaned dataframe to csv\ndata.to_csv('listings_cleaned.csv',index=False)\n","repo_name":"dhgrainger/rental_predictor","sub_path":"src/etl/etl_listings.py","file_name":"etl_listings.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"71110005953","text":"from TriggerMenuMT.L1.Base.L1MenuFlags import L1MenuFlags\nfrom TriggerMenuMT.L1.Base.Limits import Limits\n\ndef print_available():\n import logging\n defineMenu()\n available = list(set(range(Limits.MaxTrigItems-3)) - set(L1MenuFlags.CtpIdMap.value.values()) - set([508]))\n freeItems = Limits.MaxTrigItems - len(L1MenuFlags.items.value) # correct for ZB and CALREQ items\n floatingItems = sorted(list(set(L1MenuFlags.items.value) - set(L1MenuFlags.CtpIdMap.value.keys()))) # these items get their CTPID assigned automatically\n unusedItemsWithCTPID = set(L1MenuFlags.CtpIdMap.value.keys()) - set(L1MenuFlags.items.value) # this should be empty, otherwise remove the items from the CtpIdMap\n available.sort()\n logging.info(\"There are %d available CTP IDs: %s\", len(available), \",\".join(map(str,available)))\n logging.info(\"IDs >= 472 go in partition 2, IDs >= 492 go in partition 3\")\n logging.info(\"There are %d free items\", freeItems)\n logging.info(\"There are %d floating items: %s\", len(floatingItems), \",\".join(map(str,floatingItems)))\n logging.info(\"There are %d unused items with CTP ID: %s\", len(unusedItemsWithCTPID), \",\".join(map(str,unusedItemsWithCTPID)))\n\n\ndef defineMenu():\n\n L1MenuFlags.CTPVersion = 4 # new CTP\n\n L1MenuFlags.BunchGroupPartitioning = [1, 15, 15] # partition 1: 1-10, partition 2: empty (was 14), partition 3: 15 (note that BGRP0 is used by all items)\n L1MenuFlags.BunchGroupNames = ['BCRVeto', 'Paired', 'CalReq', 'Empty', \n 'IsolatedUnpaired', 'NonIsolatedUnpaired', 'EmptyAfterPaired', 'InTrain', \n 'AbortGapNotCalReq', 'VdM', 'ALFA', 'EmptyBeforePaired',\n 'EmptyAndPaired']\n\n L1MenuFlags.MenuPartitioning = [0, 472, 492] # partition 1: ctpid 0-471, partition 2: ctpid 472-491, partition 3: ctpid 492-511\n\n # Define one item per CTP connector\n L1MenuFlags.items = [\n # Direct CTP inputs\n 'L1_CTPCAL_Thresholds',\n 'L1_NIM1_Thresholds',\n 'L1_NIM2_Thresholds',\n\n # Legacy connectors\n 'L1_EM1_Thresholds',\n 'L1_EM2_Thresholds',\n #\n 'L1_TAU1_Thresholds',\n 'L1_TAU2_Thresholds',\n #\n 'L1_JET1_Thresholds',\n 'L1_JET2_Thresholds',\n #\n 'L1_EN1_Thresholds',\n 'L1_EN2_Thresholds',\n\n # Require ZB(_EM15) threshold\n 'L1_ZB',\n ]\n\n # CTP ID 509-511 are reserved for CALREQ\n L1MenuFlags.CtpIdMap = {\n # to be used to hardcode CTP IDs for specific items\n # NB: 508 is reserved for the zero bias trigger, and 509-511 for the CALREQ triggers (at the moment, ATR-22654)\n\n # High-frequency counters fixed to consecutive CTP IDs\n # 8 items with the high frequency per-bunch monitoring counters (HF:111)\n # should be in consecutive cpid, starting a ctpid number with ctpid%16 = 0\n # ATR-23836\n \"L1_BCM_AC_UNPAIRED_ISO\":480,\n \"L1_BCM_CA_UNPAIRED_ISO\":481,\n \"L1_J12\":482,\n \"L1_MBTS_1\":483,\n \"L1_MBTS_2\":484,\n \"L1_MBTS_1_1\":485,\n \"L1_BCM_2A_UNPAIRED_ISO\":486,\n \"L1_BCM_2C_UNPAIRED_ISO\":487,\n #\n \"L1_ZB_eEM18\": 508\n }\n\n\nif __name__ == \"__main__\": print_available()\n\n\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Trigger/TriggerCommon/TriggerMenuMT/python/L1/Menu/Menu_AllCTPIn_run3_v1.py","file_name":"Menu_AllCTPIn_run3_v1.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17514779951","text":"import torch\nfrom torch import nn\nfrom MoE_alpha import MoE\n\nmoe = MoE(\n dim = 4,\n num_experts = 8, # increase the experts (# parameters) of your model without increasing computation\n hidden_dim = 4 
* 4, # size of hidden dimension in each expert, defaults to 4 * dimension\n activation = nn.LeakyReLU, # use your preferred activation, will default to GELU\n second_policy_train = 'random', # in top_2 gating, policy for whether to use a second-place expert\n second_policy_eval = 'random', # all (always) | none (never) | threshold (if gate value > the given threshold) | random (if gate value > threshold * random_uniform(0, 1))\n second_threshold_train = 0.2,\n second_threshold_eval = 0.2,\n capacity_factor_train = 1.25, # experts have fixed capacity per batch. we need some extra capacity in case gating is not perfectly balanced.\n capacity_factor_eval = 2., # capacity_factor_* should be set to a value >=1\n loss_coef = 1e-2 # multiplier on the auxiliary expert balancing auxiliary loss\n)\n\ninputs = torch.randn(2, 4, 4)\nout, aux_loss = moe(inputs) # (4, 1024, 512), (1,)","repo_name":"Luowaterbi/DrugPPP","sub_path":"Test/test_alpha.py","file_name":"test_alpha.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16907711516","text":"import redis\nimport logging\nimport pickle\nimport json\nimport aioredis\nfrom app.settings import REDIS_HOST, REDIS_PORT\n\n\nlog = logging.getLogger(__name__)\n\n\nclass CacheGateway:\n redis_host = REDIS_HOST\n redis_port = REDIS_PORT\n redis_password = \"\"\n queue = None\n pool = None\n\n def __init__(self):\n self.queue = redis.StrictRedis(\n host=self.redis_host,\n port=self.redis_port,\n password=\"\",\n decode_responses=True,\n )\n\n async def get_pool(self):\n self.pool = await aioredis.create_redis_pool(f'redis://{self.redis_host}:{self.redis_port}')\n\n async def subscribe(self, key):\n return await self.pool.subscribe(key)\n\n def publish_message(self, key, message):\n log.debug(f\"publishing in {key}: {message}\")\n return self.queue.publish(key, json.dumps(message))\n\n async def close(self):\n self.queue.close()\n if self.pool:\n self.pool.close()\n await self.pool.wait_closed()\n\n def get(self, key):\n value = self.queue.get(key)\n if isinstance(value, dict):\n formatted_value = json.dumps(value)\n else:\n formatted_value = value\n return formatted_value\n\n def set(self, key, value, expires=None):\n if isinstance(value, dict):\n formatted_value = json.dumps(value)\n else:\n formatted_value = value\n\n self.queue.set(key, formatted_value)\n if expires:\n self.queue.expire(key, expires)\n\n def delete(self, key):\n if self.queue.get(key):\n self.queue.delete(key)\n\n def lset(self, key, value):\n self.queue.lpush(key, value)\n\n def lget(self, key):\n _list = []\n if self.queue.llen(key) == 0:\n return _list\n while self.queue.llen(key) != 0:\n _list.append(self.queue.lpop(key))\n self.queue.lpush(key, *_list)\n return _list\n","repo_name":"zilohumberto/movie_score_python","sub_path":"app/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11536478156","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom datetime import date\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.db.models import Count\nfrom django.db.models.functions import TruncMonth\nfrom django.shortcuts import render, 
redirect\n\n# Create your views here.\nfrom django.views.generic import TemplateView\n\nfrom accounts.models import Provider, Consumer, Account, OPTED_OUT, Colaborator, SignupProcess\nfrom core.forms.password import PasswordForm\nfrom core.forms.profile import ProfileForm\nfrom core.forms.signup import SignUpForm\nfrom intercoop.models import IntercoopEntity, IntercoopAccount\nfrom simple_bpm.models import ProcessStep\n\n\nclass dashboard(TemplateView):\n template_name = 'dashboard/home.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n context['year'] = date.today().year\n return context\n\n\nclass accounts(TemplateView):\n template_name = 'dashboard/accounts.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n\n year = date.today().year\n provider_signups = Provider.objects.filter(registration_date__year=year)\n consumer_signups = Consumer.objects.filter(registration_date__year=year)\n account_optedout = Account.objects.filter(status=OPTED_OUT, opted_out_date__year=year)\n\n context['provider_signups'] = provider_signups.count()\n context['consumer_signups'] = consumer_signups.count()\n context['account_optedout'] = account_optedout.count()\n context['num_accounts'] = Account.objects.active().count()\n context['num_providers'] = Provider.objects.active().count()\n context['num_consumers'] = Consumer.objects.active().count()\n context['num_special'] = Colaborator.objects.active().count()\n\n context['year'] = year\n context['signups'] = Account.objects.active().filter(registration_date__year=year).annotate(month=TruncMonth('registration_date')).values('month').annotate(total=Count('id')).values('month', 'total')\n\n return context\n\n\nclass signups(TemplateView):\n template_name = 'dashboard/signups.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n year = date.today().year\n\n signups = SignupProcess.objects.pending().filter(last_update__year=year, cancelled=False)\n signup_statuses = {}\n for signup in signups:\n if signup.workflow.current_state.pk in signup_statuses:\n signup_statuses[signup.workflow.current_state.pk]['count'] += 1\n else:\n signup_statuses[signup.workflow.current_state.pk] = {\n 'step': signup.workflow.current_state,\n 'count': 1\n }\n\n context['signups'] = signups\n context['closed_signups'] = SignupProcess.objects.filter(last_update__year=year, cancelled=False, workflow__completed=True).count()\n context['cancelled_signups'] = SignupProcess.objects.filter(last_update__year=year, cancelled=True).count()\n context['signup_statuses'] = signup_statuses\n\n return context\n\n\nclass intercoop(TemplateView):\n template_name = 'dashboard/intercoop.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data()\n year = date.today().year\n\n total_accounts = IntercoopAccount.objects.all().count()\n new_accounts = IntercoopAccount.objects.filter(registration_date__year=year).count()\n entities = []\n for entity in IntercoopEntity.objects.all():\n intercoop_accounts = IntercoopAccount.objects.filter(entity=entity).count()\n entities.append({\n 'entity': entity,\n 'accounts_count': intercoop_accounts\n })\n print(entities)\n context['total_accounts'] = total_accounts\n context['new_accounts'] = new_accounts\n context['entities'] = entities\n\n return 
context","repo_name":"Mercado-Social-de-Madrid/gestionMES","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"23557345781","text":"def tidy(n):\r\n    prev = n[0]\r\n    for x in n:\r\n        if prev > x:\r\n            return False\r\n        prev = x\r\n    return True\r\n    \r\nt = int(input())\r\n\r\nfor i in range(1, t + 1):\r\n    n = input() \r\n\r\n    if tidy(n):\r\n        pass\r\n        print(\"Case #{0}: {1}\".format(i, n))\r\n        continue\r\n\r\n    #check which digit is out of order\r\n    for a in range(0, len(n) - 1):\r\n        if n[a] >= n[a + 1]:\r\n            break \r\n\r\n    \r\n    if a == 0 and n[a] == '1':\r\n        print(\"Case #{0}: {1}\".format(i, '9'*(len(n)-1)))\r\n    else:\r\n        print(\"Case #{0}: {1}\".format(i, n[:a] + str(int(n[a]) - 1) + len(n[a+1:])*'9'))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3020.py","file_name":"3020.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27163597379","text":"n = int(input(\"Введите количество cтрок из чисел от 1 до 9: \"))\ni = 1\nline = \"\"\nwhile i <= n:\n    line = line + str(i)\n    print(line)\n    i += 1\n\n#\"Лесенка\"\n# Задание\n# По данному числу n выведите лесенку из n ступенек. Кадлая i-я\n# ступень состоит из чисел от 1 до i без пробелов\n#\n# Пример\n# Для n = 4\n# Нужно вывести:\n# 1\n# 12\n# 123\n# 1234\n#\n# Формат входных данных\n# Дано целое число в диапазоне [1, 9]\n#\n# Формат выходных данных\n# Вывести лесенку из чисел.\n","repo_name":"PaulMazuka/python1_specialist","sub_path":"Module3/03_07_practice_IMPORTANT.py","file_name":"03_07_practice_IMPORTANT.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29729184942","text":"from AirSimClient import *\nimport cv2\nimport time\n\n# connect to the AirSim simulator \nclient = CarClient()\nclient.confirmConnection()\ncar_controls = CarControls()\n\nstart = time.time()\n\nprint(\"Time,Speed,Gear,PX,PY,PZ,OW,OX,OY,OZ\")\n\n# monitor car state while you drive it manually.\nwhile (cv2.waitKey(1) & 0xFF) == 0xFF:\n    # get state of the car\n    car_state = client.getCarState()\n    pos = car_state.kinematics_true.position\n    orientation = car_state.kinematics_true.orientation\n    milliseconds = (time.time() - start) * 1000\n    print(\"%s,%d,%d,%f,%f,%f,%f,%f,%f,%f\" % \\\n        (milliseconds, car_state.speed, car_state.gear, pos.x_val, pos.y_val, pos.z_val, \n        orientation.w_val, orientation.x_val, orientation.y_val, orientation.z_val))\n    time.sleep(0.1)\n","repo_name":"nekrald/quadrotor_reinforcement_learning","sub_path":"AirSim/PythonClient/car_monitor.py","file_name":"car_monitor.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23422590231","text":"__author__ = 'iToR'\nimport sys\n\nfile_name = \"B-small-attempt0.in\"\n\ndef computeUseTime(init_cookie_per_sec, parameter, farm_buy):\n    cookie_farm_cost = float(parameter[0])\n    cookie_farm_income = float(parameter[1])\n    target_cookie = float(parameter[2])\n    cookie_per_sec = init_cookie_per_sec\n    req_time = 0\n    for i in range(farm_buy):\n        req_time += cookie_farm_cost/cookie_per_sec\n        cookie_per_sec += cookie_farm_income\n    req_time += target_cookie/cookie_per_sec\n    req_time = round(req_time, 7)\n    return req_time\n\ndef generateOutput(ans, case):\n    ans_file = open(\"answer.txt\", 
\"a\")\n ans_file.write(\"Case #\" + str(case) + \": \")\n ans_file.write(str(ans) + \"\\n\")\n\nf = open(file_name)\n\ninput_num = int(f.readline())\n\nfor i in range(1,input_num+1):\n input_parameter = f.readline()\n parameter = input_parameter.split()\n\n cookie_farm_cost = float(parameter[0])\n cookie_farm_income = float(parameter[1])\n target_cookie = float(parameter[2])\n\n req_time = target_cookie/2.0\n req_time = round(req_time, 7)\n farm_buy = 1\n while True:\n test_req_time = computeUseTime(2.0, parameter, farm_buy)\n if req_time < test_req_time:\n break\n else:\n req_time = test_req_time\n farm_buy += 1\n generateOutput(req_time, i)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1669.py","file_name":"1669.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23392919531","text":"import itertools\r\n\r\nusableSums = [0, 1, 4];\r\n\r\nclass unique_element:\r\n def __init__(self,value,occurrences):\r\n self.value = value\r\n self.occurrences = occurrences\r\n\r\ndef perm_unique(elements):\r\n eset=set(elements)\r\n listunique = [unique_element(i,elements.count(i)) for i in eset]\r\n u=len(elements)\r\n return perm_unique_helper(listunique,[0]*u,u-1)\r\n\r\ndef perm_unique_helper(listunique,result_list,d):\r\n if d < 0:\r\n yield tuple(result_list)\r\n else:\r\n for i in listunique:\r\n if i.occurrences > 0:\r\n result_list[d]=i.value\r\n i.occurrences-=1\r\n for g in perm_unique_helper(listunique,result_list,d-1):\r\n yield g\r\n i.occurrences+=1\r\n\t\r\ndef rep(s, m):\r\n a, b = divmod(m, len(s))\r\n return s * a + s[:b]\r\n\r\ndef sumSqrDigs(x):\r\n\tsum = 0\r\n\tfor dig in x:\r\n\t\tval = int(dig)\r\n\t\tsum += val*val\r\n\r\n\treturn sum\r\n\t\r\ndef makePalindrome(x, middle=''):\r\n\treturn x + middle + x[::-1]\r\n\t\r\ndef possiblePalindromes(x):\r\n\tglobal usableSums\r\n\t\r\n\tsum = sumSqrDigs(x) * 2\r\n\tif sum < 10:\r\n\t\tyield makePalindrome(x, '')\r\n\t\tfor i, n in enumerate(usableSums):\r\n\t\t\tif sum+n <= 10:\r\n\t\t\t\tyield makePalindrome(x, str(i))\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\r\ndef countInRange(low, high):\r\n\tcount = 0\r\n\tif low <= 1 and high >= 1:\r\n\t\tcount += 1\r\n\tif low <= 4 and high >= 4:\r\n\t\tcount += 1\r\n\tif low <= 9 and high >= 9:\r\n\t\tcount += 1\r\n\tfor ones in range(1,5):\r\n\t\tfor zeros in range(25):\r\n\t\t\tfor dig in range(1, 3):\r\n\t\t\t\ttoTest = str(dig)*ones + '0'*zeros\r\n\t\t\t\tm = makePalindrome(toTest)\r\n\t\t\t\tif int(m)*int(m) > high:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tfor perm in perm_unique([c for c in toTest]):\r\n\t\t\t\t\tif perm[0] is not '0':\r\n\t\t\t\t\t\tfor pal in possiblePalindromes(''.join(str(c) for c in perm)):\r\n\t\t\t\t\t\t\tsquared = int(pal)*int(pal)\r\n\t\t\t\t\t\t\tif squared <= high and squared >= low:\r\n\t\t\t\t\t\t\t\tcount += 1\r\n\r\n\treturn count\r\n\t\r\nfr = open(\"C-small-attempt.in\", 'r')\r\nfw = open(\"output.txt\", 'w')\r\nlines = [line for line in fr]\r\n\r\nnumTests = 0\r\n\r\nfor i, line in enumerate(lines):\r\n\tif i == 0:\r\n\t\tnumTests = int(line)\r\n\telse:\r\n\t\ttest = [a.replace('\\n','') for a in line.split(' ')]\r\n\t\tfw.write(\"Case #%d: %s\\n\" % (i, countInRange(int(test[0]), int(test[1]))))\r\n\t\r\nfr.close()\r\nfw.close()\r\n#print 
countInRange(100,1000)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/1836.py","file_name":"1836.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23482553021","text":"from code_jam import *\n\n@autosolve\n@collects\ndef solve(tokens):\n done = set()\n input = tokens.next_token(int)\n\n current = input\n for i in range(10000):\n for char in str(current):\n done.add(char)\n if len(done) == 10:\n return current\n current += input\n return \"Insomnia\"","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/3232.py","file_name":"3232.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41391480753","text":"import turtle\n\nt= turtle.Turtle()\nt.speed(0)\nturtle.bgcolor(\"black\")\nt.pencolor(\"white\")\n\nx=0\ny=0\n\nt.penup()\nt.goto(0,200)\nt.pendown()\n\nwhile True:\n t.forward (x)\n t.right (y)\n x= x+3\n y= y+1\n if y== 200:\n break\n t.hideturtle()\n\nturtle.done()\nturtle.exitonclick() ","repo_name":"PragyaSingh-1/DigitalArt-Using-Python","sub_path":"MINI PROJECT/07_CIRCLE_DESIGN/VIBRATE.py","file_name":"VIBRATE.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74583183554","text":"import pygame as pg\nimport pygame_menu\n\nimport random\n\npg.init()\npg.display.set_caption('Flappy Bird')\n# Определение констант\nSCREEN_WIDTH = 480\nSCREEN_HEIGHT = 640\nBIRD_WIDTH = 50\nBIRD_HEIGHT = 50\nPIPE_WIDTH = 80\nPIPE_GAP = 250\nSPEED = 5\nWHITE = (255, 255, 255)\nGREEN = (0, 128, 0)\nYELLOW = (255, 255, 0)\nscreen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npg.display.set_caption('Flappy Bird')\nscore = 0\nfont = pg.font.Font(None, 36) # Шрифт для отображения очков\n\n\ndef draw_score():\n score_text = font.render(f\"Score: {score}\", True, (0, 0, 0))\n screen.blit(score_text, (10, 10))\n\n\nclass Bird(pg.sprite.Sprite):\n def __init__(self):\n super().__init__()\n # self.image = pg.Surface([BIRD_WIDTH, BIRD_HEIGHT])\n # self.image.fill((255, 255, 0))\n self.image = pg.image.load('bird.png')\n self.image = pg.transform.scale(self.image, size=[BIRD_WIDTH, BIRD_HEIGHT])\n self.sprite_copy = self.image\n self.rect = self.image.get_rect(\n center=(SCREEN_WIDTH // 4, SCREEN_HEIGHT // 2)\n )\n self.gravity = 1\n self.lift = -10\n self.velocity = 0\n\n def update(self):\n self.image = pg.transform.rotate(\n self.sprite_copy, -self.velocity)\n self.velocity += self.gravity\n self.rect.y += self.velocity\n if self.rect.top < 0:\n self.rect.top = 0\n self.velocity = 0\n\n if self.rect.bottom > SCREEN_HEIGHT:\n self.rect.bottom = SCREEN_HEIGHT\n self.velocity = 0\n\n def jump(self):\n self.velocity = self.lift\n\n\nclass Pipe(pg.sprite.Sprite):\n TOP = 1\n BOTTOM = 2\n\n def __init__(self, type, gap_start):\n super().__init__()\n self.image = pg.image.load('pipe.png')\n self.image = pg.transform.scale(self.image, [PIPE_WIDTH, self.image.get_height() / 2])\n if type == self.TOP:\n self.image = pg.transform.flip(self.image, False, True)\n self.rect = self.image.get_rect(bottomleft=(SCREEN_WIDTH, gap_start))\n elif type == self.BOTTOM:\n self.rect = self.image.get_rect(topleft=(SCREEN_WIDTH, gap_start + PIPE_GAP))\n self.passed = False\n\n def update(self):\n self.rect.x -= SPEED\n if self.rect.right < 0:\n 
self.kill()\n\n\nbird = Bird()\npipes = pg.sprite.Group()\n\n\ndef make_pipes():\n gap_start = random.randint(50, SCREEN_HEIGHT - PIPE_GAP - 50)\n top_pipe = Pipe(Pipe.TOP, gap_start)\n bottom_pipe = Pipe(Pipe.BOTTOM, gap_start)\n pipes.add(top_pipe, bottom_pipe)\n\n\nmake_pipes()\n\nclock = pg.time.Clock()\n\n\ndef main():\n global score\n score = 0\n bird.rect.center = (SCREEN_WIDTH // 4, SCREEN_HEIGHT // 2)\n bird.velocity = 0\n pipes.empty()\n make_pipes()\n while True:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n return\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_SPACE:\n bird.jump()\n # global SPEED\n # if event.key == pg.K_RIGHT:\n # SPEED +=10\n # if event.key == pg.K_LEFT:\n # SPEED -=10\n\n pipes.update()\n if pipes.sprites()[-1].rect.x <= SCREEN_WIDTH / 2:\n make_pipes()\n for p in pipes:\n if p.rect.right < bird.rect.left and not p.passed:\n p.passed = True\n score += 0.5\n bird.update()\n\n collisions = pg.sprite.spritecollide(bird, pipes, False)\n if collisions:\n show_end_screen()\n return\n\n screen.fill(WHITE)\n screen.blit(bird.image, bird.rect)\n pipes.draw(screen)\n draw_score()\n pg.display.update()\n clock.tick(40)\n\n\n\ndef show_end_screen():\n end_menu = pygame_menu.Menu('Игра окончена', 300, 400,\n theme=pygame_menu.themes.THEME_BLUE)\n end_menu.add.label(f'Всего очков: {score}', font_size=30)\n end_menu.add.button('Заново', main)\n end_menu.add.button('Выйти', pygame_menu.events.EXIT)\n end_menu.mainloop(screen)\n\n\ndef show_start_screen():\n menu = pygame_menu.Menu('Flappy Bird', 300, 400, theme=pygame_menu.themes.THEME_BLUE)\n menu.add.button('Начать', main)\n menu.add.button('Выйти', pygame_menu.events.EXIT)\n menu.mainloop(screen)\n\n\nif __name__ == '__main__':\n show_start_screen()\n","repo_name":"Norrica/NewPythonCourse","sub_path":"module_2/12. 
Flappy Bird 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23639116401","text":"\ndef problemB(inputFile,outputFile):\n\tf = open(inputFile,\"r\")\n\tinput = f.readlines()\n\toutput = \"\"\n\n\tfor i, line in enumerate(input[1:]):\n\t\toutput += \"Case #\" + str(i+1) + \": \" + str(parseLine(line)) + \"\\n\"\n\t\n\tg = open(outputFile,\"w\")\n\tg.write(output)\n\tg.close()\n\tf.close()\n\ndef parseLine(line):\n\tnumbers = line.split()\n\tS = int(numbers[1])\n\tp = int(numbers[2])\n\t\n\tcount = 0\n\tfor score in numbers[3:]:\n\t\tscore = int(score)\n\t\tv = highScore(score,p)\n\t\tif v == 1:\n\t\t\tcount += 1\n\t\telif v == 0:\n\t\t\tif S > 0:\n\t\t\t\tcount += 1\n\t\t\t\tS -= 1\n\treturn count\n\n\"\"\"\n\tReturn 1 if p can be obtained naturally\n\tReturn 0 if p can be obtained with the use of a surprising judgement\n\tReturn -1 if p cannot be obtained\n\"\"\"\ndef highScore(score,p):\n\tif p != 0 and score == 0:\n\t\treturn -1\n\telif p == 10 and score < 30:\n\t\treturn -1\n\telif 3*p <= score+2:\n\t\treturn 1\n\telif 3*p <= score+4:\n\t\treturn 0\n\telse:\n\t\treturn -1\n\n\nproblemB(\"p2-input.txt\",\"p2-output.txt\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72086796675","text":"import random\nimport operator\nimport collections\nimport time\nfrom Poker import *\nimport holdem_calc\n\n# Initialize bankroll amount\n\nbankroll = int(input(\"Welcome to WPT! How much would you like to buy in for?\\n\"))\n\nplay = input(f'Your current bankroll is ${bankroll}. Would you like to play a hand? \\n[1]: Yes\\n[2]: No\\n')\n\nwhile play == \"1\":\n\n deck = card_deck()\n\n # Player input bet amounts and validate each bet amount against bankroll\n print(f'Starting Bankroll: {bankroll}')\n\n ante = budget_validation(\"How much would you like to ante: \", bankroll)\n bankroll -= ante\n hole_bonus_bet = budget_validation(\"How much would you like to bet on the Hole Card Bonus: \", bankroll)\n bankroll -= hole_bonus_bet\n final_bonus_bet = budget_validation(\"How much would you like to bet on the Final Hand Bonus: \", bankroll)\n bankroll -= final_bonus_bet\n\n time.sleep(1)\n\n # Deal hands for player and dealer\n player_hand = []\n deal_player(deck, player_hand)\n dealer_hand = []\n deal_player(deck, dealer_hand)\n\n # Calculate Pre-Flop Win Percentage \n unknown_hand = ['?', '?']\n pre_flop_hands = player_hand + unknown_hand\n\n\n # Show player hand and ask for a raise\n print(f'Player: {player_hand}')\n\n \n # Evaluate hole card bonus result and adjusts bankroll accordingly\n if hole_bonus_bet > 0:\n hole_pay = hole_bonus(player_hand)\n \n if hole_pay > 0:\n bankroll += ((hole_bonus_bet * hole_pay) + hole_bonus_bet)\n time.sleep(2)\n print(f'Hole Bonus Wins ${hole_bonus_bet * hole_pay}')\n time.sleep(1)\n print(f'Post Hole Bonus Bankroll: ${bankroll}')\n time.sleep(1)\n \n # Display Pre-Flop Win Percentage \n print('Calculating Win Percentage')\n win_pct = holdem_calc.calculate(None,False,100,None,pre_flop_hands,False)[1]\n win_pct = format(win_pct, \".2%\")\n print(f'Win Percentage: {win_pct}')\n time.sleep(1.5)\n\n all_in = raise_validation(f'Would you like to raise? 
\\n[1]: Yes\\n[2]: No\\n', ante, bankroll)\n if all_in ==\"1\":\n time.sleep(1)\n raised = ante * 3\n bankroll -= raised\n else:\n # Reassign ante and raise amounts so player gets paid nothing for folding while still being eligible for bonus payouts\n raised = 0 \n ante = 0\n\n # Show dealer hand after player raises\n print(f'Dealer: {dealer_hand}')\n \n # Calculate Win Percentage \n combo_hands = player_hand + dealer_hand\n win_pct = holdem_calc.calculate(None,False,20000,None,combo_hands,False)[1]\n win_pct = format(win_pct, \".2%\")\n\n # Initialize board and deal board cards\n board = []\n print(f'Player: {player_hand} vs. Dealer: {dealer_hand}: Win Pct: {win_pct} ')\n time.sleep(2)\n deal_game(deck, board, 3)\n time.sleep(3)\n deal_game(deck, board, 1)\n time.sleep(2)\n deal_game(deck, board, 1)\n\n\n # Identify final hands for player and dealer\n final_hand = showdown_hand(player_hand, board)\n dealer_final_hand = showdown_hand(dealer_hand, board)\n time.sleep(2)\n\n # Evaluate Final Hand Bonus\n if final_bonus_bet > 0:\n final_pay = best_hand(final_hand)[\"bonus\"]\n\n if final_pay > 0:\n bankroll += ((final_bonus_bet * final_pay) + final_bonus_bet)\n print(f'{best_hand(final_hand)[\"hand\"]}: Final Hand Bonus Wins ${final_bonus_bet * final_pay}') \n time.sleep(2)\n print(f'Post Final Bonus Bankroll: ${bankroll}')\n time.sleep(2)\n \n # Evaluate winner and corresponing payout\n winner = winning_player(final_hand, dealer_final_hand)\n if winner == \"player\" and all_in == \"1\":\n payout = (ante * 2) + (raised * 2)\n winning_hand = best_hand(final_hand)[\"hand\"]\n winning_showdown = best_hand(final_hand)[\"showdown\"]\n print(f'{winner.capitalize()} wins with a {winning_hand} {winning_showdown}')\n elif winner == \"player\" and all_in != \"2\":\n payout = 0\n winning_hand = best_hand(final_hand)[\"hand\"]\n winning_showdown = best_hand(final_hand)[\"showdown\"]\n print(f'Scared money don\\'t make money. You would have won with a {winning_hand}: {winning_showdown}')\n elif winner == \"dealer\":\n payout = 0\n winning_hand = best_hand(dealer_final_hand)[\"hand\"]\n winning_showdown = best_hand(dealer_final_hand)[\"showdown\"]\n print(f'{winner.capitalize()} wins with a {winning_hand} {winning_showdown}')\n else:\n payout = ante + raised\n winning_hand = best_hand(final_hand)[\"hand\"]\n winning_showdown = best_hand(final_hand)[\"showdown\"]\n print(f'PUSH: {winning_hand} {winning_showdown}')\n\n bankroll += payout\n time.sleep(2)\n\n print(f'Post Hand Complete Bankroll: ${bankroll}')\n time.sleep(1)\n\n if bankroll <= 0:\n play = input(\"You are out of Money! Would you like to re-buy? \\n[1]: Yes\\n[2]: No\\n\")\n if play == \"1\":\n bankroll = int(input(\"How much would you like to re-buy for?\\n\"))\n else:\n play = input(\"Would you like to play another hand? 
\\n[1]: Yes\\n[2]: No\\n\")\n\n\n\n \"\"\"\n add available budget after each bet\n add warning that bet amount will not allow a player to raise\n write showdown function for poker.py\n \"\"\"","repo_name":"habetyes/WPT-Game","sub_path":"WPT_Refactor.py","file_name":"WPT_Refactor.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30743936208","text":"from django.shortcuts import render\n\nfrom .models import Service, ServiceCategory, Mug\n\n# Create your views here.\ndef muggum(request):\n\n services = Service.objects.filter(online=True).order_by('-energy').all()\n\n #services_categories = ServiceCategory.objects.filter(service_set__in=services).distinct('id').order_by('-energy').all()\n categories = ServiceCategory.objects.all()\n services_categories = []\n for service in services:\n for category in categories:\n if category in service.category.all():\n if category not in services_categories:\n services_categories.append(category)\n\n selection = Service.objects.filter(selection=True, online=True).order_by('-energy').all()\n\n ekip = Mug.objects.all()\n\n context = {\n 'services_categories': services_categories,\n 'selection': selection,\n 'ekip': ekip,\n }\n\n return render(request, 'muggum/muggum.html', context)\n","repo_name":"muggum/community","sub_path":"onepage/muggum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39613716374","text":"from django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"forum\", \"0005_auto_20151119_2224\"),\n ]\n\n state_operations = [\n migrations.CreateModel(\n name=\"TopicFollowed\",\n fields=[\n (\"id\", models.AutoField(verbose_name=\"ID\", serialize=False, auto_created=True, primary_key=True)),\n (\"email\", models.BooleanField(default=False, db_index=True, verbose_name=b\"Notification par courriel\")),\n (\"topic\", models.ForeignKey(to=\"forum.Topic\", on_delete=models.CASCADE)),\n (\n \"user\",\n models.ForeignKey(\n related_name=\"topics_followed\", to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Sujet suivi\",\n \"verbose_name_plural\": \"Sujets suivis\",\n \"db_table\": \"notification_topicfollowed\",\n },\n bases=(models.Model,),\n ),\n ]\n\n operations = [migrations.SeparateDatabaseAndState(state_operations=state_operations)]\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/notification/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"} +{"seq_id":"27795744751","text":"\"\"\"group URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views \n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app01 import views as app01_v\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('home/', app01_v.home),\n    path('login/', app01_v.login),\n    path('register/', app01_v.register),\n    path('app01/ajax/uniqueAccount/', app01_v.uniqueAccount),\n    path('logout/',app01_v.logout),\n    \n    path('search/',app01_v.search),\n    # path('Comment/',app01_v.Comment),\n    path('index/', app01_v.index),\n    path('index/submit/', app01_v.submit), \n    path('index/saveHeader/', app01_v.saveHeader),\n    path('index/saveSignature/',app01_v.saveSignature),\n    path('detailpage//',app01_v.detailpage),\n    path('gotodetailpage/',app01_v.gotodetailpage),\n    \n# <---------6.28-李鑫--start------>\n    path('search/',app01_v.search),\n# <---------6.28-李鑫--end-------->\n    # path('gotocomment/',app01_v.gotocomment),\n    path('comment/',app01_v.comment),\n    \n    path('showArticle//',app01_v.showArticle),\n    path('showComment//',app01_v.showComment),\n    path('gotoShowArticle/',app01_v.gotoShowArticle),\n\n    # ------music--------------\n    path('music/',app01_v.music),\n    path('m1/',app01_v.m1),\n    path('m2/',app01_v.m2),\n    path('m3/',app01_v.m3),\n    path('m4/',app01_v.m4),\n    path('m5/',app01_v.m5),\n    path('m6/',app01_v.m6),\n    path('m7/',app01_v.m7),\n    path('m8/',app01_v.m8),\n    path('m9/',app01_v.m9),\n\n    #------------movie-----------\n    path('movie/',app01_v.movie),\n    path('moviepages_spider-man/',app01_v.moviepage),\n\n    # <------详情页点赞----->\n    path('praise_status/',app01_v.praise_status),\n    path('praiscounr//', app01_v.praise),\n\n    # <-------book--------->\n    path('book/',app01_v.book),\n    path('book/b1/',app01_v.b1),\n    path('book/b2/',app01_v.b2),\n    path('book/b3/',app01_v.b3),\n    path('book/b4/',app01_v.b4),\n    path('book/b5/',app01_v.b5),\n    path('book/b6/',app01_v.b6),\n    path('book/b7/',app01_v.b7),\n    path('book/b8/',app01_v.b8),\n]\n","repo_name":"alanlazzzz/Carrot-forum","sub_path":"胡萝卜论坛/group/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15796475204","text":"# Copyright 2017-2019 Nativepython Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom object_database.web import cells as cells\nfrom object_database.web.CellsTestPage import CellsTestPage\n\n\nclass ClickableText(CellsTestPage):\n    def cell(self):\n        slot = cells.Slot(0)\n\n        return cells.Clickable(\n            cells.Subscribed(lambda: f\"You've clicked on this text {slot.get()} times\"),\n            lambda: slot.set(slot.get() + 1),\n        )\n\n    def text(self):\n        return \"You should see some text that you can click on to increment a counter.\"\n\n\ndef test_clickable_displays(headless_browser):\n    demo_root = headless_browser.get_demo_root_for(ClickableText)\n    assert demo_root\n    assert demo_root.get_attribute(\"data-cell-type\") == \"Clickable\"\n\n\ndef test_clickable_clicking_works(headless_browser):\n    demo_root = 
headless_browser.get_demo_root_for(ClickableText)\n query = \"{} > *\".format(headless_browser.demo_root_selector)\n subscribed = headless_browser.find_by_css(query)\n assert subscribed.text == \"You've clicked on this text 0 times\"\n demo_root.click()\n headless_browser.wait(2).until(\n headless_browser.expect.text_to_be_present_in_element(\n (headless_browser.by.CSS_SELECTOR, query), \"You've clicked on this text 1 times\"\n )\n )\n\n\nclass LinkText(CellsTestPage):\n def cell(self):\n return cells.Clickable(\n \"www.google.com\", lambda: \"https://www.google.com\", aTarget=\"_blank\"\n )\n\n def text(self):\n return \"You should see some link-text that opens in a new tab or window.\"\n\n\ndef test_linktext_displays(headless_browser):\n demo_root = headless_browser.get_demo_root_for(LinkText)\n assert demo_root\n assert demo_root.get_attribute(\"data-cell-type\") == \"Clickable\"\n query = \"{} > *\".format(headless_browser.demo_root_selector)\n link = headless_browser.find_by_css(query)\n assert link.text == \"www.google.com\"\n assert len(headless_browser.window_handles) == 1\n\n link.click()\n headless_browser.wait(10).until(headless_browser.expect.number_of_windows_to_be(2))\n assert len(headless_browser.window_handles) == 2\n headless_browser.switch_to_window(headless_browser.window_handles[1])\n assert headless_browser.current_url == \"https://www.google.com/\"\n\n\nclass ClickableOverFlex(CellsTestPage):\n def cell(self):\n slot = cells.Slot(0)\n\n return cells.Clickable(\n cells.Highlighted(\n cells.Flex(\n cells.Subscribed(lambda: f\"You've clicked on this text {slot.get()} times\")\n )\n ),\n lambda: slot.set(slot.get() + 1),\n )\n\n def text(self):\n return \"You should see a clickable with highlighted text under it.\"\n","repo_name":"APrioriInvestments/object_database","sub_path":"object_database/web/cells_demo/clickable.py","file_name":"clickable.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"10316689530","text":"counter = 0\ntotalex1 = 0.0\n\nresponse = input(\"Do you want to compute your average scores? 
(Yes or No)\") \n\nwhile response == \"Yes\":\n    counter = counter + 1\n    lastname = input( \"Enter Student Last Name\")\n    Score1 = float(input(\"Enter Exam Score 1: \"))\n    Score2 = float(input(\"Enter Exam Score 2: \"))\n    average = (Score1 + Score2)/2\n    print(lastname, \"Has Average of\", average)\n    totalex1 = totalex1 + Score1\n    response = input(\"Do you want to compute your average scores Yes or No\") \n\n# Guard against dividing by zero when no scores were entered\nif counter > 0:\n    avgex1 = totalex1 / counter\n    print (\"Number of students: \", counter)\n    print (\"Average exam score 1\", avgex1)\nelse:\n    print (\"No exam scores were entered.\")","repo_name":"TClayton91/CIS106-Timothy-Clayton","sub_path":"Python/PS7P3.py","file_name":"PS7P3.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"680079259","text":"from collections import defaultdict\nimport csv\nimport gzip\nimport json\n\n\nclass Cfg:\n    weather_file = '../../data/weather_output.csv'\n    stops_file = '../../data/stopinfo.csv'\n    jpId_stops_file = '../../data/routes.txt'\n\n\n# Useful functions -------------------------------------\n\ndef bin_time(dt):\n    if dt.hour <= 4:\n        return 'early_am'\n    elif dt.hour >= 5 and dt.hour <= 12:\n        return 'am'\n    elif dt.hour >= 13 and dt.hour <= 20:\n        return 'pm'\n    elif dt.hour >= 21:\n        return 'late_pm'\n    else:\n        raise ValueError('Invalid Hour')\n\n\ndef init_coroutine(coroutine, *args):\n    \"\"\"takes a coroutine and optional parameters to be passed, returns a primed\n    instance of the coroutine\"\"\"\n    c = coroutine() if not args else coroutine(*args)\n    next(c)\n    return c\n\n\ndef read_csv(fname):\n    with open(fname, 'rt', newline='') as f:\n        reader = csv.reader(f)\n        for row in reader:\n            yield row\n\n\ndef csv_writer_coroutine(fname, close_sig='CLOSE'):\n    \"\"\"Coroutine. Opens specified file for writing as csv. waits to receive rows\n    (lists) by the send() method. File is closed when 'CLOSE' is sent.\n    @raises : StopIteration\n    \"\"\"\n    with open(fname, 'wt') as f:\n        writer = csv.writer(f)\n        while True:\n            row = yield\n            if row == close_sig:\n                f.close()\n                break\n            writer.writerow(row)\n\n\n# functions and constants to get various data from files ---------\n# DO NOT IMPORT DIRECTLY. 
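Each call re-reads its data file from disk. 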
Import constants defined in next section.\n\ndef __get_weather_data():\n \"\"\"Returns the weather data in the format of:\n {DD/MM/YYYY: {TIME_BIN: [cloud, rain, temp, wind]}}\n \"\"\"\n weather = defaultdict(lambda: defaultdict(list))\n rows = read_csv(Cfg.weather_file)\n next(rows) # first line is the column headers\n for row in rows:\n weather[row[0]][row[1]] = row[2:]\n\n return weather\n\n\ndef __get_stop_coords():\n \"\"\"Returns a dict of {str(stopId) : [str(lat), str(lon)]}\"\"\"\n stops = read_csv(Cfg.stops_file)\n\n # dict keys of the stopId, value is [lat, lon] list\n d = defaultdict(list)\n\n for r in stops:\n d[str(r[4])] = [str(r[7]), str(r[8])]\n\n return d\n\n\ndef __get_journey_pattern_id_stops():\n \"\"\"Returns a dict of {str(vjId): [ str(stopId) ]\"\"\"\n with open(Cfg.jpId_stops_file) as f:\n routes = json.load(f)\n d = defaultdict(list)\n for jpId, v in routes.items():\n d[str(jpId)] = [str(stopId) for stopId in v['stops']]\n return d\n\n\n# Constants for easy imports ------------------------------\nWEATHER_DATA = __get_weather_data()\nSTOP_COORDS = __get_stop_coords()\nJOURNEY_PATTERN_ID_STOPS = __get_journey_pattern_id_stops()\n","repo_name":"andaca/ETA_BaseTableBuilder","sub_path":"src/csv_parsing/depricated/processing_tools.py","file_name":"processing_tools.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36378103678","text":"# https://www.acmicpc.net/problem/1393\nimport math\n\n\n# 56ms\n\ndef solution():\n xs, ys = map(int, input().split())\n xe, ye, dx, dy = map(int, input().split())\n\n distance = (xs - xe)**2 + (ys - ye)**2\n\n after1SecDistance = (xe + dx - xs)**2 + (ye + dy - ys)**2\n\n if after1SecDistance > distance:\n print(xe, ye)\n elif dx == 0 and dy == 0:\n print(xe, ye)\n else:\n if dy == 0:\n print(xs, ye)\n elif dx == 0:\n print(xe, ys)\n else:\n shortestX = ((dx*dy)/(dy**2 + dx**2)) * \\\n (dx/dy*xs + ys + dy/dx*xe - ye)\n shortestY = (dy/dx)*shortestX - (dy/dx*xe) + ye\n print(int(shortestX), int(shortestY))\n\n# solution()\n\n\n# 틀렸습니다.\n\ndef wrong():\n xs, ys = map(int, input().split())\n xe, ye, dx, dy = map(int, input().split())\n\n distance = (xs - xe)**2 + (ys - ye)**2\n\n after1SecDistance = (xe + dx - xs)**2 + (ye + dy - ys)**2\n\n if after1SecDistance > distance:\n print(xe, ye)\n elif dx == 0 and dy == 0:\n print(xe, ye)\n else:\n if dy == 0:\n print(xs, ye)\n elif dx == 0:\n print(xe, ys)\n else:\n slop = dy/dx\n counterSlop = dx/dy\n shortestX = (1/(slop + counterSlop)) * \\\n (counterSlop*xs + ys + slop*xe - ye)\n shortestY = slop*(shortestX - xe) + ye\n print(int(shortestX), int(shortestY))\n\n\n# 40ms\n\ndef optimize():\n xs, ys = map(int, input().split())\n xe, ye, dx, dy = map(int, input().split())\n\n if dx == 0 and dy == 0:\n print(xe, ye)\n exit()\n\n def getDeg(v1, v2):\n sizeV1 = math.sqrt(v1[0]**2 + v1[1]**2)\n sizeV2 = math.sqrt(v2[0]**2 + v2[1]**2)\n\n # v1*v2\n innerProduct = v1[0]*v2[0] + v1[1]*v2[1]\n\n # |v1|*|v2|\n sizeMultiply = sizeV1 * sizeV2\n\n angleDeg = math.degrees(math.acos(innerProduct/sizeMultiply))\n return angleDeg\n\n angleDeg = getDeg([xs-xe, ys-ye], [dx, dy])\n\n if angleDeg < 90:\n if dy == 0:\n print(xs, ye)\n elif dx == 0:\n print(xe, ys)\n else:\n shortestX = ((dx*dy)/(dy**2 + dx**2)) * \\\n (dx/dy*xs + ys + dy/dx*xe - ye)\n shortestY = (dy/dx)*shortestX - (dy/dx*xe) + ye\n print(int(shortestX), int(shortestY))\n\n else:\n print(xe, 
ye)\n\n\noptimize()\n","repo_name":"studying-ice-bear/pparkkkimeom","sub_path":"KimChaeJung/math/1393_음하철도구구구.py","file_name":"1393_음하철도구구구.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72417043393","text":"import PySimpleGUI as sg\n\nfrom random import choice\n\n\ndef randomize(text, *texts):\n returned = []\n #print(texts)\n if not texts:\n texts = \"\"\n else:\n texts = \"\".join([el for el in texts])\n all_text = (text + texts).split(\" \")\n while len(all_text) != 0:\n word = choice(all_text)\n returned.append(word)\n all_text.remove(word)\n return \" \".join(returned)\n\n\n\nsg.theme('DarkAmber') # Add a touch of color\n# All the stuff inside your window.\nlayout = [[sg.Text('first text'),sg.Multiline(key=\"input-1\",size=(50,5), tooltip=\"Mettre le premier texte à cut up ici\")],\n [sg.Text('second text'), sg.Multiline(key=\"input-2\", size=(50, 5), tooltip=\"(optionnel) Mettre le second texte à cut up ici\")],\n [sg.Text(\"resultat: \"),sg.Multiline(size=(50,5), key=\"--RES--\")],\n [sg.Button('Cut up'), sg.Button('Cancel')], [sg.Text(\"by Alexandre W\")]]\n\n# Create the Window\nwindow = sg.Window('Window Title',layout, size=(600,600), resizable = True )\n\n\n# Event Loop to process \"events\" and get the \"values\" of the inputs\nwhile True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n elif event == \"Cut up\":\n print(values[\"input-1\"])\n print(values[\"input-2\"])\n print(randomize(values[\"input-1\"],values[\"input-2\"]))\n t = randomize(values[\"input-1\"],values[\"input-2\"])\n window[\"--RES--\"].update(t)\n\n\nwindow.close()","repo_name":"ArtiskOnGit/cut_ups","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71793004034","text":"from pytest import fixture\nimport numpy as np\nimport torch\n\nfrom grad_june.symptoms import SymptomsSampler, SymptomsUpdater\n\n\nclass TestSymptomsSampler:\n @fixture(name=\"input\")\n def test__input(self):\n input = {\n \"stages\": [\n \"recovered\",\n \"susceptible\",\n \"asymptomatic\",\n \"symptomatic\",\n \"critical\",\n \"dead\",\n ],\n \"stage_transition_probabilities\": {\n \"asymptomatic\": {\"0-50\": 1.0, \"50-100\": 0.5},\n \"symptomatic\": {\"0-100\": 0.2},\n \"critical\": {\"0-100\": 0.1},\n },\n \"stage_transition_times\": {\n \"asymptomatic\": {\"dist\": \"LogNormal\", \"loc\": 1.1, \"scale\": 0.2},\n \"symptomatic\": {\"dist\": \"Normal\", \"loc\": 10.2, \"scale\": 3.0},\n \"critical\": {\"dist\": \"LogNormal\", \"loc\": 1.7, \"scale\": 0.5},\n },\n \"recovery_times\": {\n \"asymptomatic\": {\"dist\": \"LogNormal\", \"loc\": 1.2, \"scale\": 0.3},\n \"symptomatic\": {\"dist\": \"LogNormal\", \"loc\": 1.3, \"scale\": 0.5},\n \"critical\": {\"dist\": \"LogNormal\", \"loc\": 1.4, \"scale\": 0.8},\n },\n }\n return input\n\n @fixture(name=\"sp\")\n def make_sp(self, input):\n params = {\"system\": {\"device\": \"cpu\"}, \"symptoms\": input}\n return SymptomsSampler.from_parameters(params)\n\n def test__read_input(self, sp):\n assert sp.stages == [\n \"recovered\",\n \"susceptible\",\n \"asymptomatic\",\n \"symptomatic\",\n \"critical\",\n \"dead\",\n ]\n assert (sp.stage_transition_probabilities[0] == torch.zeros(100)).all()\n assert (sp.stage_transition_probabilities[1] == 
torch.zeros(100)).all()\n assert (sp.stage_transition_probabilities[2][:50] == 1.0 * torch.ones(50)).all()\n assert (sp.stage_transition_probabilities[2][50:] == 0.5 * torch.ones(50)).all()\n assert (sp.stage_transition_probabilities[3] == 0.2 * torch.ones(100)).all()\n assert (sp.stage_transition_probabilities[4] == 0.1 * torch.ones(100)).all()\n\n assert sp.stage_transition_times[2].__class__.__name__ == \"LogNormal\"\n assert np.isclose(sp.stage_transition_times[2].loc.item(), 1.1)\n assert np.isclose(sp.stage_transition_times[2].scale.item(), 0.2)\n assert np.isclose(\n sp.stage_transition_times[2].mean.item(), np.exp(1.1 + 0.2**2 / 2)\n )\n assert sp.stage_transition_times[3].__class__.__name__ == \"Normal\"\n assert np.isclose(sp.stage_transition_times[3].loc.item(), 10.2)\n assert np.isclose(sp.stage_transition_times[3].scale.item(), 3.0)\n assert np.isclose(sp.stage_transition_times[3].mean.item(), 10.2)\n assert sp.stage_transition_times[4].__class__.__name__ == \"LogNormal\"\n assert np.isclose(sp.stage_transition_times[4].loc.item(), 1.7)\n assert np.isclose(sp.stage_transition_times[4].scale.item(), 0.5)\n assert np.isclose(\n sp.stage_transition_times[4].mean.item(), np.exp(1.7 + 0.5**2 / 2)\n )\n\n assert sp.recovery_times[2].__class__.__name__ == \"LogNormal\"\n assert np.isclose(sp.recovery_times[2].loc.item(), 1.2)\n assert np.isclose(sp.recovery_times[2].scale.item(), 0.3)\n assert np.isclose(sp.recovery_times[2].mean.item(), np.exp(1.2 + 0.3**2 / 2))\n assert sp.recovery_times[3].__class__.__name__ == \"LogNormal\"\n assert np.isclose(sp.recovery_times[3].loc.item(), 1.3)\n assert np.isclose(sp.recovery_times[3].scale.item(), 0.5)\n assert np.isclose(sp.recovery_times[3].mean.item(), np.exp(1.3 + 0.5**2 / 2))\n assert sp.recovery_times[4].__class__.__name__ == \"LogNormal\"\n assert np.isclose(sp.recovery_times[4].loc.item(), 1.4)\n assert np.isclose(sp.recovery_times[4].scale.item(), 0.8)\n assert np.isclose(sp.recovery_times[4].mean.item(), np.exp(1.4 + 0.8**2 / 2))\n\n def test__sample(self, sp):\n ages = torch.tensor([0, 20, 40, 60, 80, 99])\n current_stage = torch.tensor([0, 1, 2, 3, 4, 5])\n next_stage = torch.tensor([0, 1, 3, 4, 5, 5])\n time_to_next_stage = torch.tensor([1.1, 2.5, 0.7, 0.9, 0.1, 0.5])\n\n time = 1.0\n need_to_trans = sp._get_need_to_transition(\n current_stage, time_to_next_stage, time\n )\n assert (need_to_trans == torch.tensor([0, 0, 1, 1, 1, 0]).to(torch.bool)).all()\n\n probability_next_symptomatic_stage = sp._get_prob_next_symptoms_stage(\n ages, next_stage\n )\n assert (\n probability_next_symptomatic_stage\n == torch.tensor([0.0, 0.0, 0.2, 0.1, 0.0, 0])\n ).all()\n\n currents = torch.zeros(6)\n nexts = torch.zeros(6)\n stage_times = torch.zeros(6)\n n = 1000\n for i in range(n):\n current_stage = torch.tensor([0, 1, 2, 3, 4, 5])\n next_stage = torch.tensor([0, 1, 3, 4, 5, 5])\n time_to_next_stage = torch.tensor([1.1, 2.5, 0.7, 0.9, 0.1, 0.5])\n current, next, stage_time = sp.sample_next_stage(\n ages, current_stage, next_stage, time_to_next_stage, time\n )\n currents += current\n nexts += next\n stage_times += stage_time\n\n # Check new current stages\n currents = currents / n\n currents = currents.numpy()\n assert currents[0] == 0\n assert currents[1] == 1\n assert currents[2] == 3\n assert currents[3] == 4\n assert currents[4] == 5\n assert currents[5] == 5\n\n # Check new next stages\n nexts = nexts / n\n nexts = nexts.numpy()\n assert nexts[0] == 0\n assert nexts[1] == 1\n assert np.isclose(nexts[2], 4 * 0.2, rtol=0.1)\n assert 
np.isclose(nexts[3], 5 * 0.1, rtol=0.1)\n assert nexts[4] == 5\n assert nexts[5] == 5\n\n stage_times = stage_times / n\n stage_times = stage_times.numpy()\n assert np.isclose(stage_times[0], 1.1, rtol=1e-1)\n assert np.isclose(stage_times[1], 2.5, rtol=1e-1)\n expected = 0.7 + 0.2 * 10.2 + 0.8 * np.exp(1.3 + 0.5**2 / 2)\n assert np.isclose(stage_times[2], expected, rtol=1e-1)\n expected = (\n 0.9 + 0.1 * np.exp(1.7 + 0.5**2 / 2) + 0.9 * np.exp(1.4 + 0.8**2 / 2)\n )\n assert np.isclose(stage_times[3], expected, rtol=1e-1)\n assert np.isclose(stage_times[4], 0.1, rtol=1e-1)\n assert stage_times[5] == 0.5\n\n\nclass TestSymptomsUpdater:\n @fixture(name=\"sp\")\n def make_sp(self):\n return SymptomsSampler.from_file()\n\n @fixture(name=\"su\")\n def make_su(self, sp):\n return SymptomsUpdater(sp)\n\n def test__update_symptoms(self, su, data, timer):\n n_agents = len(data[\"agent\"].id)\n data[\"agent\"][\"symptoms\"][\"current_stage\"] = 2 * torch.ones(\n n_agents, dtype=torch.long\n )\n data[\"agent\"][\"symptoms\"][\"next_stage\"] = 3 * torch.ones(\n n_agents, dtype=torch.long\n ) # all infectious\n data[\"agent\"][\"symptoms\"][\"time_to_next_stage\"] = torch.zeros(n_agents)\n symptoms = su(\n data=data, timer=timer, new_infected=torch.zeros(n_agents, dtype=torch.bool)\n )\n assert (\n symptoms[\"current_stage\"] == 3 * torch.ones(n_agents, dtype=torch.long)\n ).all()\n will_recover = len(symptoms[\"next_stage\"][symptoms[\"next_stage\"] == 0])\n assert will_recover < n_agents / 2\n will_symptom = len(symptoms[\"next_stage\"][symptoms[\"next_stage\"] == 4])\n assert will_symptom > n_agents / 2\n assert will_recover + will_symptom == n_agents\n\n def test__dead_stay_dead(self, su, data, timer):\n n_agents = len(data[\"agent\"].id)\n data[\"agent\"][\"symptoms\"][\"current_stage\"] = 6 * torch.ones(\n n_agents, dtype=torch.long\n )\n data[\"agent\"][\"symptoms\"][\"next_stage\"] = 7 * torch.ones(\n n_agents, dtype=torch.long\n ) # all infectious\n data[\"agent\"][\"symptoms\"][\"time_to_next_stage\"] = torch.zeros(n_agents)\n for i in range(100):\n symptoms = su(\n data=data,\n timer=timer,\n new_infected=torch.zeros(n_agents, dtype=torch.bool),\n )\n assert (\n symptoms[\"current_stage\"] == 7 * torch.ones(n_agents, dtype=torch.long)\n ).all()\n next(timer)\n\n def test__symptoms_differentiable(self, su, data, timer):\n # increase mortality for the test\n su.symptoms_sampler.stage_transition_probabilities[2:, :] = torch.ones(\n su.symptoms_sampler.stage_transition_probabilities[2:, :].shape\n )\n beta = torch.nn.Parameter(torch.tensor(10.0))\n probs = 1 - torch.exp(-beta) * torch.ones(data[\"agent\"].id.shape)\n new_infected = torch.nn.functional.gumbel_softmax(probs, tau=0.1, hard=True)\n symptoms = su(data=data, timer=timer, new_infected=new_infected)\n new_infected_2 = torch.zeros(new_infected.shape)\n for _ in range(100):\n next(timer)\n symptoms = su(data=data, timer=timer, new_infected=new_infected_2)\n deaths = symptoms[\"current_stage\"][symptoms[\"current_stage\"] == 7]\n assert len(deaths) > 0\n total_deaths = deaths.sum()\n total_deaths.backward()\n assert deaths.requires_grad\n assert symptoms[\"current_stage\"].requires_grad\n assert symptoms[\"time_to_next_stage\"].requires_grad\n assert symptoms[\"next_stage\"].requires_grad\n","repo_name":"arnauqb/GradABM-JUNE","sub_path":"test/unit/test_symptoms.py","file_name":"test_symptoms.py","file_ext":"py","file_size_in_byte":9632,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} 
+{"seq_id":"1287387106","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated, AllowAny, IsAdminUser\nfrom .models import PaymentTransaction, Wallet\nfrom .mpesa import sendSTK\nfrom .sms import *\nimport json\nfrom rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST\nfrom .signals import updateAvailableBalance, sendSMSReceipt\nfrom commons.smsutils import *\nfrom django.http import JsonResponse\nfrom commons.app_constants import *\nfrom orders.orderutils import update_order_status\n\n\n# Create your views here.\nclass SubmitView(APIView):\n permission_classes = [\n AllowAny,\n ]\n\n def post(self, request):\n data = request.data\n phone_number = data['phone_number']\n amount = data['amount']\n orderId = data['order_id']\n\n print(phone_number)\n print(amount)\n sendSTK(phone_number, amount, orderId)\n # b2c()\n message = {\"status\": \"ok\"}\n return Response(message, status=HTTP_200_OK)\n\n\nclass CheckTransaction(APIView):\n permission_classes = [\n AllowAny,\n ]\n\n def post(self, request):\n data = request.data\n trans_id = data['transaction_id']\n try:\n transaction = PaymentTransaction.objects.filter(id=trans_id).get()\n if transaction:\n return JsonResponse({\n \"message\": \"ok\",\n \"finished\": transaction.isFinished,\n \"successful\": transaction.isSuccessFull\n },\n status=200)\n else:\n # TODO : Edit order if no transaction is found\n return JsonResponse({\n \"message\": \"Error. Transaction not found\",\n \"status\": False\n },\n status=400)\n except PaymentTransaction.DoesNotExist:\n return JsonResponse({\n \"message\": \"Server Error. Transaction not found\",\n \"status\": False\n },\n status=400)\n\n\nclass RetryTransaction(APIView):\n permission_classes = [\n AllowAny,\n ]\n\n def post(self, request):\n trans_id = request.data['transaction_id']\n try:\n transaction = PaymentTransaction.objects.filter(id=trans_id).get()\n if transaction and transaction.isSuccessFull:\n return JsonResponse({\n \"message\": \"ok\",\n \"finished\": transaction.isFinished,\n \"successful\": transaction.isSuccessFull\n },\n status=200)\n else:\n response = sendSTK(\n phone_number=transaction.phone_number,\n amount=transaction.amount,\n orderId=transaction.order_id,\n transaction_id=trans_id)\n return JsonResponse({\n \"message\": \"ok\",\n \"transaction_id\": response\n },\n status=200)\n\n except PaymentTransaction.DoesNotExist:\n return JsonResponse({\n \"message\": \"Error. Transaction not found\",\n \"status\": False\n },\n status=400)\n\n\nclass ConfirmView(APIView):\n permission_classes = [\n AllowAny,\n ]\n\n def post(self, request):\n # save the data\n print(\"confirm view reached\")\n request_data = json.dumps(request.data)\n request_data = json.loads(request_data)\n body = request_data.get('Body')\n print(\"Body \" + json.dumps(body))\n resultcode = body.get('stkCallback').get('ResultCode')\n # Perform your processing here e.g. 
print it out...\n        if resultcode == 0:\n            print('Payment successful')\n            requestId = body.get('stkCallback').get('CheckoutRequestID')\n            transaction = PaymentTransaction.objects.get(\n                checkoutRequestID=requestId)\n            if transaction:\n                transaction.isFinished = True\n                transaction.isSuccessFull = True\n                transaction.save()\n                wallet = None\n                try:\n                    wallet = Wallet.objects.get(\n                        phone_number=transaction.phone_number)\n                    if not wallet:\n                        wallet = Wallet.objects.create(\n                            phone_number=transaction.phone_number)\n                    wallet.actual_balance += transaction.amount\n                    wallet.save()\n\n                except Wallet.DoesNotExist:\n                    wallet = Wallet.objects.create(\n                        phone_number=transaction.phone_number)\n                    wallet.actual_balance += transaction.amount\n                    wallet.save()\n\n                updateAvailableBalance.send(sender=Wallet, wallet=wallet.id)\n                sendSMSReceipt.send(\n                    sender=Wallet,\n                    message=build_receipt(transaction.amount),\n                    phone_number=\"+{}\".format(transaction.phone_number))\n\n        else:\n            print('unsuccessful')\n            requestId = body.get('stkCallback').get('CheckoutRequestID')\n            transaction = PaymentTransaction.objects.get(\n                checkoutRequestID=requestId)\n            if transaction:\n                transaction.isFinished = True\n                transaction.isSuccessFull = False\n                transaction.save()\n\n        # Prepare the response, assuming no errors have occurred. Any response\n        # other than a 0 (zero) for the 'ResultCode' during Validation only means\n        # an error occurred and the transaction is cancelled\n        message = {\n            \"ResultCode\": 0,\n            \"ResultDesc\": \"The service was accepted successfully\",\n            \"ThirdPartyTransID\": \"1237867865\"\n        }\n\n        # Send the response back to the server\n        return Response(message, status=HTTP_200_OK)\n\n    def get(self, request):\n        return Response(\"Confirm callback\", status=HTTP_200_OK)\n\n\nclass ValidateView(APIView):\n    permission_classes = [\n        AllowAny,\n    ]\n\n    def post(self, request):\n        # save the data\n        request_data = request.data\n\n        # Perform your processing here e.g. print it out...\n        print(\"validate data\", request_data)\n\n        # Prepare the response, assuming no errors have occurred. 
Any response\n        # other than a 0 (zero) for the 'ResultCode' during Validation only means\n        # an error occurred and the transaction is cancelled\n        message = {\n            \"ResultCode\": 0,\n            \"ResultDesc\": \"The service was accepted successfully\",\n            \"ThirdPartyTransID\": \"1234567890\"\n        }\n\n        # Send the response back to the server\n        return Response(message, status=HTTP_200_OK)\n","repo_name":"Vorane/Pigeon-api","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27266468293","text":"from google.cloud.resourcemanager_v3 import (\n    FoldersClient,\n    OrganizationsClient,\n    ProjectsClient,\n)\n\n\ndef get_organization_id():\n    # Gets organization ID - service account must only have rights to a single org\n    # Requires Organization Viewer role at Organization level\n    organizations_client = OrganizationsClient()\n    orgs = organizations_client.search_organizations()\n    for org in orgs:\n        return org.name\n\n    return \"\"\n\n\ndef list_folders(parent_id):\n    # Lists folders under a parent - requires Folder Viewer role at Organization level\n    folders_client = FoldersClient()\n    folders = folders_client.list_folders(parent=parent_id)\n    folder_list = [f.name for f in folders]\n\n    return folder_list\n\n\ndef list_projects(parent_id):\n    # Lists projects under a parent - requires Folder Viewer role at Organization level\n    projects_client = ProjectsClient()\n    projects = projects_client.list_projects(parent=parent_id)\n    project_list = [p.project_id for p in projects if not p.project_id.startswith(\"sys-\")]\n\n    return project_list\n\n\ndef list_all_projects():\n    # Get organization ID\n    org_id = get_organization_id()\n\n    # Get all the project IDs at the organization level\n    all_projects = list_projects(org_id)\n\n    # Now retrieve all the folders directly under the organization\n    folder_ids = list_folders(org_id)\n\n    # Make sure that there are actually folders under the org\n    if len(folder_ids) == 0:\n        return all_projects\n\n    # Start iterating over the folders\n    while folder_ids:\n        # Get the last folder of the list\n        current_id = folder_ids.pop()\n\n        # Get subfolders and add them to the list of folders\n        subfolders = list_folders(current_id)\n\n        if subfolders:\n            folder_ids.extend(f for f in subfolders)\n\n        # Get the projects under that folder\n        projects_under_folder = list_projects(current_id)\n\n        # Add projects if there are any\n        if projects_under_folder:\n            all_projects.extend(p for p in projects_under_folder)\n\n    # Finally, return all the projects\n    return all_projects\n","repo_name":"domain-protect/domain-protect-gcp","sub_path":"manual-scans/utils_gcp.py","file_name":"utils_gcp.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"61"} +{"seq_id":"71020476354","text":"import numpy as np\nimport PlotSummaryDataRobinson as psdr\nimport OutputRobinsonDataExcel as orde\nimport RobinsonCode as rc\nimport generating_probs as gen_prob\nimport lhs_sim as lhsSim\n\n# ExampleUsingRobinsonCodeNew\n\n# this sets the name of the output file. 
It stores the data for each of\n# your tests so you'll need to call it something that indicates what\n# experiment it is\n# output_file='RobinsonTestExperimentTest1.mat'\noutput_file_xls = 'RobinsonTestExperimentTest1_LHS.xlsx'\n\n# mean time to get between each nest\ntime_means = np.array([[1, 36, 143], [36, 1, 116], [143, 116, 1]]) #The paper mentions that ants tend to reject the low-quality nest and will continue searching for the good quality nest even if its far.\n#time_means = np.array([[1, 50, 50], [50, 1, 50], [50, 50, 1]])\n\n# these parameters are for the first experiment\n#\n# probabilities of visiting each site from each other\nprobs = gen_prob.compute_probs(time_means,0.2)\n#probs = np.array([[0.91, 0.15, 0.03], [0.06, 0.8, 0.06], [0.03, 0.05, 0.9]])\n#probstt = np.array([[0.92, 0.15, 0.08], [0.03, 0.83, 0.05], [0.04, 0.08, 0.86]]) #The mean time from site 1 - site 0 is way less than site1-site2 then why is the prob of going to both of them from site1 same? (The site1-site0 prob should be greater implement and check (The error increases and a high number of ants select site 1 instead of site 2))\n#probst = np.array([[np.round(np.random.uniform(0.91,0.99,1)[0],2), np.round(np.random.uniform(0.1,0.19,1)[0],2), np.round(np.random.uniform(0.02,0.09,1)[0],2)], [np.round(np.random.uniform(0.03,0.09,1)[0],2), np.round(np.random.uniform(0.8,0.9,1)[0],2), np.round(np.random.uniform(0.03,0.09,1)[0],2)], [np.round(np.random.uniform(0.03,0.09,1)[0],2), np.round(np.random.uniform(0.05,0.09,1)[0],2), np.round(np.random.uniform(0.8,0.99,1)[0],2)]])\n#print(probst)\n#print(probs)\n#probs = np.array([[0.91, 0.5, 0.5], [0.5, 0.8, 0.5], [0.5, 0.5, 0.91]]) #Ants have equal prob to visit sites.\n\n# standard deviation of time to get between each nest\ntime_stddevs = time_means / 5\n\n# mean quality of each nest. Note home is -infinity so it never gets picked\nquals = np.array([-np.inf, 3, 6]) #The ants will always select site 1 if the probability of visiting from site 1 to site 2 is same and site 2 has significantly good quality than site 1\n\n# standard deviation of quality: essentially this controls\n# how variable the ants assessment of each nest is. 
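A larger stddev makes each individual assessment noisier. 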
This is currently set\n# as in the 1st experiment where the variability is the same for each nest\nqual_stddev = np.array([1, 1, 1])\n# However, if you want to change is so nests perceived w different accuracy\n# you could do eg qual_stddev = [1, 1, 4]\n\n# set the number of ants\nn = 27\n\n# these govern the ant's threshold\nthreshold_mean = 5\nthreshold_stddev = 1\n\ncurrent_time, discovers, visits, accepts, Ants, rnd_seed, antsSelectedOne, antSelectedTwo, preqtimes, preqdiscovers, preqvisits, preqaccepts = \\\n rc.RobinsonCode(n, quals, probs, threshold_mean, threshold_stddev, qual_stddev, time_means, time_stddevs, [], [])\n\n# note: I have changed the order of saving data and plotting, as the matplotlib figure was blocking execution as long\n# as it was open\n\n# save the data as matlab variables - NOT IMPLEMENTED IN PYTHON\n# save(output_file)\n\n# save some of the data as excel\norde.OutputRobinsonDataExcel(output_file_xls, Ants, current_time, accepts, discovers, visits)\n\n# Plot Summary data\npsdr.PlotSummaryDataRobinson(current_time, accepts, discovers, visits, Ants)\n\nprint(\"Ants that selected the poor site \", antsSelectedOne)\n\nif (len(antsSelectedOne)!=0):\n\tprint(\"Do you want them to move to the better nest (y/n)?\")\n\tans = input()\n\tif(ans == 'y'):\n\t\tcurrent_time, discovers, visits, accepts, Ants, rnd_seed, preqtimes, preqdiscovers, preqvisits, preqaccepts = \\\n\t\t\tlhsSim.lhsSim(len(antsSelectedOne), quals, probs, threshold_mean, threshold_stddev, qual_stddev, time_means, time_stddevs, [], [])\n\t\tpsdr.PlotSummaryDataRobinson(current_time, accepts, discovers, visits, Ants)\n\n\telse:\n\t\tprint(\"Ants do not leave their own behind!\")\n\n\n\"\"\"ants_site1 = []\nants_site2 = []\nratio = []\nfor i in range(3):\n\tcurrent_time, discovers, visits, accepts, Ants, rnd_seed, preqtimes, preqdiscovers, preqvisits, preqaccepts = \\\n\t\trc.RobinsonCode(n, quals, probs, threshold_mean, threshold_stddev, qual_stddev, time_means, time_stddevs, [], [])\n\n\t#psdr.PlotSummaryDataRobinson(current_time, accepts, discovers, visits, Ants)\n\n\tfor i in range(len(accepts)):\n\t\tif (accepts[i] == 1):\n\t\t\tants_site1.append(accepts[i])\n\t\telse:\n\t\t\tants_site2.append(accepts[i])\n\nratio.append(len(ants_site2)/len(ants_site1))\"\"\"\n","repo_name":"piyushag-git/sentient_ants","sub_path":"ExampleUsingRobinsonCode.py","file_name":"ExampleUsingRobinsonCode.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18599202279","text":"import sys\n\nN = int(sys.stdin.readline()) #N<=20\n\ndamage = []\nhappy = []\n\n\ndamage = list(map(int,sys.stdin.readline().split(\" \"))) #0<= <=100\nhappy = list(map(int,sys.stdin.readline().split(\" \"))) #0<= <=100\n\n\n#\n# def Brute_Force(start, energy,point,depth):\n# global max_happy\n# visited[start] = True\n# if max_happy < point:\n# max_happy = point\n# for i in range(N):\n# if visited[i] == False and energy - damage[i] > 0:\n# Brute_Force(i,energy - damage[i],point + happy[i],depth+1)\n#\n#\n#\n#\n# max_happy = 0\n#\n# for i in range(N):\n# visited = [False] * N\n# if 100 - damage[i] > 0:\n# Brute_Force(i,100-damage[i],happy[i],0)\n# print(max_happy)\n\n\nDP = [[0 for i in range(100)] for i in range(N+1)]\nfor i in range(1,N+1):\n for k in range(1,100):\n if k < damage[i-1]:\n DP[i][k] = DP[i-1][k]\n else:\n DP[i][k] = max(DP[i-1][k] , happy[i-1] + 
DP[i-1][k-damage[i-1]])\n\nprint(DP[-1][-1])","repo_name":"Andrevile/Algorithm","sub_path":"BOJ PS/No.1535 안녕.py","file_name":"No.1535 안녕.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23644598621","text":"fob = open('c:/Users/Nemanja/Documents/python/Recycled Numbers/C-small-attempt0.in', 'r');\r\ncases = int(fob.readline());\r\noutput = [];\r\nfor case in range(1,cases+1):\r\n line = fob.readline();\r\n line = line.split();\r\n a = int(line[0]);\r\n b = int(line[1]);\r\n lenght = len(str(a));\r\n counter = 0;\r\n for n in range (a,b):\r\n for m in range (b,a,-1):\r\n for x in range (1, lenght):\r\n n1 = n%10**x;\r\n tmp = n/10**x;\r\n a1 = n1*10**(lenght-x)+tmp;\r\n if a1 == m and n prob:\n return _slacking_text[i]\n return _slacking_text[-1]\n\n\ndef file_to_array(filename, reduce=True):\n from keras.preprocessing import image as kerasimage\n import numpy as np\n import PIL\n\n loaded_img = kerasimage.load_img(filename)\n\n if (reduce):\n img = loaded_img.resize([IMAGE_WIDTH, IMAGE_HEIGHT],PIL.Image.ANTIALIAS)\n else:\n img = loaded_img\n\n imgarr = kerasimage.img_to_array(img)\n\n nparr = np.asarray(imgarr)\n\n return nparr\n#return np.reshape(nparr, (-1, IMAGE_WIDTH * IMAGE_HEIGHT * 3))\n\ndef file_to_label(filename):\n import re\n if re.match(r\".*YES.*\", filename):\n return [1]\n else:\n return [0]\n\n\n","repo_name":"dantaeyoung/glug","sub_path":"procratch/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29576522065","text":"'''\nDescription: 初步搭建 Apriori 频繁模式挖掘框架\nAuthor: stepondust\nDate: 2020-05-31\n'''\nimport copy\n\nimport numpy as np\n\n\ndef findFrequentOneItemsets(dataset, min_sup):\n '''\n msg: 发现所有满足最小支持度计数阈值的频繁 1 项集\n param {\n dataset:numpy.ndarray Groceries 事务数据集\n min_sup:int 最小支持度计数阈值\n }\n return{\n frequent_one_itemsets:dict 字典形式的频繁 1 项集的集合,每个键值对的键为包含商品的元组,值为商品对应的支持度计数\n }\n '''\n goods_list = [i for i in dataset.flatten() if i != ''] # 将 numpy.ndarray 类型的数据集平铺并去除所有空字符串 '',得到所有商品的列表\n goods_set = set(goods_list)\n frequent_one_itemsets = {}\n for key in goods_set:\n num = goods_list.count(key) # 每件商品的支持度计数\n if num < min_sup:\n continue # 当前商品的支持度计数小于最小支持度计数阈值,放弃该商品并搜索下一件商品\n else:\n frequent_one_itemsets[(key,)] = num # 当前商品的支持度计数大于或等于最小支持度计数阈值,将该商品加入频繁 1 项集的集合\n return frequent_one_itemsets\n\n\ndef kMinusOneSubset(superset):\n '''\n msg: 得到 k 项集的所有 k - 1 项子集\n param {\n superset:tuple k 项集\n }\n return{\n k_minus_one_subset:set k 项集的所有 k - 1 项子集\n }\n '''\n sub_sorted = sorted(superset)\n k_minus_one_subset = set([tuple(sub_sorted[:i] + sub_sorted[i + 1:]) for i in range(len(sub_sorted))])\n return k_minus_one_subset\n\n\ndef aprioriGen(frequent_k_minus_one_itemsets, frequent_one_itemsets):\n '''\n msg: 根据频繁(k - 1)项集的集合得到候选 k 项集的集合\n param {\n frequent_k_minus_one_itemsets:dict 频繁(k - 1)项集的集合\n }\n return{\n candidate_k_set:set 候选 k 项集的集合\n }\n '''\n k_minus_one_list_sorted = sorted(frequent_k_minus_one_itemsets.keys())\n candidate_k_set = set()\n for i in k_minus_one_list_sorted:\n temp_set = set([\n tuple(set(i + j)) for j in frequent_one_itemsets if i < j\n ]) # 连接步:将频繁(k - 1)项集的集合与其自己做连接,得到候选的 k 项集的集合\n tempList=[]\n for j in temp_set:\n k_minus_one_subset=kMinusOneSubset(j)\n for x in k_minus_one_subset.copy():\n if x.__contains__('\"1\"') or x.__contains__('\"2\"') or x.__contains__('\"3\"'):\n continue\n 
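# keep only the (k-1)-subsets that still contain a class label (\"1\"/\"2\"/\"3\"); the prune below checks just those\n                    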
else:\n k_minus_one_subset.remove(x)\n if len(k_minus_one_subset- set(frequent_one_itemsets)-set(frequent_k_minus_one_itemsets)) == 0:\n candidate_k_set.update([tuple(sorted(j))])\n # candidate_k_set.update([\n # tuple(sorted(j)) for j in temp_set if len(kMinusOneSubset(j) - set(frequent_one_itemsets)-set(frequent_k_minus_one_itemsets)) == 0\n # ]) # 剪枝步:根据先验性质,删除非频繁的候选 k 项集\n return candidate_k_set\n\n\ndef candidateItemsets(candidate_k_set, t):\n '''\n msg: 得到事务 t 的候选子集,这些候选子集均是候选 k 项集的集合的元素\n param {\n candidate_k_set:set 候选 k 项集的集合\n t:numpy.ndarray Groceries 事务数据集中的事务\n }\n return{\n candidate:list 以事务 t 的候选子集为元素的列表,这些候选子集均是候选 k 项集的集合的元素\n }\n '''\n candidate = []\n for i in candidate_k_set:\n if t.issuperset(i): # 判断事务 t 是否是候选 k 项集 i 的超集,即判断候选 k 项集 i 是否是事务 t 的子集\n candidate.append(tuple(sorted(i)))\n return candidate\n\n\ndef apriori(dataset, frequent_one_itemsets,frequent_result_itemsets, min_sup):\n '''\n msg: 获得所有的频繁项集\n param {\n dataset:numpy.ndarray Groceries 事务数据集\n frequent_one_itemsets:dict 字典形式的频繁 1 项集的集合,每个键值对的键为包含商品的元组,值为商品对应的支持度计数\n min_sup:int 最小支持度计数阈值\n }\n return{\n frequent_itemsets:dict 字典形式的频繁项集的集合\n }\n '''\n frequent_itemsets_list = [frequent_result_itemsets] # 创建一个以每一层的频繁项集的集合为元素的列表\n k = 1\n while (len(frequent_itemsets_list[k - 1]) != 0): # 判断上一层的(k - 1)项集的集合是否为空,为空则说明不能再找到频繁项集,退出循环,不为空则继续循环\n candidate_k_set = aprioriGen(frequent_itemsets_list[k - 1], frequent_one_itemsets) # 根据频繁(k - 1)项集的集合得到候选 k 项集的集合\n if len(candidate_k_set) == 0: # 判断候选 k 项集的集合是否为空,为空则说明不能再找到频繁项集,退出循环,不为空则继续\n frequent_itemsets_list.append({}) # 对应 del frequent_itemsets_list[-1] 语句,防止误删\n break\n candidate_itemsets_list = [] # 创建一个以各个事务的子集为元素的列表,它们均是候选的\n for t in dataset: # t 为事务数据集中的事务\n ct = candidateItemsets(candidate_k_set, set(t)) # 得到事务 t 的候选子集,这些候选子集均是候选 k 项集的集合的元素\n candidate_itemsets_list.extend(ct)\n candidate_set = set(candidate_itemsets_list)\n candidate_dict = {} # 创建字典形式的频繁 k 项集的集合\n for key in candidate_set:\n num = candidate_itemsets_list.count(key) # 每个候选 k 项集的支持度计数\n if num < min_sup:\n continue # 当前候选 k 项集不满足最小支持度计数阈值,放弃该候选 k 项集并搜索下一候选 k 项集\n else:\n candidate_dict[key] = num # 当前候选 k 项集满足最小支持度计数阈值,将该候选 k 项集加入频繁 k 项集的集合\n frequent_itemsets_list.append(candidate_dict)\n k += 1\n del frequent_itemsets_list[-1] # 上一层的的项集的集合为空,退出循环并且删除上一层的的项集\n frequent_itemsets = {} # 创建字典形式的频繁项集的集合\n for i in frequent_itemsets_list:\n frequent_itemsets.update(i)\n return frequent_itemsets\n\n\ndef allProperSubset(superset):\n '''\n msg: 获得集合的所有非空真子集\n param {\n superset:tuple 元组形式的集合\n }\n return{\n proper_subsets:list 列表形式的集合,列表当中的每个元素均为元组形式的给定集合的非空真子集\n }\n '''\n n = len(superset)\n proper_subsets = []\n for i in range(1, 2 ** n - 1): # 根据子集个数,循环遍历所有非空真子集\n proper_subset = []\n for j in range(n):\n if (i >> j) % 2: # 判断二进制下标为 j 的位置数是否为 1\n proper_subset.append(superset[j])\n proper_subsets.append(tuple(proper_subset))\n return proper_subsets\n\ndef extractSpicy(frequent_itemset):\n for i in frequent_itemset:\n if i=='\"1\"' or i=='\"2\"' or i=='\"3\"':\n return i\n return\n\n\ndef associationRules(frequent_itemsets, frequent_result_itemsets,min_conf):\n '''\n msg: 由频繁项集产生强关联规则\n param {\n frequent_itemsets:dict 字典形式的频繁项集的集合\n min_conf:double 最小置信度阈值\n }\n return{\n rules_list:list 以规则为元素的列表,其中规则以三元组 (频繁项集 Z,子集 S,子集 Z-S) 形式组织,对应规则 S⇒Z−S\n }\n '''\n rules_list = [] # 创建一个以规则为元素的列表,其中规则以三元组 (频繁项集 Z,子集 S,子集 Z-S) 形式组织,对应规则 S⇒Z−S\n for frequent_itemset in frequent_itemsets: # 遍历所有的频繁项集\n if len(frequent_itemset) == 1: # 判断是否是频繁 1 项集,因为使用频繁 1 项集产生的规则是无用的\n continue\n else:\n # 
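the original implementation scored every proper subset; kept for reference:\n            # 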
proper_subsets = allProperSubset(frequent_itemset)  # 得到当前频繁项集的所有非空真子集\n            # frequent_itemset_support = frequent_itemsets[frequent_itemset]  # 得到当前频繁项集对应的支持度计数\n            # for proper_subset in proper_subsets:  # 遍历当前频繁项集的所有非空真子集并生成对应规则\n            #     if frequent_itemset_support / frequent_itemsets[proper_subset] >= min_conf:  # 判断当前规则是否满足最小置信度阈值\n            #         rules_list.append((\n            #             frequent_itemset, proper_subset, tuple(sorted(set(frequent_itemset) - set(proper_subset)))\n            #         ))  # 当前规则满足最小置信度阈值,将其以三元组形式加入规则列表\n            spicies=extractSpicy(frequent_itemset)\n            conf=frequent_itemsets[frequent_itemset]/frequent_itemsets[(spicies,)]\n            proper_subset=list(copy.deepcopy(frequent_itemset))\n            proper_subset.remove(spicies)\n            if conf>=min_conf:\n                rules_list.append((\n                    frequent_itemset,tuple(proper_subset),tuple(sorted(set(frequent_itemset)-set(tuple(proper_subset)))),conf\n                ))\n\n\n    return rules_list\n\n\ndef lift(rule, total_num):\n    '''\n    msg: 使用提升度评判关联规则\n    param {\n        rule:tuple 以三元组 (频繁项集 Z,子集 S,子集 Z-S) 形式组织的规则\n        total_num:int 事务数据集中所有事务的数量\n    }\n    return{\n        rule_lift:double 关联规则的提升度\n    }\n    '''\n    # rule_lift = frequent_itemsets[rule[0]] * total_num / (frequent_itemsets[rule[1]] * frequent_itemsets[rule[2]])\n    # return rule_lift\n    return .0\n\n\ndef printRules(frequent_itemsets, rules_list, total_num, min_sup, min_conf):\n    '''\n    msg: 以固定格式打印所有规则\n    param {\n        frequent_itemsets:dict 字典形式的频繁项集的集合\n        rules_list:list 以规则为元素的列表,其中规则以三元组 (频繁项集 Z,子集 S,子集 Z-S) 形式组织,对应规则 S⇒Z−S\n        total_num:int 事务数据集中所有事务的数量\n        min_sup:int 最小支持度计数阈值\n        min_conf:double 最小置信度阈值\n    }\n    return: None\n    '''\n    min_sup_threshold = min_sup / total_num\n    for rule in rules_list:\n        rule_lift = lift(rule, total_num)\n        print(\"{: <40}--> {: <22}: support >= {:.2%}, confidence >= {:.2%}, lift = {:.3}\" \\\n            .format(str(rule[1]), str(rule[2]), min_sup_threshold, min_conf, rule_lift))\n\ndef mostConfRule(X,y):\n\n    min_sup = 0.1\n    min_conf = 0.1\n\n    frequent_one_itemsets = findFrequentOneItemsets(X, min_sup)\n    # frequent_one_itemsets = findFrequentOneItemsets(dataset, min_sup)\n    # frequent_one_itemsets = findFrequentOneItemsets(dataset[:,-1],min_sup)\n\n    frequent_result_itemsets = findFrequentOneItemsets(y, min_sup)\n\n    dataset = np.column_stack((X, y))\n\n    frequent_itemsets = apriori(dataset, frequent_one_itemsets,frequent_result_itemsets, min_sup)\n\n    rules_list = associationRules(frequent_itemsets, frequent_result_itemsets,min_conf)\n\n    # printRules(frequent_itemsets, rules_list, len(dataset), min_sup, min_conf)\n    mostRule=rules_list[0]\n    for rule in rules_list:\n        if rule[3]>mostRule[3]:\n            mostRule=rule\n    return mostRule\n\n\n\n# if __name__ == \"__main__\":\n#     min_sup = 0.1\n#     min_conf = 0.1\n\n#     dataset = np.loadtxt(\"./iris/iris.csv\", delimiter=\",\", usecols=range(1, 6), skiprows=1, dtype=str)\n\n#     frequent_one_itemsets = findFrequentOneItemsets(dataset[:,0:-1], min_sup)\n#     # frequent_one_itemsets = findFrequentOneItemsets(dataset, min_sup)\n#     # frequent_one_itemsets = findFrequentOneItemsets(dataset[:,-1],min_sup)\n\n#     frequent_result_itemsets = findFrequentOneItemsets(dataset[:,-1], min_sup)\n\n#     frequent_itemsets = apriori(dataset, frequent_one_itemsets,frequent_result_itemsets, min_sup)\n\n#     rules_list = associationRules(frequent_itemsets, frequent_result_itemsets,min_conf)\n\n#     printRules(frequent_itemsets, rules_list, len(dataset), min_sup, 
min_conf)\n","repo_name":"Athenais-Zhang/decisionTree_Distance","sub_path":"DT_apriori_test/apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31654606117","text":"from django.urls import path\nfrom . import views\n\napp_name = 'website'\n\nurlpatterns = [\n path('', views.index_page_view, name='index'),\n path('login/', views.login_page_view, name='login'),\n path('logout', views.logout_view, name='logout'),\n path(\"pesquisa\", views.pesquisa_view_page, name=\"pesquisa\"),\n path('introducao', views.introducao_page_view, name='introducao'),\n path('nprinc', views.nprinc_page_view, name='nprinc'),\n path('exercicios', views.exercicios_page_view, name='exercicios'),\n path('aboutus', views.aboutus_page_view, name='aboutus'),\n path('inserir_contato', views.inserir_contato_page_view, name='inserir_contato'),\n path('contatos', views.contatos_page_view, name='contatos'),\n path('editar_contato/', views.editar_contato_page_view, name='editar_contato'),\n path('apagar_contato/', views.apagar_contato_view, name='apagar_contato'),\n path('comentarios', views.comentarios_page_view, name='comentarios'),\n path('relatorios', views.relatorios_page_view, name='relatorios'),\n path('quiz', views.quiz_page_view, name='quiz'),\n path('aulas', views.aulas_page_view, name='aulas'),\n path('aulas_seccao/', views.aulas_section_view, name=\"aulas_seccao\"),\n path('aula/', views.aula_page_view, name='aula'),\n path('adicionar_contato/', views.adicionar_aluno_view, name='adicionar_contato'),\n path('remover_contato/,', views.remover_aluno_view, name='remover_contato'),\n]\n","repo_name":"jlgpereira/20060705-Projeto-PW-DjangoP2","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7115151122","text":"# 16017\n\nimport sys\n\nnum = \"\"\nfor _ in range(4):\n num += sys.stdin.readline().rstrip()\n\nif (((num[0] == \"8\") or (num[0] == \"9\")) and ((num[-1] == \"8\") or (num[-1] == \"9\")) and (num[1] == num[2])):\n print(\"ignore\")\nelse:\n print(\"answer\")\n","repo_name":"soohyeon21/study","sub_path":"BaekJoon/bronze4_1/16017.py","file_name":"16017.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33237789473","text":"import os\nimport FreeCAD\nimport DraftVecUtils\nfrom BimTranslateUtils import *\n\n#Part_Builder\n\n\nclass BIM_Builder:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Shapebuilder.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_Builder\", \"Shape builder...\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_Builder\", \"Advanced utility to create shapes\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Builder\")\n\n\n\nclass BIM_Offset2D:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Offset2D.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_Offset2D\", \"2D Offset...\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_Offset2D\", \"Utility to offset planar shapes\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Offset2D\")\n\n\nclass BIM_Extrude:\n\n def GetResources(self):\n\n return {'Pixmap' : 
os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Extrude.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_Extrude\", \"Extrude...\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_Extrude\", \"Extrude a selected sketch\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Extrude\")\n\n\nclass BIM_Cut:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Cut.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"BIM_Cut\", \"Difference\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"BIM_Cut\", \"Make a difference between two shapes\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Cut\")\n\n\nclass BIM_Fuse:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Fuse.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_Fuse\", \"Union\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_Fuse\", \"Make a union of several shapes\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Fuse\")\n\n\nclass BIM_Common:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Common.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_Common\", \"Intersection\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_Common\", \"Make an intersection of two shapes\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Common\")\n\n\nclass BIM_Compound:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Part_Compound.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_Compound\", \"Make compound\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_Compound\", \"Make a compound of several shapes\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_Compound\")\n\n\nclass BIM_SimpleCopy:\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"Tree_Part.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"Part_SimpleCopy\", \"Create simple copy\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"Part_SimpleCopy\", \"Create a simple non-parametric copy\"),\n }\n \n def Activated(self):\n \n import FreeCADGui\n import PartGui\n FreeCADGui.runCommand(\"Part_SimpleCopy\")\n \n\nclass BIM_TDPage:\n\n\n def GetResources(self):\n\n return {'Pixmap' : os.path.join(os.path.dirname(__file__),\"icons\",\"techdraw-PageDefault.svg\"),\n 'MenuText': QT_TRANSLATE_NOOP(\"BIM_TDPage\", \"Page\"),\n 'ToolTip' : QT_TRANSLATE_NOOP(\"BIM_TDPage\", \"Creates a new TechDraw page from a template\")}\n\n def IsActive(self):\n\n if FreeCAD.ActiveDocument:\n return True\n else:\n return False\n\n def Activated(self):\n\n from PySide import QtCore,QtGui\n import TechDraw\n\n templatedir = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/BIM\").GetString(\"TDTemplateDir\",\"\")\n if not templatedir:\n templatedir = None\n filename = QtGui.QFileDialog.getOpenFileName(QtGui.QApplication.activeWindow(), translate(\"BIM\",\"Select page template\"), templatedir, \"SVG file (*.svg)\");\n if filename:\n filename = filename[0]\n name = os.path.splitext(os.path.basename(filename))[0]\n FreeCAD.ActiveDocument.openTransaction(\"Create page\")\n page = FreeCAD.ActiveDocument.addObject('TechDraw::DrawPage',\"Page\")\n page.Label = name\n template = FreeCAD.ActiveDocument.addObject('TechDraw::DrawSVGTemplate','Template')\n 
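# bind the chosen SVG to the new template object and attach it to the page\n            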
template.Template = filename\n            template.Label = translate(\"BIM\",\"Template\")\n            page.Template = template\n            FreeCAD.ActiveDocument.commitTransaction()\n            FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/BIM\").SetString(\"TDTemplateDir\",filename.replace(\"\\\\\",\"/\"))\n            for txt in [\"scale\",\"Scale\",\"SCALE\",\"scaling\",\"Scaling\",\"SCALING\"]:\n                if txt in page.Template.EditableTexts:\n                    val = page.Template.EditableTexts[txt]\n                    if val:\n                        if \":\" in val:\n                            val = val.replace(\":\",\"/\")\n                        if \"/\" in val:\n                            try:\n                                page.Scale = eval(val)\n                            except:\n                                pass\n                            else:\n                                break\n                        else:\n                            try:\n                                page.Scale = float(val)\n                            except:\n                                pass\n                            else:\n                                break\n            else:\n                page.Scale = FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/BIM\").GetFloat(\"DefaultPageScale\",0.01)\n            FreeCAD.ActiveDocument.recompute()\n\n\nclass BIM_TDArchView:\n\n\n    def GetResources(self):\n\n        return {'Pixmap'  : os.path.join(os.path.dirname(__file__),\"icons\",\"techdraw-ArchView.svg\"),\n                'MenuText': QT_TRANSLATE_NOOP(\"BIM_TDArchView\", \"View\"),\n                'ToolTip' : QT_TRANSLATE_NOOP(\"BIM_TDArchView\", \"Creates a TechDraw view from a section plane or 2D objects\")}\n\n    def IsActive(self):\n\n        if FreeCAD.ActiveDocument:\n            return True\n        else:\n            return False\n\n    def Activated(self):\n\n        import FreeCADGui\n        import Draft\n\n        sections = []\n        page = None\n        drafts = []\n        for obj in FreeCADGui.Selection.getSelection():\n            t = Draft.getType(obj)\n            if t == \"SectionPlane\":\n                sections.append(obj)\n            elif t == \"TechDraw::DrawPage\":\n                page = obj\n            else:\n                drafts.append(obj)\n        if not page:\n            pages = FreeCAD.ActiveDocument.findObjects(Type='TechDraw::DrawPage')\n            if pages:\n                page = pages[0]\n        if (not page) or ((not sections) and (not drafts)):\n            FreeCAD.Console.PrintError(translate(\"BIM\",\"No section view or draft objects selected, or no page selected, or no page found in document\")+\"\\n\")\n            return\n        FreeCAD.ActiveDocument.openTransaction(\"Create view\")\n        for section in sections:\n            view = FreeCAD.ActiveDocument.addObject('TechDraw::DrawViewArch','ArchView')\n            view.Label = section.Label\n            view.Source = section\n            page.addView(view)\n            if page.Scale:\n                view.Scale = page.Scale\n        for draft in drafts:\n            view = FreeCAD.ActiveDocument.addObject('TechDraw::DrawViewDraft','DraftView')\n            view.Label = draft.Label\n            view.Source = draft\n            page.addView(view)\n            if page.Scale:\n                view.Scale = page.Scale\n        FreeCAD.ActiveDocument.commitTransaction()\n        FreeCAD.ActiveDocument.recompute()\n\n\nclass BIM_ImagePlane:\n\n\n    def GetResources(self):\n\n        return {'Pixmap'  : os.path.join(os.path.dirname(__file__),\"icons\",\"Image_CreateImagePlane.svg\"),\n                'MenuText': QT_TRANSLATE_NOOP(\"BIM_ImagePlane\", \"Image plane\"),\n                'ToolTip' : QT_TRANSLATE_NOOP(\"BIM_ImagePlane\", \"Creates a plane from an image\")}\n\n    def IsActive(self):\n\n        if FreeCAD.ActiveDocument:\n            return True\n        else:\n            return False\n\n    def Activated(self):\n\n        import FreeCADGui\n        from PySide import QtCore,QtGui\n        try:\n            import DraftTrackers\n        except Exception:\n            import draftguitools.gui_trackers as DraftTrackers\n\n        self.tracker = DraftTrackers.rectangleTracker()\n        self.basepoint = None\n        self.opposite = None\n        (filename, _filter) = QtGui.QFileDialog.getOpenFileName(QtGui.QApplication.activeWindow(),\n                                                                translate(\"BIM\",\"Select image\"), \n                                                                None, \n                                                                translate(\"BIM\",\"Image file (*.png *.jpg *.bmp)\"))\n        if filename:\n            self.filename = filename\n            im = QtGui.QImage(self.filename)\n            self.proportion = float(im.height())/float(im.width())\n            if hasattr(FreeCADGui,\"Snapper\"):\n                
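# let the user pick the image rectangle on screen via Draft's snapper\n                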
FreeCADGui.Snapper.getPoint(callback=self.PointCallback,movecallback=self.MoveCallback)\n\n def MoveCallback(self,point,snapinfo):\n\n if point and self.basepoint and (point != self.basepoint):\n chord = point.sub(self.basepoint)\n length = DraftVecUtils.project(chord,self.tracker.u).Length\n height = length * self.proportion\n self.opposite = FreeCAD.Vector(self.tracker.u).multiply(length).add(FreeCAD.Vector(self.tracker.v).multiply(height))\n self.opposite = self.basepoint.add(self.opposite)\n self.tracker.update(self.opposite)\n\n def PointCallback(self,point,snapinfo):\n\n import FreeCADGui\n import Image\n\n if not point:\n # cancelled\n self.tracker.off()\n return\n elif not self.basepoint:\n # this is our first clicked point, nothing to do just yet\n self.basepoint = point\n self.tracker.setorigin(point)\n self.tracker.on()\n FreeCADGui.Snapper.getPoint(last=point,callback=self.PointCallback,movecallback=self.MoveCallback)\n else:\n # this is our second point\n self.tracker.off()\n midpoint = self.basepoint.add(self.opposite.sub(self.basepoint).multiply(0.5))\n rotation = FreeCAD.DraftWorkingPlane.getRotation().Rotation\n diagonal = self.opposite.sub(self.basepoint)\n length = DraftVecUtils.project(diagonal,FreeCAD.DraftWorkingPlane.u).Length\n height = DraftVecUtils.project(diagonal,FreeCAD.DraftWorkingPlane.v).Length\n FreeCAD.ActiveDocument.openTransaction(\"Create image plane\")\n image = FreeCAD.activeDocument().addObject('Image::ImagePlane','ImagePlane')\n image.Label = os.path.splitext(os.path.basename(self.filename))[0]\n image.ImageFile = self.filename\n image.Placement = FreeCAD.Placement(midpoint,rotation)\n image.XSize = length\n image.YSize = height\n FreeCAD.ActiveDocument.commitTransaction()\n FreeCAD.ActiveDocument.recompute()\n","repo_name":"springtiger/BIM_Workbench","sub_path":"BimWrappedTools.py","file_name":"BimWrappedTools.py","file_ext":"py","file_size_in_byte":12089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"19650403106","text":"# -*- encoding: utf-8 -*-\nimport random\n\nfrom flask import Blueprint\nfrom App import models\nfrom App.models import *\n\n\ndef init_views(app):\n app.register_blueprint(blueprint=blue)\n\n\nblue = Blueprint(\"blue\", __name__, template_folder=\"../templates\")\n\n\n@blue.route(\"/index/\")\ndef index():\n return \"success\"\n\n\n@blue.route(\"/add_student/\")\ndef add_student():\n stu = Students()\n stu.s_age = 14\n stu.s_name = \"lionel%s\" % random.randrange(1000)\n stu.save()\n\n return \"add students success\"\n\n\n@blue.route(\"/add_school/\")\ndef add_school():\n school = School()\n school.position = \"光白路%s\" % random.randrange(1000)\n school.student_id = Students.query.order_by(Students.id.desc()).first().id\n school.save()\n return \"add school success\"\n","repo_name":"aaaasule/stu_flask","sub_path":"day22/App/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6788628227","text":"\"\"\"\nSome functions for working with puzzles\n\"\"\"\nfrom puzzle import Puzzle\nfrom collections import deque\n# set higher recursion limit\n# which is needed in PuzzleNode.__str__\n# uncomment the next two lines on a unix platform, say CDF\n# import resource\n# resource.setrlimit(resource.RLIMIT_STACK, (2**29, -1))\nimport sys\nsys.setrecursionlimit(10**6)\n\n\n# implement depth_first_solve\n# do NOT change the type contract\n# you are welcome to create 
any helper functions\n# you like\ndef depth_first_solve(puzzle):\n    \"\"\"\n    Return a path from PuzzleNode(puzzle) to a PuzzleNode containing\n    a solution, with each child containing an extension of the puzzle\n    in its parent. Return None if this is not possible.\n\n    @type puzzle: Puzzle\n    @rtype: PuzzleNode\n    \"\"\"\n\n
    if puzzle.fail_fast():\n        return None\n    elif puzzle.is_solved():\n        return PuzzleNode(puzzle)\n    else:\n        ret = PuzzleNode(puzzle)\n        for i in puzzle.extensions():\n            cur = depth_first_solve(i)\n            if cur is None:\n                continue\n            # keep the first extension that leads to a solution and return\n            # the root of the resulting root-to-solution path\n            cur.parent = ret\n            ret.children.append(cur)\n            return ret\n        # no extension leads to a solution\n        return None\n\n\n
# implement breadth_first_solve\n# do NOT change the type contract\n# you are welcome to create any helper functions\n# you like\n# Hint: you may find a queue useful, that's why\n# we imported deque\n\n\ndef breadth_first_solve(puzzle):\n    \"\"\"\n    Return a path from PuzzleNode(puzzle) to a PuzzleNode containing\n    a solution, with each child PuzzleNode containing an extension\n    of the puzzle in its parent. Return None if this is not possible.\n\n    @type puzzle: Puzzle\n    @rtype: PuzzleNode\n    \"\"\"\n\n
    queue = deque()\n    inode = PuzzleNode(puzzle)\n    queue.append(inode)\n    # track visited configurations by their string form so cyclic\n    # puzzle spaces still terminate\n    seen = set()\n    while queue:\n        # popleft, not pop, so the deque behaves as a FIFO queue\n        cur = queue.popleft()\n        if cur.puzzle.fail_fast() or str(cur.puzzle) in seen:\n            continue\n        seen.add(str(cur.puzzle))\n        if cur.puzzle.is_solved():\n            # walk back up to the root, keeping only the child that is on\n            # the solution path, and return the root node\n            node = cur\n            while node.parent is not None:\n                node.parent.children = [node]\n                node = node.parent\n            return node\n        for i in cur.puzzle.extensions():\n            new_node = PuzzleNode(i)\n            new_node.parent = cur\n            queue.append(new_node)\n    return None\n\n\n
# Class PuzzleNode helps build trees of PuzzleNodes that have\n# an arbitrary number of children, and a parent.\nclass PuzzleNode:\n    \"\"\"\n    A Puzzle configuration that refers to other configurations that it\n    can be extended to.\n    \"\"\"\n\n    def __init__(self, puzzle=None, children=None, parent=None):\n        \"\"\"\n        Create a new puzzle node self with configuration puzzle.\n\n        @type self: PuzzleNode\n        @type puzzle: Puzzle | None\n        @type children: list[PuzzleNode]\n        @type parent: PuzzleNode | None\n        @rtype: None\n        \"\"\"\n        self.puzzle, self.parent = puzzle, parent\n        if children is None:\n            self.children = []\n        else:\n            self.children = children[:]\n\n
    def __eq__(self, other):\n        \"\"\"\n        Return whether PuzzleNode self is equivalent to other\n\n        @type self: PuzzleNode\n        @type other: PuzzleNode | Any\n        @rtype: bool\n\n        >>> from word_ladder_puzzle import WordLadderPuzzle\n        >>> pn1 = PuzzleNode(WordLadderPuzzle(\"on\", \"no\", {\"on\", \"no\", \"oo\"}))\n        >>> pn2 = PuzzleNode(WordLadderPuzzle(\"on\", \"no\", {\"on\", \"oo\", \"no\"}))\n        >>> pn3 = PuzzleNode(WordLadderPuzzle(\"no\", \"on\", {\"on\", \"no\", \"oo\"}))\n        >>> pn1.__eq__(pn2)\n        True\n        >>> pn1.__eq__(pn3)\n        False\n        \"\"\"\n        return (type(self) == type(other) and\n                self.puzzle == other.puzzle and\n                all([x in self.children for x in other.children]) and\n                all([x in other.children for x in self.children]))\n\n
    def __str__(self):\n        \"\"\"\n        Return a human-readable string representing PuzzleNode self.\n\n        # doctest not feasible.\n        \"\"\"\n        return \"{}\\n\\n{}\".format(self.puzzle,\n                                 \"\\n\".join([str(x) for x in self.children]))\n","repo_name":"osipovat/puzzleSolver","sub_path":"puzzle_tools.py","file_name":"puzzle_tools.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19731143588","text":"from time import perf_counter\nfrom typing import List, Set, Dict, Tuple, Optional, Any\nimport os.path as path\nfrom itertools import combinations\nfrom time import 
perf_counter\nfrom pysat.solvers import Glucose3\n\n\n# Input\ndef readfile(file):\n if path.isfile(file)==False: return 'No file directory.'\n file=open(file)\n mat_size=file.readline().split(' ')\n s=file.readlines()\n s=[i.split(' ') for i in s]\n file.close()\n return int(mat_size[0]), int(mat_size[1]),[[int(num) for num in tmp]for tmp in s]\n\n\n# Output\ndef printMatrix(mat):\n for row in mat:\n for i in row:\n print(i,end=' ')\n print()\n\n\n# Find adjacent cells, then get the combination. After that, divide into True and False set.\n# Parameter returns a list of CNF clause\ndef generateClauses(r, adjacentList):\n clauses = []\n for trueSet in combinations(adjacentList, r):\n falseSet = list(set(adjacentList) - set(trueSet))\n for item in trueSet: clauses.append([item] + falseSet)\n for item in falseSet: clauses.append([-item] + [-i for i in trueSet])\n return clauses\n\n\n# Combine CNFs\ndef unifiedCNF_Clauses(mix_clauses):\n res = []\n for i in mix_clauses:\n if set(i) not in res: res.append(set(i))\n return res\n\n# Find adjacent cells\ndef getAdjacent(m, n, row, col):\n adjacent_cell=[]\n for i in range(row-1, row+2):\n for j in range(col-1, col+2):\n if i >- 1 and i < m and j > -1 and j < n: adjacent_cell.append(i*n+j+1)\n return adjacent_cell\n\n # Backtrack algorithm (fixes and backups)\n# Cell position\ndef getPosCells(mat, m, n):\n posCells=[]\n for row in range(m):\n for col in range(n):\n if mat[row][col] >= 0 and mat[row][col] <= 9: posCells.append((mat[row][col], row, col))\n return posCells\n\n\n# CNF fetch\ndef getCNF_Clauses(input, m, n):\n clauses = []\n for i in range(0, m):\n for j in range(0, n):\n if input[i][j] > -1:\n adjacent_list=getAdjacent(m, n, i, j)\n clauses=clauses+generateClauses(input[i][j], adjacent_list)\n return clauses\n\ndef completeClauses(mat, m, n):\n return unifiedCNF_Clauses(getCNF_Clauses(mat, m, n))\n\n\n # Main brute-force algorithm\n# Test whether solution is successful\ndef testResult(input,m, n, result):\n for row in range(m):\n for col in range(n):\n if (input[row][col] >= 0 and input[row][col] <= 9):\n count_green, right_green = 0, int(input[row][col])\n for i in range(row - 1, row + 2):\n if (i >= 0 and i < m):\n for j in range(col - 1, col + 2):\n if (j >= 0 and j < n):\n if(result[i][j] == 1):\n count_green += 1\n if (right_green != count_green):\n return False\n return True\n\n\n# Mark color of a cell using brute-force algorithm\ndef BFAssignment(row, col, input, m, n, result):\n if (row == m):\n return testResult(input, m, n, result)\n for color in range(2):\n result[row][col] = color\n next_row, next_col = row, col + 1\n if (next_col == n):\n next_row += 1\n next_col = 0\n if (BFAssignment(next_row, next_col, input, m, n, result)):\n return True\n return False\n\n\n\n# Solve matrix input brute-force\ndef use_Brute_Force(mat, m, n):\n start = perf_counter()\n result = [[0 for i in range(n)]for j in range(m)]\n if (not BFAssignment (0, 0, mat, m, n, result)):\n for row in range(m):\n for col in range(n): result[row][col]=-1\n end = perf_counter()\n return result, end - start\n\n\n # Main backtrack algorithm\n# Get red cells in checked areas\ndef checkRedAdjCell(checkCell, m, n, result):\n redAdjList = []\n countRedAdjCells, totalAdjCell = 0, 0\n for i in range(checkCell[1]-1, checkCell[1]+2):\n if (i >= 0 and i < m):\n for j in range(checkCell[2]-1, checkCell[2]+2):\n if (j >= 0 and j < n):\n if (result[i][j] == 0):\n countRedAdjCells += 1\n redAdjList.append((i, j))\n totalAdjCell += 1\n\n return (totalAdjCell, 
countRedAdjCells, redAdjList)\n\n\n# Mark cell green using backtrack algorithm\ndef BTAssignment(indexCell, posCell, redAdjIndex, redAdj, green, input, m, n, result):\n for adj in range(redAdjIndex, redAdj[1]):\n result[redAdj[2][adj][0]][redAdj[2][adj][1]] = 1\n conflict = 0\n for i in range(0, indexCell):\n temp = checkRedAdjCell(posCell[i], m, n, result)\n temp2 = int(posCell[i][0]) - (temp[0] - temp[1])\n if (temp2 != 0):\n conflict = 1\n break\n if (conflict == 0):\n green -= 1\n if (green > 0):\n if(BTAssignment(indexCell, posCell, adj+1, redAdj, green, input, m, n, result)):\n return True\n elif(green == 0):\n if (solveBTCells(indexCell+1, posCell, input, m, n, result)):\n return True\n green += 1\n result[redAdj[2][adj][0]][redAdj[2][adj][1]] = 0\n return False\n\n\n# Consider suitable color for the area of solved cell:\ndef solveBTCells(index, posCell, input, m, n, result):\n if (index < len(posCell)):\n redAdj = checkRedAdjCell(posCell[index], m, n, result)\n green = int(posCell[index][0]) - (redAdj[0] - redAdj[1])\n if (green > 0):\n return BTAssignment(index, posCell, 0, redAdj, green, input, m, n, result)\n elif (green == 0):\n return solveBTCells(index+1, posCell, input, m, n, result)\n else: return False\n return True\n\n\n# Solve matrix input backtrack\ndef use_Backtracking(mat, m, n):\n start = perf_counter()\n result = [[0 for i in range(n)]for j in range(m)]\n cell_with_unnega_value = getPosCells(mat, m, n)\n if (not solveBTCells(0, cell_with_unnega_value, mat, m, n, result)):\n for row in range(m):\n for col in range(n): result[row][col]=-1\n end = perf_counter()\n return result, end - start\n\n\n # Main PySAT\n# Solve matrix input pySAT\ndef use_pysat(mat, m, n):\n result = []\n start = perf_counter()\n clauses = completeClauses(mat, m, n)\n glucose = Glucose3()\n for i in clauses: glucose.add_clause([int(k) for k in i])\n glucose.solve()\n model = glucose.get_model()\n result = [[1 if (i*n + j + 1) in model else 0 for j in range(n)]for i in range(m)]\n stop = perf_counter()\n return result, stop - start\n\n","repo_name":"19127467/Project02-ColoringPuzzle","sub_path":"CNF.py","file_name":"CNF.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18635649156","text":"\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport matplotlib.pyplot as plt\n\ntf.set_random_seed(1)\nnp.random.seed(1)\n\nenv = gym.make('CartPole-v0')\n\nclass Brain():\n\tdef __init__(self):\n\t\tself.lr_rate = 0.001\n\t\tself.epsilon = 0.9\n\t\tself.epsilon_max = 0.9\n\t\tself.epsilon_increment = 0.01\n\t\tself.reward_decay = 0.01\n\t\tself.gamma = 0.9\n\t\tself.replace_targ_iter = 300\n\t\tself.mem_size = 1000\n\t\tself.batch_size = 128\n\t\tself.nb_actions = 2\n\t\tself.n_features = 4\n\t\tself.nb_features = self.n_features*2+2 # (4 in state, 1 rew, 1 action, 4 in state_)\n\n\t\t# learning step counter\n\t\tself.learn_step_counter = 0\n\t\t# initialize memory\n\t\tself.memory = np.zeros((self.mem_size, self.nb_features))\n\n\t\t# build model\n\t\tself.build_model()\n\n\t\tt_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')\n\t\te_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')\n\n\t\twith tf.variable_scope('soft_replacement'):\n\t\t\tself.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]\n\n\t\tself.sess = tf.Session()\n\t\ttf.summary.FileWriter('./graph/cartpole_dqn', 
self.sess.graph)\n\t\tself.sess.run(tf.global_variables_initializer())\n\t\tself.cost_his = []\n\n
\tdef build_layers(self, state):\n\t\tW1 = tf.Variable(tf.random_uniform([4,16], -1.0, 1.0), name=\"W1\")\n\t\tW2 = tf.Variable(tf.random_uniform([16,2], -1.0, 1.0), name=\"W2\")\n\t\tb1 = tf.Variable(tf.random_uniform([16], -1.0, 1.0), name=\"b1\")\n\t\tb2 = tf.Variable(tf.random_uniform([2], -1.0, 1.0), name=\"b2\")\n\n\t\tlayer_1 = tf.nn.relu(tf.matmul(state, W1) + b1, name=\"layer1\")\n\t\tpredicted = tf.add(tf.matmul(layer_1, W2), b2, name=\"layer2\")\n\n\t\t# w_initializer, b_initializer = tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)\n\n\t\t# l1 = tf.layers.dense(state, 16, tf.nn.relu, kernel_initializer=w_initializer,\n\t\t#\t\t\t\t\t\t\tbias_initializer=b_initializer, name='l1')\n\t\t# predicted = tf.layers.dense(l1, 2, kernel_initializer=w_initializer,\n\t\t#\t\t\t\t\t\t\tbias_initializer=b_initializer, name='q')\n\t\treturn predicted\n\n\n
\tdef build_model(self):\n\t\tself.ntwrk_state = tf.placeholder(tf.float32, [None,self.n_features], name=\"state\")\n\t\tself.ntwrk_state_ = tf.placeholder(tf.float32, [None,self.n_features], name=\"state_\")\n\t\tself.ntwrk_reward = tf.placeholder(tf.float32, [None,], name=\"reward\")\n\t\tself.ntwrk_action = tf.placeholder(tf.int32, [None,], name=\"action\")\n\n\t\t# build layers of eval_net (changes in network, prediction)\n\t\twith tf.variable_scope('eval_net'):\n\t\t\tself.predicted_eval = self.build_layers(self.ntwrk_state)\n\n\t\t# build layers of target_net (target)\n\t\twith tf.variable_scope('target_net'):\n\t\t\tself.predicted_target = self.build_layers(self.ntwrk_state_)\n\n
\t\t# calc prediction: Q(s, a) for the actions that were actually taken\n\t\twith tf.variable_scope('q_eval'):\n\t\t\t# self.eval = tf.reduce_sum(tf.multiply(self.predicted_eval, self.ntwrk_action), reduction_indices=1)\n\t\t\ta_indices = tf.stack([tf.range(tf.shape(self.ntwrk_action)[0], dtype=tf.int32), self.ntwrk_action], axis=1)\n\t\t\tself.eval = tf.gather_nd(params=self.predicted_eval, indices=a_indices)    # shape=(None, )\n\n\t\t# calc target: r + gamma * max_a' Q_target(s', a')\n\t\twith tf.variable_scope('q_target'):\n\t\t\ttarget = self.ntwrk_reward + self.gamma * tf.reduce_max(self.predicted_target, reduction_indices=1, name=\"Qmax\")\n\t\t\tself.target = tf.stop_gradient(target)\n\n\t\t# loss function\n\t\twith tf.variable_scope('loss'):\n\t\t\tself.loss = tf.reduce_mean(tf.squared_difference(self.target, self.eval, name='TD_error'))\n\n\t\t# optimizer\n\t\twith tf.variable_scope('train'):\n\t\t\tself.train_op = tf.train.RMSPropOptimizer(self.lr_rate).minimize(self.loss)\n\n\t\tmerged_summary = tf.summary.merge_all()\n\n\t\tsess = tf.InteractiveSession()\n\t\tsummary_writer = tf.summary.FileWriter('trainsummary',sess.graph)\n\t\tsess.run(tf.global_variables_initializer())\n\n
\tdef learn(self):\n\t\t# replace targets\n\t\tif self.learn_step_counter % self.replace_targ_iter == 0:\n\t\t\tself.sess.run(self.target_replace_op)\n\t\t\tprint(\"\\ntarget replaced\")\n\t\t# choose from batch\n\t\tif self.memory_counter > self.mem_size:\n\t\t\tidx = np.random.choice(self.mem_size, size=self.batch_size)\n\t\telse:\n\t\t\tidx = np.random.choice(self.memory_counter, size=self.batch_size)\n\t\tbatch_mem = self.memory[idx, :]\n\n\t\t# run one optimizer step on the eval network and fetch the TD loss\n\t\t_, cost = self.sess.run([self.train_op, self.loss],\n\t\t\t\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\t\t\t\tself.ntwrk_state: batch_mem[:, :self.n_features],\n\t\t\t\t\t\t\t\t\t\tself.ntwrk_action: batch_mem[:, self.n_features],\n\t\t\t\t\t\t\t\t\t\tself.ntwrk_reward: batch_mem[:, 
self.n_features+1],\n\t\t\t\t\t\t\t\t\t\tself.ntwrk_state_: batch_mem[:, -self.n_features:]\n\t\t\t\t\t\t\t\t})\n\t\tself.cost_his.append(cost)\n\n\t\tself.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n\t\tself.learn_step_counter += 1\n\n\tdef store_transition(self, state, action, reward, state_):\n\t\tif not hasattr(self, 'memory_counter'):\n\t\t\tself.memory_counter = 0\n\t\ttransition = np.hstack((state, action, reward, state_))\n\t\tidx = self.memory_counter % self.mem_size\n\t\tself.memory[idx, :] = transition\n\t\tself.memory_counter += 1\n\n\n\tdef choose_action(self, state):\n\t\tstate = state[np.newaxis, :]\n\t\t# action=0\n\t\tif np.random.random() < self.epsilon:\n\t\t\taction = env.action_space.sample()\n\t\telse:\n\t\t\taction_val = self.sess.run(self.predicted_eval, feed_dict={self.ntwrk_state: state})\n\t\t\taction = np.argmax(action_val)\n\t\treturn action\n\n\tdef plot_cost(self):\n\t\tplt.plot(np.arange(len(self.cost_his)), self.cost_his)\n\t\tplt.ylabel('Cost')\n\t\tplt.xlabel('training steps')\n\t\tplt.show()\n\nbrain = Brain()\n\nstep = 0\nfor episode in range(1, 1000):\n\n\tstate = env.reset()\n\tprint(\"Episode: \", episode)\n\twhile True:\n\t\tenv.render()\n\n\t\t# take action using epsilon-greedy\n\t\taction = brain.choose_action(state) \n\n\t\tstate_, reward, done, info = env.step(action) \n\n\t\t# Store transition in replay memory\n\t\tbrain.store_transition(state, action, reward, state_)\n\t\t# print(brain.memory)\n\n\t\tif step > 200:\n\t\t\tbrain.learn()\n\n\t\tstate = state_\n\n\t\tif done:\n\t\t\tbreak\n\n\t\tstep+=1\n\n\nprint(\"GAME OVER !!!\")\nbrain.plot_cost()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"adesgautam/Reinforcement-Learning","sub_path":"cartpole/cartpole_dqn.py","file_name":"cartpole_dqn.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"30305657201","text":"import os\nimport docx\nimport PyPDF2\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport mysql.connector\n\n# Set up NLTK stopwords\nnltk.download('stopwords', quiet=True)\nnltk.download('punkt', quiet=True)\nstop_words = list(stopwords.words('english'))\n\n# Function to convert PDF to plain text\ndef pdf2txt(file_path):\n with open(file_path, 'rb') as f:\n pdf_reader = PyPDF2.PdfReader(f)\n num_pages = len(pdf_reader.pages)\n\n text = \"\"\n for i in range(num_pages):\n page = pdf_reader.pages[i]\n text += page.extract_text()\n\n return text\n\n# Function to convert DOCX to plain text\ndef docx2txt(file_path):\n document = docx.Document(file_path)\n text = \"\"\n for paragraph in document.paragraphs:\n text += paragraph.text\n\n return text\n\n# Function to extract plain text from all files in directory\ndef extract_text():\n for file_name in os.listdir('.'):\n if file_name.endswith('.pdf') and not file_name.startswith('~$'):\n file_path = os.path.abspath(file_name)\n text = pdf2txt(file_path)\n txt_path = os.path.join(str(\"readyForSum\")+'.txt')\n with open(txt_path, 'w', encoding='utf-8') as f:\n f.write(text)\n os.remove(file_name)\n elif file_name.endswith('.docx') and not file_name.startswith('~$'):\n file_path = os.path.abspath(file_name)\n text = docx2txt(file_path)\n txt_path = os.path.join(str(\"readyForSum\")+'.txt')\n with open(txt_path, 'w', encoding='utf-8') as f:\n 
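# persist the extracted plain text for the summarizer, then drop the source file\n                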
f.write(text)\n os.remove(file_name)\n\n# Function to generate summary\ndef generate_summary():\n # Extract text from files\n extract_text()\n\n # Read text from file\n with open('readyForSum.txt', 'r') as f:\n text = f.read()\n\n # Tokenize sentences\n sentences = nltk.sent_tokenize(text)\n\n # Create TF-IDF matrix\n vectorizer = TfidfVectorizer(stop_words=stop_words)\n tfidf_matrix = vectorizer.fit_transform(sentences)\n\n # Calculate similarity matrix\n sim_matrix = cosine_similarity(tfidf_matrix, tfidf_matrix)\n\n # Generate summary\n summary = \"\"\n ranked_sentences = sorted(((sim_matrix[i][j], i, j) for i in range(len(sentences)) for j in range(len(sentences)) if i != j), reverse=True)\n for i in range(min(len(sentences), 5)):\n index = ranked_sentences[i][1]\n summary += sentences[index] + \" \"\n\n # Print summary\n print(\"Summarize Text: \\n\", summary)\n\n # Connect to MySQL database\n \n # Use your own db credentials\n \n mycursor = mydb.cursor()\n \n # Define SQL query to update abstract field\n sql = \"UPDATE document SET abstract = %s WHERE id = (SELECT MAX(ids) FROM (SELECT id as ids FROM document) AS temabs)\"\n val = summary\n \n # Try to execute query and commit changes to database\n mycursor.execute(sql, (val,))\n mydb.commit()\n \n # Close database connection\n mycursor.close()\n mydb.close()\n \n return summary\n\n# filename = 'validate.txt'\n# if not os.path.exists(filename):\n# with open(filename, 'w') as f:\n# f.write('Filesdasasd created')\ngenerate_summary()\n\n\n","repo_name":"jw2579/notes-sharing-platform","sub_path":"upload/capturenew.py","file_name":"capturenew.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33901033191","text":"import collections\nimport functools\nimport glob\nimport logging\nimport os\nimport re\nimport time\nfrom datetime import datetime\n\nfrom fdutils.strings import get_string_index_info\n\nlog = logging.getLogger(__name__)\n\n\ndef slugify(text, delim='_', lowercase=True):\n \"\"\" simple slugify of text \"\"\"\n text = str(text).strip()\n\n _punct_re = re.compile(r'[\\s!\"#$%&\\'()*\\-/<=>?@\\[\\\\\\]^_`{|},\\.;:]+')\n\n \"\"\"Generates an slightly worse ASCII-only slug.\"\"\"\n result = []\n if lowercase:\n text = text.lower()\n for word in _punct_re.split(text):\n if word:\n result.append(word)\n return str(delim.join(result))\n\n\ndef get_filename_suffixed(file_path, suffix, sep='_', is_folder=False):\n \"\"\" returns a filename suffixed with a word (before extension)\n\n Args:\n file_path: filename to sufix (ie: myfile.ext)\n suffix: suffix to add to filename (ie: suffix='new' then result will be myfile_new.ext\n sep: separates the old filename with the suffix\n\n Returns:\n\n \"\"\"\n if not is_folder:\n name, ext = os.path.splitext(file_path)\n else:\n name = file_path\n ext = ''\n return name + sep + suffix + ext\n\n\ndef get_filename_timestamped(file_path, timestamp=None, is_folder=False):\n \"\"\" stamp a filename with an added timestamp or string (new_stamp) between the filename and the extension\n\n \"\"\"\n return get_filename_suffixed(file_path, timestamp or get_timestamp(), is_folder=is_folder)\n\n\ndef _get_filename_index_info(file_path, is_folder):\n \"\"\" stamp a filename with an added indexed incremented to what it had before\n\n \"\"\"\n if not is_folder:\n name, ext = os.path.splitext(file_path)\n else:\n name = file_path\n ext = ''\n return get_string_index_info(name) + (ext,)\n\n\ndef 
_get_filename_index_value(file_path):\n    \"\"\" return the numeric index at the end of a filename like 'name_03', or -1 if there is none\n\n    \"\"\"\n    file_idx_re = re.compile(r'^(?P<name>.+?)_(?P<idx>\\d+)$')\n    name, ext = os.path.splitext(file_path)\n    m = file_idx_re.match(name)\n    if m:\n        return int(m.group('idx'))\n    else:\n        return -1\n\n\n
def get_filename_indexed(file_path, is_folder=False):\n    \"\"\" stamp a filename with an index incremented from the highest existing one\n\n    \"\"\"\n    name, underscore, idx, ext = _get_filename_index_info(file_path, is_folder)\n\n    if idx is None:\n        idx = 0\n\n    name = name + underscore\n    highest_idx_file = sorted(glob.glob('{}*{}'.format(name, ext)),\n                              key=lambda x: _get_filename_index_value(x), reverse=True)\n    if highest_idx_file:\n        name, underscore, idx, ext = _get_filename_index_info(highest_idx_file[0], is_folder)\n        name = name + underscore\n        if idx is None:\n            idx = 0\n\n    return '{}{:02d}{}'.format(name, idx + 1, ext)\n\n\n
def split_filepath_into_components(file_path):\n    \"\"\" splits a file path a/b/c/d/e into a list of [a,b,c,d,e]\n\n    :param file_path:\n    :return:\n    \"\"\"\n    folders = []\n    while 1:\n        file_path, folder = os.path.split(file_path)\n\n        if folder != \"\":\n            folders.append(folder)\n        else:\n            if file_path != \"\":\n                folders.append(file_path)\n\n            break\n\n    folders.reverse()\n    return folders\n\n\n
def duplicate_file_with_stamp(file_path):\n    \"\"\" make a copy of a file name on the same folder. Time stamp the new file if no new file name is given.\n\n    \"\"\"\n    import shutil\n\n    new_file_name = get_filename_timestamped(file_path)\n    shutil.copy(file_path, new_file_name)\n    return new_file_name\n\n\n
def replace_text_in_file(file_path, replace_this, for_that, case_insensitive=False, is_regex=False, keep_copy=False,\n                         number_of_subs=0):\n    \"\"\" replace a string or regex (if is_regex is set) from a file given in file_path, with another string.\n\n    This is a replacement for sed if needed.\n\n    @param str file_path: path to the file to be changed\n    @param str replace_this: string or regex to match and replace\n    @param str for_that: string that will replace the match\n    @param bool case_insensitive: flag to indicate if case is important\n    @param bool is_regex: flag to indicate if replace_this is a regular expression or a plain string\n    @param bool keep_copy: flag to keep copy of original file or not. The original file will be timestamped\n    @param int number_of_subs: number of times to do the substitution. 
A zero means replace all\n @rtype: tuple\n\n \"\"\"\n\n if not is_regex:\n replace_this = re.escape(replace_this)\n\n new_file_path = duplicate_file_with_stamp(file_path) if keep_copy else file_path\n\n import fileinput\n\n for current_line in fileinput.input(file_path, inplace=True):\n current_line, num_subs_made = re.subn(replace_this, for_that, current_line,\n flags=(re.IGNORECASE if case_insensitive else 0), count=number_of_subs)\n number_of_subs = 0 if not number_of_subs else (number_of_subs - num_subs_made)\n\n return file_path, new_file_path\n\n\ndef seconds_to_hour_min_sec(secs):\n \"\"\" simple formatter\n\n :param secs:\n :return:\n \"\"\"\n hours = int(secs / 3600)\n secs -= hours * 3600\n mins = int(secs / 60)\n secs -= mins * 60\n return '{:02d}:{:02d}:{:02d}'.format(hours, mins, int(secs))\n\n\ndef get_timestamp():\n from time import strftime\n return strftime('%Y%m%d_%H%M%S')\n\n\ndef get_files_sorted_by_creation(dirpath, match=\"*.*\", reverse=False):\n from stat import S_ISREG, ST_CTIME, ST_MODE\n\n # get all entries in the directory w/ stats\n entries = (os.path.join(dirpath, fn) for fn in glob.glob(dirpath + \"/\" + match))\n entries = ((os.stat(path), path) for path in entries)\n\n # leave only regular files, insert creation date\n entries = ((stat[ST_CTIME], path) for stat, path in entries if S_ISREG(stat[ST_MODE]))\n\n return sorted(entries, reverse=reverse)\n\n\nclass Tailer:\n \"\"\" Implements tailing and heading functionality like GNU tail and head commands. \"\"\"\n line_terminators = ('\\r\\n', '\\n', '\\r')\n\n def __init__(self, file_name, read_size=1024, num_lines=0, end=True):\n self.read_size = read_size\n self.file = file_name\n self.start_pos = self.file.tell()\n self.num_lines = num_lines\n self.captured_data = []\n if end:\n self.seek_end()\n\n def seek_end(self):\n self.captured_data = []\n self.seek(0, 2)\n\n def seek(self, pos, whence=0):\n self.file.seek(pos, whence)\n\n def read(self, read_size=None):\n if read_size:\n read_str = self.file.read(read_size)\n else:\n read_str = self.file.read()\n\n return len(read_str), read_str\n\n def seek_line(self):\n \"\"\"\\\n Searches backwards from the current file position for a line terminator\n and seeks to the character after it.\n \"\"\"\n pos = end_pos = self.file.tell()\n read_size = self.read_size\n if pos > read_size:\n pos -= read_size\n else:\n pos = 0\n read_size = end_pos\n\n self.seek(pos)\n\n bytes_read, read_str = self.read(read_size)\n\n if bytes_read and read_str[-1] in self.line_terminators:\n # The last charachter is a line terminator, don't count this one\n bytes_read -= 1\n\n if read_str[-2:] == '\\r\\n' and '\\r\\n' in self.line_terminators:\n # found crlf\n bytes_read -= 1\n\n while bytes_read > 0:\n # Scan backward, counting the newlines in this bufferfull\n i = bytes_read - 1\n while i >= 0:\n if read_str[i] in self.line_terminators:\n self.seek(pos + i + 1)\n return self.file.tell()\n i -= 1\n\n if pos == 0 or pos - self.read_size < 0:\n # Not enought lines in the buffer, send the whole file\n self.seek(0)\n return None\n\n pos -= self.read_size\n self.seek(pos)\n\n bytes_read, read_str = self.read(self.read_size)\n\n return None\n\n def iterator(self):\n \"\"\"\\\n Return the last lines of the file.\n \"\"\"\n i = 1\n\n while True:\n prev_pos = self.file.tell()\n new_pos = self.seek_line()\n if not new_pos:\n new_pos = 0\n\n data = self.file.read(prev_pos - new_pos)\n self.seek(new_pos)\n if data:\n i += 1\n self.captured_data.append(data.strip())\n yield data.strip()\n if not 
new_pos or (self.num_lines and i > self.num_lines):\n raise StopIteration()\n else:\n raise StopIteration()\n\n def __iter__(self):\n return self.iterator()\n\n def close(self):\n self.file.close()\n\n\ndef get_gmt_offset_by(timestr, seconds):\n \"\"\" returns a gmt time as string with some seconds added to it\n\n \"\"\"\n return time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(int(timestr) + seconds))\n\n\ndef name_if_file_exists(full_path_filename, add_index=False):\n \"\"\" Checks if the filename (full path) exists and add a timestamp if it does or an index.\n It does not change the name of an existing file, this will usually be used as the name of file\n\n Args:\n add_index (bool):\n full_path_filename (str):\n\n Returns:\n str: the new name for the file if there was a file with the same name, if not, return the same full_path_filename\n \"\"\"\n if not os.path.exists(full_path_filename):\n return full_path_filename\n\n new_full_path_filename = full_path_filename\n tries = 0\n\n while os.path.exists(new_full_path_filename) and tries < 10:\n if add_index:\n new_full_path_filename = get_filename_indexed(new_full_path_filename)\n else:\n new_full_path_filename = get_filename_timestamped(full_path_filename)\n\n log.debug('File already exists. Will check ' + new_full_path_filename)\n\n tries += 1\n\n if tries >= 10:\n raise Exception('There must be some racing condition as we tried using 10 different timestamp and '\n 'they did not work. Check your code for any threading/multiprocess issue !!')\n\n return new_full_path_filename\n\nindexed_name_if_file_exists = functools.partial(name_if_file_exists, add_index=True)\n\n\ndef json_utils_default(obj):\n \"\"\" defaults function used by json.dumps function to make datetime values javascript compatible\n\n \"\"\"\n if isinstance(obj, datetime):\n return obj.isoformat()\n return obj\n\n\ndef get_process_id(name, args=None):\n \"\"\" try to find the process which name starts with name and with arguments given by args\n\n @rtype: psutil.Process or None\n\n \"\"\"\n import psutil\n for pid in psutil.get_pid_list():\n try:\n proc = psutil.Process(pid)\n except psutil.error.NoSuchProcess: # in case a process dies in the middle of this\n continue\n if proc.name.find(name) >= 0:\n pass\n if proc.name.find(name) >= 0 and args and args == proc.cmdline[1:]:\n return proc\n return None\n\n\ndef are_objects_the_same(a, b, attrib=None):\n \"\"\" utility for __eq__ comparison for objects\n\n :param a: object a (self in __eq__)\n :param b: object b (other in __eq__)\n :param list or tuple attrib: list of attributes to compare objects. If none we will use vars to retrieve the attributes from a (self)\n\n \"\"\"\n if attrib is None:\n attrib = vars(a)\n for att in attrib:\n if not getattr(a, att) == getattr(b, att):\n return False\n return True\n\n\ndef cmd_output_split_parser(out, columns=(), starts_at=0, stops_at=0, split_re=r'\\s+', column_info_at=-1,\n start_after_columns_line=False):\n \"\"\" Function to parse the output from a command in a shell\n\n :param out: list with the values separated by split_re regex\n :param columns: list of column names\n :param starts_at: line where to start reading from\n :param stops_at: line where to stop reading at. 
This will usually be zero to mean read all or negative to read until the last N line\n :param split_re: the regex expression to use to split the values\n :param column_info_at: if we want to retrieve the information about the columns from a line instead of from the columns argument\n :param start_after_columns_line: this will try to find the start where the columns line is by finding a line with the columns values given on the argument columns\n :return: dictionary with the key being the first column and the value being either a dictionary with the keys being the columns from 1\n \"\"\"\n def add_columns(key, values):\n if not key in ret:\n ret[key] = dict()\n for i, c in enumerate(columns[1:]):\n if i > len(values):\n return\n ret[key][c] = values[i]\n\n continuation = re.compile(r'^\\s{4,}')\n ret = collections.OrderedDict()\n last = ''\n split_number = len(columns) - 1\n stop = len(out) if not stops_at else stops_at\n\n if column_info_at != -1:\n columns = re.split(split_re, out[column_info_at])\n elif start_after_columns_line:\n column_i = [v.lower() for v in columns]\n found = False\n for starts_at, line in enumerate(out):\n if column_i == [v.lower() for v in re.split(split_re, line, split_number)]:\n found = True\n starts_at += 1\n break\n if not found:\n raise Exception('Header Columns not found')\n\n if len(out) > starts_at:\n for v in out[starts_at:stop]:\n # check if line is empty:\n if not v.strip():\n continue\n # check if line is continuation:\n if continuation.match(v):\n add_columns(last, re.split(split_re, v, split_number))\n continue\n a = re.split(split_re, v, split_number)\n last = a[0]\n if len(a) > split_number:\n add_columns(last, a[1:])\n elif len(a) == 1:\n ret[a[0]] = dict()\n # as the prompt changes with the cd we need a way to remove the prompt element added that was not removed during the expect processing\n return ret\n\n\ndef return_list_of_files_sorted_by_date(folder, newest_first=True, count=None, extension='', include_folder=True, starts_with=''):\n \"\"\"\n\n :param folder: folder where files are located\n :param newest_first: date sorted\n :param count: how many to return or None to return all\n :param starts_with: if given we will filter files by files that starts with this string\n :return:\n \"\"\"\n from stat import S_ISREG, ST_CTIME, ST_MODE\n import os\n\n # get all entries in the directory w/ stats\n entries = (os.path.join(folder, fn) for fn in os.listdir(folder) if fn.startswith(starts_with) and fn.endswith(extension))\n entries = ((os.stat(path), path) for path in entries)\n\n # leave only regular files, insert creation date\n entries = ((stat[ST_CTIME], path)\n for stat, path in entries if S_ISREG(stat[ST_MODE]))\n\n if count is not None and count <= 0:\n count = None\n if include_folder:\n return [path for cdate, path in sorted(entries, reverse=newest_first)[:count]]\n else:\n return [os.path.basename(path) for cdate, path in sorted(entries, reverse=newest_first)[:count]]\n\n\ndef modify_filename(filename_path, prepend='', append='', timestamp=False):\n \"\"\" modifies the filename part of a path\n\n \"\"\"\n filepath, filename_whole = os.path.split(filename_path)\n filename, ext = os.path.splitext(filename_whole)\n new_filename_path = os.path.join(filepath, prepend + filename + append + ext)\n if timestamp:\n return get_filename_timestamped(new_filename_path)\n return new_filename_path\n\n\ndef set_attribute_against_dict(self, info, dictionary):\n \"\"\" sets attributes values to an object\n\n :param info:\n :param dictionary:\n :return:\n 
\"\"\"\n for key, val in info.items():\n if key in dictionary:\n setattr(self, dictionary[key], val)\n\n\ndef utc_now():\n \"\"\" type helper function\n\n :return:\n \"\"\"\n return datetime.utcnow().replace(microsecond=0)\n\n\ndef md5_checksum_stream(s, buffer_size=8192):\n \"\"\" calculate md5 for a stream in case of a large file that cannot be hold in memory\n\n :param s: stream\n :return:\n \"\"\"\n import hashlib\n m = hashlib.md5()\n while True:\n data = s.read(buffer_size)\n if not data:\n break\n m.update(data)\n return m.hexdigest()\n\n\ndef md5_checksum(filePath):\n \"\"\" calculates md5 using the md5_checksum_stream function\n\n :param filePath:\n :return:\n \"\"\"\n with open(filePath, 'rb') as fh:\n return md5_checksum_stream(fh)\n\n# following functions modified\n# from selenium.driver.firefox.webdriver.py\n\n# Licensed to the Software Freedom Conservancy (SFC) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The SFC licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\ndef get_default_windows_location(exec_names, win_program_file_loc):\n program_files = [os.getenv(\"PROGRAMFILES\", r\"C:\\Program Files\"),\n os.getenv(\"PROGRAMFILES(X86)\", r\"C:\\Program Files (x86)\")]\n for path in program_files:\n for file in exec_names:\n for filepath in sorted(glob.glob(os.path.join(path, win_program_file_loc, file)),\n key=lambda x: len(x), reverse=True):\n if os.access(filepath, os.X_OK):\n return filepath\n return \"\"\n\n\ndef find_exe_in_registry(keys):\n if not keys:\n return \"\"\n try:\n from _winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER\n except ImportError:\n from winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER\n import shlex\n command = \"\"\n for path in keys:\n try:\n key = OpenKey(HKEY_LOCAL_MACHINE, path)\n command = QueryValue(key, \"\")\n break\n except OSError:\n try:\n key = OpenKey(HKEY_CURRENT_USER, path)\n command = QueryValue(key, \"\")\n break\n except OSError:\n pass\n else:\n return \"\"\n\n if not command:\n return \"\"\n\n return shlex.split(command)[0]\n\n\ndef get_executable_path(exec_names, osx_default_start_cmd=None, win_program_file_loc='', registry_keys=()):\n \"\"\"Return the command to start firefox.\"\"\"\n import platform\n import os\n import sys\n import shutil\n\n if isinstance(exec_names, str):\n exec_names = [exec_names]\n\n # try which first\n for name in exec_names:\n path = shutil.which(name)\n if path:\n return path\n\n err_msg = \"Could not find {} in your system PATH. 
Please specify the binary location or install or add to PATH\" \\\n \"\".format(exec_names)\n start_cmd = None\n\n # try finding on common places\n if platform.system() == \"Darwin\" and osx_default_start_cmd:\n start_cmd = osx_default_start_cmd\n if not os.path.exists(start_cmd):\n start_cmd = os.path.expanduser(\"~\") + start_cmd\n elif platform.system() == \"Windows\":\n start_cmd = find_exe_in_registry(registry_keys)\n if not start_cmd and win_program_file_loc:\n start_cmd = get_default_windows_location(exec_names, win_program_file_loc)\n elif platform.system() == 'Java' and 'nt' in sys.builtin_module_names:\n start_cmd = get_default_windows_location(win_program_file_loc)\n\n if not start_cmd:\n raise RuntimeError(err_msg)\n\n return start_cmd\n\n\ndef csv_data_class(filepath, index_by, headers=(), headers_mapping=None, csv_has_headers=True):\n \"\"\" function to convert a csv into a dictionary with the keys being defined by index_by and the values\n being a SimpleNamespace instance\n\n Args:\n filepath (str):\n index_by (str):\n headers (list or tuple):\n headers_mapping (dict): a dictionary to map the headers name to new names\n csv_has_headers (bool):\n\n Returns:\n\n \"\"\"\n import types\n import csv\n\n if not csv_has_headers and not headers:\n raise Exception('There were no headers provided and you say that csv does not have headers')\n\n results = {}\n\n with open(filepath, newline='') as csvf:\n reader = csv.reader(csvf)\n csv_headers = []\n while csv_has_headers and not csv_headers:\n try:\n csv_headers = next(reader)\n except StopIteration:\n raise Exception(\"There does not seem to be anything in this csv file\")\n\n headers = headers or csv_headers\n\n if headers_mapping:\n headers = [headers_mapping.get(h, h) for h in headers]\n\n for row in reader:\n d = dict(zip(headers, row))\n key = d.pop(index_by)\n if key in results:\n raise Exception('We cannot have duplicate indexes')\n results[key] = types.SimpleNamespace(**d)\n\n return results\n","repo_name":"filintod/pyremotelogin","sub_path":"fdutils/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":21813,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2014044939","text":"from kivy.vector import Vector\nfrom utils.approx import Approx\n\n# default relpoints\nTOP = Vector(0.5, 1.0)\nTOP_LEFT = Vector(0.0, 1.0)\nTOP_RIGHT = Vector(1.0, 1.0)\n\nBOTTOM = Vector(0.5, 0.0)\nBOTTOM_LEFT = Vector(0.0, 0.0)\nBOTTOM_RIGHT = Vector(1.0, 0.0)\n\nCENTER = Vector(0.5, 0.5)\nCENTER_LEFT = LEFT = Vector(0.0, 0.5)\nCENTER_RIGHT = RIGHT = Vector(1.0, 0.5)\n\n\ndef relpoint_to_coords(widget, rel_point):\n \"\"\"Compute the real coordinates of a relative point w.r.t. 'widget'.\"\"\"\n return Vector(widget.x + rel_point[0] * widget.width,\n widget.y + rel_point[1] * widget.height)\n\n\ndef coords_to_relpoint(widget, point):\n \"\"\"Translate the given coordinates into a relative point w.r.t. 'widget'.\"\"\"\n return Vector(float(point[0] - widget.x) / widget.width,\n float(point[1] - widget.y) / widget.height)\n\n\ndef align_coords(widget, rel_point=CENTER, x=None, y=None):\n \"\"\"A utility function for aligning a point in a widget with the given coordinates. 
Only the\n coordinates that are specified (x, y, or both) are changed.\"\"\"\n x0, y0 = relpoint_to_coords(widget, rel_point)\n if x is not None:\n widget.x += x - x0\n if y is not None:\n widget.y += y - y0\n p = relpoint_to_coords(widget, rel_point)\n q = (x0 if x is None else x, y0 if y is None else y)\n assert Approx(0.0) == (p - q).length()\n\n\ndef align_widget(follower, align_by, followee, align_to=None):\n \"\"\"Align a relpoint in a widget to a relpoint in another widget.\"\"\"\n if align_to is None:\n align_to = align_by\n x, y = relpoint_to_coords(followee, align_to)\n align_coords(follower, align_by, x, y)\n\n\nclass AlignLink(object):\n \"\"\"An AlignLink object keeps a relpoint (align_by) in one widget (follower) aligned with a\n relpoint (align_to) in another widget (followee) just like align_widget() does. However,\n instead of being one-shot like align_widget(), the alignment is also automatically updated\n when the second widget changes position or any of the two widgets changes size. Note that\n each AlignLink is responsible for aligning only one axis.\"\"\"\n X_AXIS = \"x\"\n Y_AXIS = \"y\"\n AXES = (X_AXIS, Y_AXIS)\n\n # make default relpoints available through this class\n TOP = TOP\n TOP_LEFT = TOP_LEFT\n TOP_RIGHT = TOP_RIGHT\n BOTTOM = BOTTOM\n BOTTOM_LEFT = BOTTOM_LEFT\n BOTTOM_RIGHT = BOTTOM_RIGHT\n CENTER = CENTER\n CENTER_LEFT = LEFT = LEFT\n CENTER_RIGHT = RIGHT = RIGHT\n\n def __init__(self, axis, follower, align_by, followee, align_to=None):\n if axis not in AlignLink.AXES:\n raise ValueError(\"invalid axis - %s\" % (axis,))\n if align_to is None:\n align_to = align_by\n link_attr = \"__%s_alignlink\" % axis\n if hasattr(follower, link_attr):\n raise Exception(\"widgets can only have one active %s align link\" % (axis,))\n setattr(follower, link_attr, self)\n self.axis = axis\n self.follower = follower\n self.align_by = align_by\n self.followee = followee\n self.align_to = align_to\n follower.bind(size=self.update)\n followee.bind(size=self.update, pos=self.update)\n self.update()\n\n @classmethod\n def X(cls, follower, align_by, followee, align_to=None):\n return cls(cls.X_AXIS, follower, align_by, followee, align_to)\n\n @classmethod\n def Y(cls, follower, align_by, followee, align_to=None):\n return cls(cls.Y_AXIS, follower, align_by, followee, align_to)\n\n def __str__(self):\n return (\"%s.%s(%s%08x%s -> %s%08x%s)\" %\n (type(self).__name__, self.axis,\n type(self.follower).__name__, id(self.follower), self.align_by,\n type(self.followee).__name__, id(self.followee), self.align_to))\n\n def update(self, *args):\n x, y = relpoint_to_coords(self.followee, self.align_to)\n if self.axis is AlignLink.X_AXIS:\n align_coords(self.follower, self.align_by, x=x)\n else:\n align_coords(self.follower, self.align_by, y=y)\n\n def destroy(self):\n link_attr = \"__%s_alignlink\" % self.axis\n delattr(self.follower, link_attr)\n self.follower.unbind(size=self.update)\n self.followee.unbind(size=self.update, pos=self.update)\n\n @staticmethod\n def get(widget, axis):\n link_attr = \"__%s_alignlink\" % axis\n return getattr(widget, link_attr, None)\n","repo_name":"2xR/legacy","sub_path":"kivyx/links/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31254640775","text":"import os\r\nimport logging\r\nimport datetime\r\nimport requests\r\nfrom azure.cosmosdb.table.tableservice import TableService, TableBatch\r\nimport azure.functions as 
func\r\n\r\nAPI_KEY = os.environ['API_KEY']\r\nGITHUB_USER = os.environ['GITHUB_USER']\r\nTABLE_SERVICE_CONNECTION_STRING = os.environ['TABLE_SERVICE_CONNECTION_STRING']\r\n\r\n\r\n
def get_all_repositories():\r\n    service_url = f'https://api.github.com/users/{GITHUB_USER}/repos'\r\n    auth = (GITHUB_USER, API_KEY)\r\n    response = requests.get(service_url, auth=auth)\r\n\r\n    if response.status_code != 200:\r\n        logging.error(response.content)\r\n        return\r\n\r\n    results = response.json()\r\n    results = filter(lambda x: x['fork'] is False, results)\r\n    results = map(lambda x:\r\n                  {'repo_id': x['id'],\r\n                   'repo_url': x['url'],\r\n                   'repo_name': x['name']}, results)\r\n    results = list(results)\r\n\r\n    return results\r\n\r\n\r\n
def get_repo_attention(url: str, attention: str = 'clones'):\r\n    assert attention in ('clones', 'views')\r\n\r\n    service_url = url + '/traffic/' + attention\r\n    auth = (GITHUB_USER, API_KEY)\r\n    response = requests.get(service_url, auth=auth)\r\n\r\n    if response.status_code != 200:\r\n        logging.error(response.content)\r\n        return\r\n\r\n    results = response.json()\r\n    results = results[attention]\r\n\r\n    return results\r\n\r\n\r\n
def get_and_update_repo_attention(\r\n        table_service: TableService,\r\n        repo_id: int, repo_url: str, repo_name: str,\r\n        attention: str = 'clones'):\r\n    assert attention in ('clones', 'views')\r\n\r\n    results = get_repo_attention(url=repo_url, attention=attention)\r\n\r\n    batch = TableBatch()\r\n\r\n    if results is not None:\r\n        for el in results:\r\n            batch.insert_or_replace_entity({\r\n                'PartitionKey': str(repo_id),\r\n                'RowKey': el['timestamp'],\r\n                'name': repo_name,\r\n                'count': el['count'],\r\n                'uniques': el['uniques']\r\n            })\r\n\r\n    table_service.commit_batch(table_name=f'github{attention}', batch=batch)\r\n\r\n\r\n
def main(mytimer: func.TimerRequest) -> None:\r\n    utc_timestamp = datetime.datetime.utcnow().replace(\r\n        tzinfo=datetime.timezone.utc).isoformat()\r\n\r\n    if mytimer.past_due:\r\n        logging.info('The timer is past due!')\r\n\r\n    table_service = TableService(\r\n        connection_string=TABLE_SERVICE_CONNECTION_STRING\r\n    )\r\n\r\n    repositories = get_all_repositories()\r\n\r\n    for repo in repositories:\r\n        get_and_update_repo_attention(table_service, attention='clones', **repo)\r\n        get_and_update_repo_attention(table_service, attention='views', **repo)\r\n\r\n    logging.info('Python timer trigger function ran at %s', utc_timestamp)\r\n","repo_name":"ImScientist/AzureFunction-DataCollection","sub_path":"TimeTrigger1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35706128157","text":"#!/E:\\python\\env\\python37\\Scripts\n# -*- coding:utf-8 -*-\n#@Time :2019/6/15 22:10\n#@Author :zbwu103\n#@File :微信自动撤回.py\n#@Function: once running, when a friend sends a message and later recalls it, the program automatically saves the recalled message to a file (text, voice, image)\n\n\nimport itchat,os,re\nimport time\n\n
# path where recalled messages are saved\ntemp=\"D:\\\\python\\\\项目\\\\venv\\\\python小工具\\\\微信操作\\\\temp\"\nif not os.path.exists(temp):\n    os.mkdir(temp)\nitchat.auto_login(hotReload=True)\n# itchat.content.TEXT is a text message and itchat.content.PICTURE a picture message; other message types do not seem feasible for now\n@itchat.msg_register([itchat.content.TEXT,itchat.content.RECORDING,itchat.content.PICTURE ])# decorator that registers the function below as a handler; the friend's message is stored in msg\ndef resever_info(msg):\n    global info_T,info_type,name_R\n    info_T=msg['Text']\n    info_type=msg['Type']\n    name_R=msg['FileName']\n    print(msg)\n@itchat.msg_register(itchat.content.NOTE)# NOTE is a system message, i.e. a message the system sends to us\ndef chehui_info(chehui_msg):\n    global 
info_T,info_type,name_R\n\n    # the system notice text contains \"撤回了一条消息\" (\"recalled a message\")\n    if \"撤回了一条消息\" in chehui_msg[\"Text\"]:\n        if info_type=='Recording':\n            info_T(temp+'\\\\'+name_R)\n        elif info_type=='Text':\n            # use a regular expression to get the WeChat name of the person who recalled the message\n            reg = re.compile(\"\\\"(.*?)\\\"\", re.S)\n            res = re.search(reg, chehui_msg['Text'])\n            chehui_name = res.group(1)\n            # get the current time\n            nowtime=time.strftime(\"%m-%d-%H:%M\", time.localtime(time.time()))\n            with open(temp+'\\\\'+chehui_name+'.txt',\"ab\") as f:\n                f.write((nowtime+\" :\"+info_T+'\\n').encode('utf-8'))\n        elif info_type=='Picture':\n            info_T(temp + '\\\\' + name_R)\nitchat.run()","repo_name":"a1169804597/home","sub_path":"python小工具/微信操作/微信自动撤回.py","file_name":"微信自动撤回.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"3337258568","text":"class ConvAutoEncoder(nn.Module):\n    def __init__(self, K, num_filters=32, filter_size=5):\n        super(ConvAutoEncoder, self).__init__()\n        \n        # With padding=0, the number of pixels cut off from each image dimension\n        # is filter_size // 2. Double it to get the amount of pixels lost in\n        # width and height per Conv2D layer, or added back in per \n        # ConvTranspose2D layer.\n        filter_reduction = 2 * (filter_size // 2)\n\n        # After passing input through two Conv2d layers, the shape will be\n        # 'shape_after_conv'. This is also the shape that will go into the first\n        # deconvolution layer in the decoder\n        self.shape_after_conv = (num_filters,\n                                 my_dataset_size[1]-2*filter_reduction,\n                                 my_dataset_size[2]-2*filter_reduction)\n        flat_size_after_conv = self.shape_after_conv[0] \\\n            * self.shape_after_conv[1] \\\n            * self.shape_after_conv[2]\n\n
        self.enc_bias = BiasLayer(my_dataset_size)\n        self.enc_conv_1 = nn.Conv2d(my_dataset_size[0], num_filters, filter_size)\n        self.enc_conv_2 = nn.Conv2d(num_filters, num_filters, filter_size)\n        self.enc_flatten = nn.Flatten()\n        self.enc_lin = nn.Linear(flat_size_after_conv, K)\n\n        self.dec_lin = nn.Linear(K, flat_size_after_conv)\n        self.dec_unflatten = nn.Unflatten(dim=-1, unflattened_size=self.shape_after_conv)\n        self.dec_deconv_1 = nn.ConvTranspose2d(num_filters, num_filters, filter_size)\n        self.dec_deconv_2 = nn.ConvTranspose2d(num_filters, my_dataset_size[0], filter_size)\n        self.dec_bias = BiasLayer(my_dataset_size)\n\n
    def encode(self, x):\n        s = self.enc_bias(x)\n        s = F.relu(self.enc_conv_1(s))\n        s = F.relu(self.enc_conv_2(s))\n        s = self.enc_flatten(s)\n        h = self.enc_lin(s)\n        return h\n    \n    def decode(self, h):\n        s = F.relu(self.dec_lin(h))\n        s = self.dec_unflatten(s)\n        s = F.relu(self.dec_deconv_1(s))\n        s = self.dec_deconv_2(s)\n        x_prime = self.dec_bias(s)\n        return x_prime\n\n    def forward(self, x):\n        return self.decode(self.encode(x))\n\n
conv_ae = ConvAutoEncoder(K=K)\nassert conv_ae.encode(my_dataset[0][0].unsqueeze(0)).numel() == K, \\\n    \"Encoder output size should be K!\"\nconv_losses = train_autoencoder(conv_ae, my_dataset)\n\nplt.figure()\nplt.plot(lin_losses)\nplt.plot(conv_losses)\nplt.legend(['Lin AE', 'Conv AE'])\nplt.xlabel('Training batch')\nplt.ylabel('MSE Loss')\nplt.ylim([0,2*max(torch.as_tensor(conv_losses).median(), torch.as_tensor(lin_losses).median())])\nplt.show()\n","repo_name":"ungarl/course-content","sub_path":"tutorials/W8_AutoEncoders_GANs/solutions/W8_Tutorial1_Solution_Ex02.py","file_name":"W8_Tutorial1_Solution_Ex02.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"3812880286","text":"\nimport unittest\nfrom HTMLTestRunner import 
HTMLTestRunner\nfrom test.case.player.TestPlayImgsDelete import *\nimport time\n\nif __name__ == '__main__':\n    suite = unittest.TestSuite(unittest.makeSuite(TestPlayerImgs_delete))\n    now = time.strftime('%Y-%m-%d %H_%M_%S')\n    filename = 'F:/location/产品测试/自动化测试/测试报告/' + now + 'test_playerdelete.html'\n    fp = open(filename, 'wb')\n    runner = HTMLTestRunner(\n        stream=fp,\n        title=\"Athlete image batch deletion test\",\n        description=\"Automated test of athlete image deletion\")\n    runner.run(suite)","repo_name":"rico-o/autoTest","sub_path":"report/Test_playerReport/Test_playerimgdeleteReport.py","file_name":"Test_playerimgdeleteReport.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"70089464514","text":"#Thomas Thorpe\r\n#Pet Service Validation Dialog\r\n\r\nfrom PyQt4.QtGui import *\r\n\r\nclass ValidationDialog(QDialog):\r\n    def __init__(self, came_from, messages):\r\n        super(QDialog,self).__init__(came_from)\r\n        self.setWindowTitle(\"Validation Error\")\r\n        self.messages = messages #take error messages from previous window\r\n        self.CreateValidationDialog(messages)\r\n        self.setLayout(self.validation_layout)\r\n        self.exec_()\r\n\r\n
    def CreateValidationDialog(self, messages):\r\n        #create widgets\r\n        self.top_lbl = QLabel(\"The following information has been entered incorrectly:\")\r\n        count = 0\r\n        self.message = [] #will contain list of label widgets for error messages\r\n\r\n        for each in messages: #for each error message create a label widget displaying it and add it to the list\r\n            self.message.append(QLabel(\"\\t {0}\".format(each)))\r\n            count = count + 1\r\n\r\n        self.ok_button = QPushButton(\"Ok\")\r\n        self.ok_button.setDefault(True)\r\n\r\n        #create layout\r\n        self.validation_layout = QVBoxLayout()\r\n        self.validation_layout.addWidget(self.top_lbl)\r\n\r\n        for each in self.message:\r\n            self.validation_layout.addWidget(each)\r\n\r\n        self.validation_layout.addWidget(self.ok_button)\r\n\r\n        #connections\r\n        self.ok_button.clicked.connect(self.Close)\r\n\r\n    def Close(self):\r\n        self.close()\r\n\r\n\r\n\r\n","repo_name":"ThomasThorpe/PetMindingBusiness","sub_path":"Program/ValidationDialog.py","file_name":"ValidationDialog.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72681889153","text":"from sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nimport logger\nimport pandas as pd\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\nclass f_sel:\n    def __init__(self,file):\n        self.log_path=file\n        self.log_writer=logger.App_Logger()\n\n
    def select(self,df):\n        try:\n            bestfeatures = SelectKBest(k=15)\n            X=df.drop(columns='salary')\n            Y=df.salary\n            fit = bestfeatures.fit(X,Y)\n            dfscores = pd.DataFrame(fit.scores_)\n            dfcolumns = pd.DataFrame(X.columns)\n            #concat two dataframes for better visualization\n            featureScores = pd.concat([dfcolumns,dfscores],axis=1)\n            featureScores.columns = ['Specs','Score'] #naming the dataframe columns\n            return featureScores\n        except Exception as e:\n            self.log_writer.log(self.log_path, e)\n\n    def vif_score(self,df):\n        try:\n            X=df.drop(columns='salary')\n            X = X.astype(int)\n            vif = pd.DataFrame()\n            vif['VIF'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]\n            vif['variable'] = X.columns\n            return vif\n        except Exception as e:\n            self.log_writer.log(self.log_path, 
e)","repo_name":"daksha-2001/internship","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37959907002","text":"import time\n\ndef getChineseLocalStrTime()->(str,str):\n t = time.localtime()\n year = t.tm_year\n mon = t.tm_mon\n day = t.tm_mday\n H = t.tm_hour\n M = t.tm_min\n S = t.tm_sec\n date = str(year)+\"年\"+str(mon)+\"月\"+str(day)+\"日\"\n time_ = str(H)+\"时\"+str(M)+\"分\"+str(S)+\"秒\"\n return date,time_","repo_name":"lxy1492/SportsPrescription","sub_path":"Tools/strTime.py","file_name":"strTime.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20610192413","text":"import os.path as op\nimport nipype.interfaces.io as nio # Data i/o\nimport nipype.interfaces.utility as util # utility\nimport nipype.pipeline.engine as pe # pypeline engine\nimport nipype.interfaces.fsl as fsl\n\nfsl.FSLCommand.set_default_output_type('NIFTI_GZ')\n\nfrom coma.workflows.dti.tracking import anatomically_constrained_tracking\n\nfrom coma.datasets import sample\ndata_path = sample.data_path()\n\nname = 'example_fiber_track'\n\nsubjects_dir = op.join(data_path,\"subjects\")\n\ninfo = dict(dwi=[['subject_id', 'dwi']],\n bvecs=[['subject_id', 'bvecs']],\n bvals=[['subject_id', 'bvals']],\n single_fiber_mask=[['subject_id', 'subject_id', 'SingleFiberMask_dwi']],\n wm_mask=[['subject_id', 'subject_id', 'wm_seed_mask_dwi']],\n termination_mask=[['subject_id', 'subject_id', 'term_mask_dwi']],\n registration_matrix_file=[['subject_id', 'subject_id', 'dwi_to_t1_matrix']],\n registration_image_file=[['subject_id', 'subject_id', 'T1']])\n\nsubject_list = ['Bend1']\n\ninfosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id', 'subjects_dir']),\n name=\"infosource\")\n\ninfosource.iterables = ('subject_id', subject_list)\ninfosource.inputs.subjects_dir = subjects_dir\ndatasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],\n outfields=info.keys()),\n name='datasource')\n\ndatasource.inputs.template = \"%s/%s\"\ndatasource.inputs.base_directory = data_path\ndatasource.inputs.field_template = dict(dwi='data/%s/%s.nii.gz', bvecs='data/%s/%s', bvals='data/%s/%s',\n single_fiber_mask='data/%s/%s_%s.nii.gz', wm_mask='data/%s/%s_%s.nii.gz', termination_mask='data/%s/%s_%s.nii.gz',\n registration_matrix_file='data/%s/%s_%s.mat', registration_image_file='data/%s/%s_%s.nii.gz')\ndatasource.inputs.template_args = info\ndatasource.inputs.sort_filelist = True\n\ndatasink = pe.Node(interface=nio.DataSink(), name=\"datasink\")\ndatasink.inputs.base_directory = op.abspath(name)\n\n# For testing\ndatasink.overwrite = True\n\ndwi = anatomically_constrained_tracking()\n\n# For C.H.U. 
Liege Siemens Allegra 3T\ndwi.inputs.fsl2mrtrix.invert_x = True\ndwi.inputs.fsl2mrtrix.invert_y = False\ndwi.inputs.fsl2mrtrix.invert_z = False\n\n# Set a number of tracks\ndwi.inputs.CSDstreamtrack.desired_number_of_tracks = 10000\ndwi.inputs.CSDstreamtrack.minimum_tract_length = 10\n\nworkflow = pe.Workflow(name=name)\nworkflow.base_dir = name\n\nworkflow.connect([(infosource, datasource, [('subject_id', 'subject_id')])])\nworkflow.connect([(infosource, datasink, [('subject_id', '@subject_id')])])\n\nworkflow.connect([(infosource, dwi, [('subject_id', 'inputnode.subject_id')])])\n\nworkflow.connect([(datasource, dwi, [('dwi', 'inputnode.dwi')])])\nworkflow.connect([(datasource, dwi, [('bvecs', 'inputnode.bvecs')])])\nworkflow.connect([(datasource, dwi, [('bvals', 'inputnode.bvals')])])\nworkflow.connect([(datasource, dwi, [('single_fiber_mask', 'inputnode.single_fiber_mask')])])\nworkflow.connect([(datasource, dwi, [('wm_mask', 'inputnode.wm_mask')])])\nworkflow.connect([(datasource, dwi, [('termination_mask', 'inputnode.termination_mask')])])\nworkflow.connect([(datasource, dwi, [('registration_matrix_file', 'inputnode.registration_matrix_file')])])\nworkflow.connect([(datasource, dwi, [('registration_image_file', 'inputnode.registration_image_file')])])\n\nworkflow.connect([(dwi, datasink, [(\"outputnode.fiber_odfs\", \"subject_id.@fiber_odfs\"),\n (\"outputnode.fiber_tracks_tck_dwi\", \"subject_id.@fiber_tracks_tck_dwi\"),\n (\"outputnode.fiber_tracks_trk_t1\", \"subject_id.@fiber_tracks_trk_t1\"),\n ])])\n\nworkflow.config['execution'] = {'remove_unnecessary_outputs': 'false',\n 'hash_method': 'timestamp'}\nworkflow.write_graph()\nworkflow.run()\n#workflow.run(plugin='MultiProc', plugin_args={'n_procs' : 3})\n","repo_name":"GIGA-Consciousness/structurefunction","sub_path":"examples/example_fiber_track.py","file_name":"example_fiber_track.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"13314498286","text":"import numpy as np\nimport random\n\n\nclass DynaQAgent:\n\n def __init__(self,\n exploration_rate=0.5,\n learning_rate=0.01,\n discount_factor=0.99,\n number_of_states=16,\n number_of_actions=4,\n n=10):\n self.exploration_rate = exploration_rate\n self.learning_rate = learning_rate\n self.discount_factor = discount_factor\n self.number_of_states = number_of_states\n self.number_of_actions = number_of_actions\n self.n = n\n\n self.q_table = np.full((self.number_of_states, self.number_of_actions), fill_value=10)\n if self.n > 0:\n self.model = np.zeros((self.number_of_states, self.number_of_actions, 2))\n self.observed_state_action_pairs = set()\n\n def reset(self):\n self.q_table = np.zeros((self.number_of_states, self.number_of_actions))\n if self.n > 0:\n self.model = np.zeros((self.number_of_states, self.number_of_actions, 2))\n self.observed_state_action_pairs = set()\n\n def choose_action(self, state):\n\n if np.random.uniform(0, 1) <= self.exploration_rate:\n action_index = np.random.randint(self.number_of_actions)\n else:\n action_index = self.choose_action_greedy(state)\n\n return action_index\n\n def choose_action_greedy(self, state):\n return np.argmax(self.q_table[state])\n\n def update_q(self, state, action_index, reward, next_state):\n self.q_learning_update(state, action_index, reward, next_state)\n\n if self.n > 0:\n self.dyna_q_update(state, action_index, reward, next_state)\n\n def q_learning_update(self, state, action_index, reward, next_state):\n 
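# standard tabular Q-learning update: Q(s,a) <- Q(s,a) + lr * (r + gamma * max_a' Q(s',a') - Q(s,a))\n 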
current_q_value = self.q_table[state, action_index]\n next_q_max = np.max(self.q_table[next_state])\n new_q_value = (\n current_q_value + self.learning_rate * (reward + self.discount_factor * next_q_max - current_q_value)\n )\n self.q_table[state, action_index] = new_q_value\n\n def dyna_q_update(self, state, action_index, reward, next_state):\n self.model[state, action_index, 0] = reward\n self.model[state, action_index, 1] = next_state\n self.observed_state_action_pairs.add((state, action_index))\n\n for s in range(self.n):\n simulated_state_action_pair = random.choice(tuple(self.observed_state_action_pairs))\n simulated_state = simulated_state_action_pair[0]\n simulated_action = simulated_state_action_pair[1]\n simulated_reward = self.model[simulated_state, simulated_action, 0]\n simulated_next_state = self.model[simulated_state, simulated_action, 1]\n self.q_learning_update(simulated_state, simulated_action, simulated_reward, int(simulated_next_state))\n","repo_name":"BimoBu/bachelor-thesis","sub_path":"dyna_q.py","file_name":"dyna_q.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72262428354","text":"import pandas as pd\nimport yaml\nimport json\n\n\nwith open(r'Data/Original/personality/ai.yml') as file:\n documents = yaml.full_load(file)\n QuestionConv = []\n rightFormat = []\n i = 1\n \n #print(item, \":\", doc)\n #turns the dictionary into a string\n QuestionConv = json.dumps(documents)\n #print(QuestionConv)\n #splits the sentence by looking for ],\n QuestionConv = QuestionConv.split(\"], \")\n\n #this splits the data so that I can get it as individual strings\n while(i != len(QuestionConv)):\n rightFormat.append(QuestionConv[i])\n i+=1\n \n \n #removes the [ and replaces it with an empty string\n rightFormat = [k.replace(\"[\", \"\") for k in rightFormat]\n rightFormat = [k.replace('\"', \"\") for k in rightFormat]\n #removing the conversations from the start\n rightFormat = [k.replace('conversations: ', \"\") for k in rightFormat]\n\n # define punctuation\n punctuations = ''','''\n rightFormat = [k.replace(punctuations, \" BREAKHERE \") for k in rightFormat]\n\n \n print(rightFormat)\n \n #dataNow = pd.DataFrame(rightFormat , columns = ['Question'])\n #print(dataNow)\n #dataNow= rightFormat.apply(lambda x:remove_punctuation_tokenize(str(x)))\n\n\n\n\n\n\n ","repo_name":"erifejams/AI-Healthcare-Pro-","sub_path":"Coding/YamlToCsv.py","file_name":"YamlToCsv.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13737463129","text":"# -*- coding: utf-8 -*-\n'''\n@Time : 2018/3/28 19:38\n@Author : Xiaopf\n@Email : xiaopf@aukeys.com\n@File : 03-user_list.py\n'''\n\n'''\nExercise 3:\n Have the user enter a string in the format \"find/list/add/delete/update/exit\" in the console.\n If the input is add, have the user go on to enter a username, age and contact info, and store the user data (username, age, contact info) in a list; before putting it into the list, check that the username is not a duplicate, and if it is, report that the user already exists.\n If the input is delete, have the user enter a \"username\" string and look it up in the list; if the record exists, remove it, and if it does not, report that it does not exist.\n If the input is update, have the user enter a username, age and contact info, and look the username up in the list; if the record exists, update it to the new (username, age, contact info), and if it does not, report that it does not exist.\n If the user enters find, have the user enter a \"username\", look up the record whose username equals that string and print it.\n If the user enters list, print all user records;\n the first printed row is the field description, and the user data starts from the second row.\n If the user enters exit, print that the program is exiting, and exit.\n'''\n\nuserlist = []\nactionlist = ['find','list','add','del','update','exit']\n\n# formatting\ntpl = '|{:^10}|{:^5}|{:^15}|'\nheader = tpl.format('name','age','phone')\nsplitline = '-' * len(header)\nwhile True:\n action = 
input(\"输入\\\"find/list/add/del/update/exit\\\" >>: \").strip()\n if action not in actionlist:\n print('输入错误,请重新输入!')\n elif action == 'add':\n txt = input('请输入用户名,年龄,联系方式 >>: ')\n exsit_val = tuple(txt.split(','))\n flag = False\n if len(exsit_val) != 3:\n print('输入错误,请重新输入!')\n else:\n for user in userlist:\n if user[0] == exsit_val[0]:\n flag = True\n print('用户已存在,请重新输入!')\n break\n if not flag:\n userlist.append(exsit_val)\n print('添加成功!%s' % userlist)\n elif action == 'del':\n name = input('请输入要删除的用户名 >>:').strip()\n flag = False\n for user in userlist:\n if name == user[0]:\n flag = True\n userlist.remove(user)\n print('已删除!%s' % userlist)\n break\n if not flag:\n print('用户不存在')\n elif action == 'update':\n name = input('请输入用户名 >>: ').strip()\n flag = False\n for user in userlist:\n if user[0] == name:\n flag = True\n txt = input('请输入新用户名,年龄,联系方式 >>: ')\n exsit_val = tuple(txt.split(','))\n userlist.remove(user)\n userlist.append(exsit_val)\n print('用户已更新')\n break\n if not flag:\n print('用户不存在')\n elif action == 'find':\n name = input('请输入用户名 >>: ').strip()\n flag = False\n print(splitline)\n print(header)\n print(splitline)\n for user in userlist:\n if user[0] == name:\n flag = True\n print(tpl.format(user[0],user[1],user[2]))\n print(splitline)\n if not flag:\n print('用户不存在')\n elif action =='list':\n if len(userlist) == 0:\n print('nothing')\n else:\n print(splitline)\n print(header)\n print(splitline)\n for user in userlist:\n print(tpl.format(user[0], user[1], user[2]))\n\n print(splitline)\n elif action == 'exit':\n print(header)\n for user in userlist:\n print(tpl.format(user[0], user[1], user[2]))\n\n break","repo_name":"gitxiaojun/actual-18-homework","sub_path":"02/xiaopengfei/03-user_list.py","file_name":"03-user_list.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"71101950593","text":"from DerivationFrameworkCore.DerivationFrameworkMaster import *\n\nisSimulation = globalflags.DataSource()=='geant4'\n\n\n#====================================================================\n# AUGMENTATION TOOLS \n#====================================================================\n## 1/ setup vertexing tools and services\ninclude(\"DerivationFrameworkBPhys/configureVertexing.py\")\nBPHY23_VertexTools = BPHYVertexTools(\"BPHY23\")\n\n# mass bounds and constants used in the following\nX_hi = 141000.0\n\nJpsi_lo = 2600.0\nJpsi_hi = 3500.0\nZc_lo = 3650.0\nZc_hi = 4150.0\nPsi_lo = 3350.0\nPsi_hi = 4200.0\nB_lo = 4850.0\nB_hi = 5700.0\nKstar_lo = 640.0\nKstar_hi = 1140.0\nBs0_lo = 4950.0\nBs0_hi = 5800.0\nPhi_lo = 770.0\nPhi_hi = 1270.0\nUpsi_lo = 8900.0\nUpsi_hi = 9900.0\nUpsi2S_lo = 9550.0\nUpsi2S_hi = 10500.0\nDs_lo = 1660.0\nDs_hi = 2230.0\n\nMumass = 105.658\nPimass = 139.570\nKmass = 493.677\nKstarmass = 895.55\nPhimass = 1019.461\nDpmmass = 1869.66\nDspmmass = 1968.35\nJpsimass = 3096.916\nPsi2Smass = 3686.10\nX3872mass = 3871.65\nZcmass = 3887.1\nBpmmass = 5279.34\nB0mass = 5279.66\nBs0mass = 5366.92\nUpsimass = 9460.30\nUpsi2Smass = 10023.26\n\n#--------------------------------------------------------------------\n## 2/ Setup the vertex fitter tools (e.g. 
JpsiFinder, JpsiPlus1Track, etc).\n## These are general tools independent of DerivationFramework that do the \n## actual vertex fitting and some pre-selection.\n\n# invMass range covers phi, J/psi, psi(2S), Upsi(1S) and Upsi(2S)\nfrom JpsiUpsilonTools.JpsiUpsilonToolsConf import Analysis__JpsiFinder\nBPHY23JpsiFinder = Analysis__JpsiFinder(\n name = \"BPHY23JpsiFinder\",\n muAndMu = True,\n muAndTrack = False,\n TrackAndTrack = False,\n assumeDiMuons = True, # If true, will assume dimu hypothesis and use PDG value for mu mass\n trackThresholdPt = 2400.,\n invMassLower = Phi_lo,\n invMassUpper = Upsi2S_hi,\n Chi2Cut = 10.,\n oppChargesOnly\t = True,\n atLeastOneComb = True,\n useCombinedMeasurement = False, # Only takes effect if combOnly=True\t\n muonCollectionKey = \"Muons\",\n TrackParticleCollection = \"InDetTrackParticles\",\n V0VertexFitterTool = BPHY23_VertexTools.TrkV0Fitter, # V0 vertex fitter\n useV0Fitter = False, # if False a TrkVertexFitterTool will be used\n TrkVertexFitterTool = BPHY23_VertexTools.TrkVKalVrtFitter, # VKalVrt vertex fitter\n TrackSelectorTool = BPHY23_VertexTools.InDetTrackSelectorTool,\n VertexPointEstimator = BPHY23_VertexTools.VtxPointEstimator,\n useMCPCuts = False )\nToolSvc += BPHY23JpsiFinder\n\n#--------------------------------------------------------------------\n## 3/ setup the vertex reconstruction \"call\" tool(s). They are part of the derivation framework.\n## These Augmentation tools add output vertex collection(s) into the StoreGate and add basic \n## decorations which do not depend on the vertex mass hypothesis (e.g. lxy, ptError, etc).\n## There should be one tool per topology, i.e. Jpsi and Psi(2S) do not need two instances of the\n## Reco tool if the JpsiFinder mass window is wide enough.\n\n# https://gitlab.cern.ch/atlas/athena/-/blob/21.2/PhysicsAnalysis/DerivationFramework/DerivationFrameworkBPhys/src/Reco_mumu.cxx\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__Reco_mumu\nBPHY23_Reco_mumu = DerivationFramework__Reco_mumu(\n name = \"BPHY23_Reco_mumu\",\n JpsiFinder = BPHY23JpsiFinder,\n OutputVtxContainerName = \"BPHY23OniaCandidates\",\n PVContainerName = \"PrimaryVertices\",\n RefPVContainerName = \"SHOULDNOTBEUSED\",\n DoVertexType = 1)\nToolSvc += BPHY23_Reco_mumu\n\n## 4/ setup a new vertexing tool (necessary due to use of mass constraint) \nfrom TrkVKalVrtFitter.TrkVKalVrtFitterConf import Trk__TrkVKalVrtFitter\nBPHY23VertexFit = Trk__TrkVKalVrtFitter(\n name = \"BPHY23VertexFit\",\n Extrapolator = BPHY23_VertexTools.InDetExtrapolator,\n FirstMeasuredPoint = False, # use Perigee strategy\n MakeExtendedVertex = True)\nToolSvc += BPHY23VertexFit\n\n## 5/ setup the Jpsi+2 track finder\n# https://gitlab.cern.ch/atlas/athena/-/blob/21.2/PhysicsAnalysis/JpsiUpsilonTools/src/JpsiPlus2Tracks.cxx\nfrom JpsiUpsilonTools.JpsiUpsilonToolsConf import Analysis__JpsiPlus2Tracks\n\n# X(3872), Psi(2S) -> J/psi + pi pi\nBPHY23PsiX3872_Jpsi2Trk = Analysis__JpsiPlus2Tracks(\n name = \"BPHY23PsiX3872_Jpsi2Trk\",\n kaonkaonHypothesis\t\t = False,\n pionpionHypothesis = True,\n kaonpionHypothesis = False,\n kaonprotonHypothesis = False,\n trkThresholdPt\t\t\t= 380.,\n trkMaxEta\t\t \t = 2.6,\n oppChargesOnly = False,\n JpsiMassLower = Jpsi_lo,\n JpsiMassUpper = Jpsi_hi,\n TrkQuadrupletMassLower = Psi_lo,\n TrkQuadrupletMassUpper = Psi_hi,\n Chi2Cut = 10.,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi\t\t\t= \"Muons\",\n ExcludeJpsiMuonsOnly = 
True,\n TrkVertexFitterTool\t\t = BPHY23VertexFit,\n TrackSelectorTool\t\t = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint\t\t = False)\nToolSvc += BPHY23PsiX3872_Jpsi2Trk\n\n# Bs0 -> J/psi + K K\nBPHY23Bs0_Jpsi2Trk = Analysis__JpsiPlus2Tracks(\n name = \"BPHY23Bs0_Jpsi2Trk\",\n kaonkaonHypothesis\t\t = True,\n pionpionHypothesis = False,\n kaonpionHypothesis = False,\n kaonprotonHypothesis = False,\n trkThresholdPt\t\t\t= 380.,\n trkMaxEta\t\t \t = 2.6,\n oppChargesOnly = False,\n JpsiMassLower = Jpsi_lo,\n JpsiMassUpper = Jpsi_hi,\n TrkQuadrupletMassLower = Bs0_lo,\n TrkQuadrupletMassUpper = Bs0_hi,\n Chi2Cut = 10.,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi\t\t\t= \"Muons\",\n ExcludeJpsiMuonsOnly = True,\n TrkVertexFitterTool\t\t = BPHY23VertexFit,\n TrackSelectorTool\t\t = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint\t\t = False)\nToolSvc += BPHY23Bs0_Jpsi2Trk\n\n# B0 -> J/psi + K pi\nBPHY23B0_Jpsi2Trk = Analysis__JpsiPlus2Tracks(\n name = \"BPHY23B0_Jpsi2Trk\",\n kaonkaonHypothesis\t\t = False,\n pionpionHypothesis = False,\n kaonpionHypothesis = True,\n kaonprotonHypothesis = False,\n trkThresholdPt\t\t\t= 380.,\n trkMaxEta\t\t \t = 2.6,\n oppChargesOnly = False,\n JpsiMassLower = Jpsi_lo,\n JpsiMassUpper = Jpsi_hi,\n TrkQuadrupletMassLower = B_lo,\n TrkQuadrupletMassUpper = B_hi,\n Chi2Cut = 10.,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi\t\t\t= \"Muons\",\n ExcludeJpsiMuonsOnly = True,\n TrkVertexFitterTool\t\t = BPHY23VertexFit,\n TrackSelectorTool\t\t = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint\t\t = False)\nToolSvc += BPHY23B0_Jpsi2Trk\n\n# Upsi2S -> Upsi pi pi\nBPHY23Upsi2S_Jpsi2Trk = Analysis__JpsiPlus2Tracks(\n name = \"BPHY23Upsi2S_Jpsi2Trk\",\n kaonkaonHypothesis\t\t = False,\n pionpionHypothesis = True,\n kaonpionHypothesis = False,\n kaonprotonHypothesis = False,\n trkThresholdPt\t\t\t= 380.,\n trkMaxEta\t\t \t = 2.6,\n oppChargesOnly = False,\n JpsiMassLower = Upsi_lo,\n JpsiMassUpper = Upsi_hi,\n TrkQuadrupletMassLower = Upsi2S_lo,\n TrkQuadrupletMassUpper = Upsi2S_hi,\n Chi2Cut = 10.,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi\t\t\t= \"Muons\",\n ExcludeJpsiMuonsOnly = True,\n TrkVertexFitterTool\t\t = BPHY23VertexFit,\n TrackSelectorTool\t\t = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint\t\t = False)\nToolSvc += BPHY23Upsi2S_Jpsi2Trk\n\nfrom JpsiUpsilonTools.JpsiUpsilonToolsConf import Analysis__JpsiPlus1Track\n\n# Zc(3900)+ -> J/psi pi\nBPHY23Zc3900_Jpsi1Trk = Analysis__JpsiPlus1Track(\n name = \"BPHY23Zc3900_Jpsi1Trk\",\n pionHypothesis = True,\n kaonHypothesis = False,\n trkThresholdPt = 380.,\n trkMaxEta = 2.6,\n JpsiMassLower = Jpsi_lo,\n JpsiMassUpper = Jpsi_hi,\n TrkTrippletMassLower = Zc_lo,\n TrkTrippletMassUpper = Zc_hi,\n Chi2Cut = 10.0,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi = \"Muons\",\n ExcludeJpsiMuonsOnly = True,\n TrkVertexFitterTool = BPHY23VertexFit,\n TrackSelectorTool = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint = False)\nToolSvc += BPHY23Zc3900_Jpsi1Trk\n\n# B+ -> J/psi K\nBPHY23Bpm_Jpsi1Trk = Analysis__JpsiPlus1Track(\n name = \"BPHY23Bpm_Jpsi1Trk\",\n pionHypothesis = False,\n kaonHypothesis = True,\n trkThresholdPt = 380.,\n trkMaxEta = 2.6,\n 
JpsiMassLower = Jpsi_lo,\n JpsiMassUpper = Jpsi_hi,\n TrkTrippletMassLower = B_lo,\n TrkTrippletMassUpper = B_hi,\n Chi2Cut = 10.0,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi = \"Muons\",\n ExcludeJpsiMuonsOnly = True,\n TrkVertexFitterTool = BPHY23VertexFit,\n TrackSelectorTool = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint = False)\nToolSvc += BPHY23Bpm_Jpsi1Trk\n\n# D+, Ds+ -> phi pi\nBPHY23DpmDs_Jpsi1Trk = Analysis__JpsiPlus1Track(\n name = \"BPHY23DpmDs_Jpsi1Trk\",\n pionHypothesis = True,\n kaonHypothesis = False,\n trkThresholdPt = 380.0,\n trkMaxEta = 2.6,\n JpsiMassLower = Phi_lo,\n JpsiMassUpper = Phi_hi,\n TrkTrippletMassLower = Ds_lo,\n TrkTrippletMassUpper = Ds_hi,\n Chi2Cut = 10.0,\n JpsiContainerKey = \"BPHY23OniaCandidates\",\n TrackParticleCollection = \"InDetTrackParticles\",\n MuonsUsedInJpsi = \"Muons\",\n ExcludeJpsiMuonsOnly = True,\n TrkVertexFitterTool = BPHY23VertexFit,\n TrackSelectorTool = BPHY23_VertexTools.InDetTrackSelectorTool,\n UseMassConstraint = False)\nToolSvc += BPHY23DpmDs_Jpsi1Trk\n\n## 6/ setup the combined augmentation/skimming tool\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__Reco_Vertex\nBPHY23FourTrackReco_PsiX3872 = DerivationFramework__Reco_Vertex(\n name = \"BPHY23FourTrackReco_PsiX3872\",\n VertexSearchTool = BPHY23PsiX3872_Jpsi2Trk,\n OutputVtxContainerName = \"BPHY23FourTrack_PsiX3872\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23FourTrackReco_PsiX3872\n\nBPHY23FourTrackReco_Bs0 = DerivationFramework__Reco_Vertex(\n name = \"BPHY23FourTrackReco_Bs0\",\n VertexSearchTool = BPHY23Bs0_Jpsi2Trk,\n OutputVtxContainerName = \"BPHY23FourTrack_Bs0\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23FourTrackReco_Bs0\n\nBPHY23FourTrackReco_B0 = DerivationFramework__Reco_Vertex(\n name = \"BPHY23FourTrackReco_B0\",\n VertexSearchTool = BPHY23B0_Jpsi2Trk,\n OutputVtxContainerName = \"BPHY23FourTrack_B0\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23FourTrackReco_B0\n\nBPHY23FourTrackReco_Upsi2S = DerivationFramework__Reco_Vertex(\n name = \"BPHY23FourTrackReco_Upsi2S\",\n VertexSearchTool = BPHY23Upsi2S_Jpsi2Trk,\n OutputVtxContainerName = \"BPHY23FourTrack_Upsi2S\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23FourTrackReco_Upsi2S\n\nBPHY23ThreeTrackReco_Zc3900 = DerivationFramework__Reco_Vertex(\n name = \"BPHY23ThreeTrackReco_Zc3900\",\n VertexSearchTool = BPHY23Zc3900_Jpsi1Trk,\n OutputVtxContainerName = \"BPHY23ThreeTrack_Zc3900\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23ThreeTrackReco_Zc3900\n\nBPHY23ThreeTrackReco_Bpm = DerivationFramework__Reco_Vertex(\n name = \"BPHY23ThreeTrackReco_Bpm\",\n VertexSearchTool = BPHY23Bpm_Jpsi1Trk,\n OutputVtxContainerName = \"BPHY23ThreeTrack_Bpm\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23ThreeTrackReco_Bpm\n\nBPHY23ThreeTrackReco_DpmDs = DerivationFramework__Reco_Vertex(\n name = \"BPHY23ThreeTrackReco_DpmDs\",\n 
VertexSearchTool = BPHY23DpmDs_Jpsi1Trk,\n OutputVtxContainerName = \"BPHY23ThreeTrack_DpmDs\",\n PVContainerName = \"PrimaryVertices\",\n V0Tools = TrackingCommon.getV0Tools(),\n RefitPV = False,\n DoVertexType = 0)\nToolSvc += BPHY23ThreeTrackReco_DpmDs\n\n# revertex with mass constraints to reduce combinatorics\n# Psi(2S) -> J/psi pi pi\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__ReVertex\nBPHY23Rev_Psi = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Psi\",\n InputVtxContainerName = \"BPHY23FourTrack_PsiX3872\",\n TrackIndices = [ 0, 1, 2, 3 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = Psi2Smass,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Pimass, Pimass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Psi\")\nToolSvc += BPHY23Rev_Psi\n\n# X(3872) -> J/psi pi pi\nBPHY23Rev_X3872 = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_X3872\",\n InputVtxContainerName = \"BPHY23FourTrack_PsiX3872\",\n TrackIndices = [ 0, 1, 2, 3 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = X3872mass,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Pimass, Pimass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_X3872\")\nToolSvc += BPHY23Rev_X3872\n\n# Bs0 -> J/psi K K\nBPHY23Rev_Bs0 = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Bs0\",\n InputVtxContainerName = \"BPHY23FourTrack_Bs0\",\n TrackIndices = [ 0, 1, 2, 3 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = Bs0mass,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Kmass, Kmass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Bs0\")\nToolSvc += BPHY23Rev_Bs0\n\n# B0 -> J/psi K pi\nBPHY23Rev_B0Kpi = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_B0Kpi\",\n InputVtxContainerName = \"BPHY23FourTrack_B0\",\n TrackIndices = [ 0, 1, 2, 3 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = B0mass,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Kmass, Pimass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_B0Kpi\")\nToolSvc += BPHY23Rev_B0Kpi\n\n# B0 -> J/psi pi K\nBPHY23Rev_B0piK = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_B0piK\",\n InputVtxContainerName = \"BPHY23FourTrack_B0\",\n TrackIndices = [ 0, 1, 2, 3 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = B0mass,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Pimass, Kmass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_B0piK\")\nToolSvc += BPHY23Rev_B0piK\n\n# Upsi2S -> Upsi1S pi pi\nBPHY23Rev_Upsi2S = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Upsi2S\",\n InputVtxContainerName = \"BPHY23FourTrack_Upsi2S\",\n TrackIndices = [ 0, 1, 2, 3 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = Upsi2Smass,\n SubVertexMass = Upsimass,\n MassInputParticles = [Mumass, Mumass, Pimass, Pimass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Upsi2S\")\nToolSvc += BPHY23Rev_Upsi2S\n\n# Zc3900 -> J/psi 
pi\nBPHY23Rev_Zc3900 = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Zc3900\",\n InputVtxContainerName = \"BPHY23ThreeTrack_Zc3900\",\n TrackIndices = [ 0, 1, 2 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Pimass],\n Chi2Cut = 15.,\n BMassLower = Zc_lo,\n BMassUpper = Zc_hi,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Zc3900\")\nToolSvc += BPHY23Rev_Zc3900\n\n# Bpm -> J/psi K\nBPHY23Rev_Bpm = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Bpm\",\n InputVtxContainerName = \"BPHY23ThreeTrack_Bpm\",\n TrackIndices = [ 0, 1, 2 ],\n SubVertexTrackIndices = [ 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = Bpmmass,\n SubVertexMass = Jpsimass,\n MassInputParticles = [Mumass, Mumass, Kmass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Bpm\")\nToolSvc += BPHY23Rev_Bpm\n\n# Ds -> phi pi\nBPHY23Rev_Ds = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Ds\",\n InputVtxContainerName = \"BPHY23ThreeTrack_DpmDs\",\n TrackIndices = [ 0, 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = Dspmmass,\n MassInputParticles = [Mumass, Mumass, Pimass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Ds\")\nToolSvc += BPHY23Rev_Ds\n\n# Dpm -> phi pi\nBPHY23Rev_Dpm = DerivationFramework__ReVertex(\n name = \"BPHY23Rev_Dpm\",\n InputVtxContainerName = \"BPHY23ThreeTrack_DpmDs\",\n TrackIndices = [ 0, 1, 2 ],\n RefitPV = False,\n UseMassConstraint = True,\n VertexMass = Dpmmass,\n MassInputParticles = [Mumass, Mumass, Pimass],\n Chi2Cut = 15.,\n TrkVertexFitterTool\t = BPHY23VertexFit,\n OutputVtxContainerName = \"BPHY23Revtx_Dpm\")\nToolSvc += BPHY23Rev_Dpm\n\n\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__Select_onia2mumu\n\nBPHY23Select_Phi = DerivationFramework__Select_onia2mumu(\n name = \"BPHY23Select_Phi\",\n HypothesisName = \"Phi\",\n InputVtxContainerName = \"BPHY23OniaCandidates\",\n V0Tools = TrackingCommon.getV0Tools(),\n TrkMasses = [Mumass, Mumass],\n MassMin = Phi_lo,\n MassMax = Phi_hi,\n DoVertexType = 0)\nToolSvc += BPHY23Select_Phi\n\nBPHY23Select_Jpsi = DerivationFramework__Select_onia2mumu(\n name = \"BPHY23Select_Jpsi\",\n HypothesisName = \"Jpsi\",\n InputVtxContainerName = \"BPHY23OniaCandidates\",\n V0Tools = TrackingCommon.getV0Tools(),\n TrkMasses = [Mumass, Mumass],\n MassMin = Jpsi_lo,\n MassMax = Jpsi_hi,\n DoVertexType = 0)\nToolSvc += BPHY23Select_Jpsi\n\nBPHY23Select_Psi = DerivationFramework__Select_onia2mumu(\n name = \"BPHY23Select_Psi\",\n HypothesisName = \"Psi\",\n InputVtxContainerName = \"BPHY23OniaCandidates\",\n V0Tools = TrackingCommon.getV0Tools(),\n TrkMasses = [Mumass, Mumass],\n MassMin = Psi_lo,\n MassMax = Psi_hi,\n DoVertexType = 0)\nToolSvc += BPHY23Select_Psi\n\nBPHY23Select_Upsi = DerivationFramework__Select_onia2mumu(\n name = \"BPHY23Select_Upsi\",\n HypothesisName = \"Upsi\",\n InputVtxContainerName = \"BPHY23OniaCandidates\",\n V0Tools = TrackingCommon.getV0Tools(),\n TrkMasses = [Mumass, Mumass],\n MassMin = Upsi_lo,\n MassMax = Upsi_hi,\n DoVertexType = 0)\nToolSvc += BPHY23Select_Upsi\n\nBPHY23Select_Upsi2S = DerivationFramework__Select_onia2mumu(\n name = \"BPHY23Select_Upsi2S\",\n HypothesisName = \"Upsi2S\",\n InputVtxContainerName = \"BPHY23OniaCandidates\",\n V0Tools = TrackingCommon.getV0Tools(),\n 
TrkMasses = [Mumass, Mumass],\n MassMin = Upsi2S_lo,\n MassMax = Upsi2S_hi,\n DoVertexType = 0)\nToolSvc += BPHY23Select_Upsi2S\n\n########################\n### 2 trks + 0 trk ###\n########################\n\nlist_2trk0trk_hypo = [\"Psi2Phi0\", \"Psi2Jpsi0\", \"Psi2Psi0\", \"Psi2Upsi0\", \"Psi2Upsi2S0\",\n \"X3872Phi0\", \"X3872Jpsi0\", \"X3872Psi0\", \"X3872Upsi0\", \"X3872Upsi2S0\",\n \"Bs2KPhi0\", \"Bs2KJpsi0\", \"Bs2KPsi0\", \"Bs2KUpsi0\", \"Bs2KUpsi2S0\",\n \"B0KpiPhi0\", \"B0KpiJpsi0\", \"B0KpiPsi0\", \"B0KpiUpsi0\", \"B0KpiUpsi2S0\",\n \"B0piKPhi0\", \"B0piKJpsi0\", \"B0piKPsi0\", \"B0piKUpsi0\", \"B0piKUpsi2S0\",\n \"Upsi2S2Phi0\", \"Upsi2S2Jpsi0\", \"Upsi2S2Psi0\", \"Upsi2S2Upsi0\", \"Upsi2S2Upsi2S0\"]\nlist_2trk0trk_psiInput = [\"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\",\n \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\",\n \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\",\n \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\",\n \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\",\n \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\"]\nlist_2trk0trk_jpsiHypo = [\"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\"]\nlist_2trk0trk_jpsiMass = [Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Upsimass, Upsimass, Upsimass, Upsimass, Upsimass]\nlist_2trk0trk_psiMass = [Psi2Smass, Psi2Smass, Psi2Smass, Psi2Smass, Psi2Smass,\n X3872mass, X3872mass, X3872mass, X3872mass, X3872mass,\n Bs0mass, Bs0mass, Bs0mass, Bs0mass, Bs0mass,\n B0mass, B0mass, B0mass, B0mass, B0mass,\n B0mass, B0mass, B0mass, B0mass, B0mass,\n Upsi2Smass, Upsi2Smass, Upsi2Smass, Upsi2Smass, Upsi2Smass]\nlist_2trk0trk_jpsi2Mass = [Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass]\n\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__JpsiPlusPsiCascade\n\nlist_2trk0trk_obj = []\nfor hypo in list_2trk0trk_hypo:\n list_2trk0trk_obj.append( DerivationFramework__JpsiPlusPsiCascade(\"BPHY23\"+hypo) )\n\nToolSvc += list_2trk0trk_obj\n\nfor i in range(len(list_2trk0trk_obj)):\n list_2trk0trk_obj[i].HypothesisName = list_2trk0trk_hypo[i]\n list_2trk0trk_obj[i].TrkVertexFitterTool = BPHY23VertexFit\n list_2trk0trk_obj[i].JpsiVertices = \"BPHY23OniaCandidates\"\n list_2trk0trk_obj[i].JpsiVtxHypoNames = [list_2trk0trk_jpsiHypo[i]]\n list_2trk0trk_obj[i].PsiVertices = list_2trk0trk_psiInput[i]\n list_2trk0trk_obj[i].NumberOfPsiDaughters = 4\n list_2trk0trk_obj[i].MassLowerCut = 0.\n 
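# X_hi (141 GeV, from the mass bounds defined at the top of this file) caps the combined candidate mass\n 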
list_2trk0trk_obj[i].MassUpperCut = X_hi\n list_2trk0trk_obj[i].Chi2Cut = 30.\n list_2trk0trk_obj[i].MaxPsiCandidates = 15\n list_2trk0trk_obj[i].VxPrimaryCandidateName = \"PrimaryVertices\"\n list_2trk0trk_obj[i].RefPVContainerName = \"BPHY23\"+list_2trk0trk_hypo[i]+\"RefPrimaryVertices\"\n list_2trk0trk_obj[i].CascadeVertexCollections = [\"BPHY23\"+list_2trk0trk_hypo[i]+\"CascadeVtx1\",\"BPHY23\"+list_2trk0trk_hypo[i]+\"CascadeVtx2\",\"BPHY23\"+list_2trk0trk_hypo[i]+\"CascadeVtx3\"]\n list_2trk0trk_obj[i].RefitPV = True\n list_2trk0trk_obj[i].ApplyJpsiMassConstraint = True\n list_2trk0trk_obj[i].JpsiMass = list_2trk0trk_jpsiMass[i]\n list_2trk0trk_obj[i].ApplyPsiMassConstraint = True\n list_2trk0trk_obj[i].PsiMass = list_2trk0trk_psiMass[i]\n if list_2trk0trk_jpsi2Mass[i] != Phimass:\n list_2trk0trk_obj[i].ApplyJpsi2MassConstraint = True\n list_2trk0trk_obj[i].Jpsi2Mass = list_2trk0trk_jpsi2Mass[i]\n\n#######################\n### 1 trk + 0 trk ###\n#######################\n\nlist_1trk0trk_hypo = [\"Zc3900Phi0\", \"Zc3900Jpsi0\", \"Zc3900Psi0\", \"Zc3900Upsi0\", \"Zc3900Upsi2S0\",\n \"BpmPhi0\", \"BpmJpsi0\", \"BpmPsi0\", \"BpmUpsi0\", \"BpmUpsi2S0\",\n \"DsPhi0\", \"DsJpsi0\", \"DsPsi0\", \"DsUpsi0\", \"DsUpsi2S0\",\n \"DpmPhi0\", \"DpmJpsi0\", \"DpmPsi0\", \"DpmUpsi0\", \"DpmUpsi2S0\"]\nlist_1trk0trk_psiInput = [\"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\",\n \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Bpm\",\n \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Ds\",\n \"BPHY23Revtx_Dpm\", \"BPHY23Revtx_Dpm\", \"BPHY23Revtx_Dpm\", \"BPHY23Revtx_Dpm\", \"BPHY23Revtx_Dpm\"]\nlist_1trk0trk_jpsiHypo = [\"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\",\n \"Phi\", \"Jpsi\", \"Psi\", \"Upsi\", \"Upsi2S\"]\nlist_1trk0trk_jpsiMass = [Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Phimass, Phimass, Phimass, Phimass, Phimass,\n Phimass, Phimass, Phimass, Phimass, Phimass]\nlist_1trk0trk_psiMass = [Zcmass, Zcmass, Zcmass, Zcmass, Zcmass,\n Bpmmass, Bpmmass, Bpmmass, Bpmmass, Bpmmass,\n Dspmmass, Dspmmass, Dspmmass, Dspmmass, Dspmmass,\n Dpmmass, Dpmmass, Dpmmass, Dpmmass, Dpmmass]\nlist_1trk0trk_jpsi2Mass = [Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass,\n Phimass, Jpsimass, Psi2Smass, Upsimass, Upsi2Smass]\n\nlist_1trk0trk_obj = []\nfor hypo in list_1trk0trk_hypo:\n list_1trk0trk_obj.append( DerivationFramework__JpsiPlusPsiCascade(\"BPHY23\"+hypo) )\n\nToolSvc += list_1trk0trk_obj\n\nfor i in range(len(list_1trk0trk_obj)):\n list_1trk0trk_obj[i].HypothesisName = list_1trk0trk_hypo[i]\n list_1trk0trk_obj[i].TrkVertexFitterTool = BPHY23VertexFit\n list_1trk0trk_obj[i].JpsiVertices = \"BPHY23OniaCandidates\"\n list_1trk0trk_obj[i].JpsiVtxHypoNames = [list_1trk0trk_jpsiHypo[i]]\n list_1trk0trk_obj[i].PsiVertices = list_1trk0trk_psiInput[i]\n list_1trk0trk_obj[i].NumberOfPsiDaughters = 3\n list_1trk0trk_obj[i].MassLowerCut = 0.\n list_1trk0trk_obj[i].MassUpperCut = X_hi\n list_1trk0trk_obj[i].Chi2Cut = 30.\n list_1trk0trk_obj[i].MaxPsiCandidates = 15\n list_1trk0trk_obj[i].VxPrimaryCandidateName = \"PrimaryVertices\"\n list_1trk0trk_obj[i].RefPVContainerName = 
\"BPHY23\"+list_1trk0trk_hypo[i]+\"RefPrimaryVertices\"\n list_1trk0trk_obj[i].CascadeVertexCollections = [\"BPHY23\"+list_1trk0trk_hypo[i]+\"CascadeVtx1\",\"BPHY23\"+list_1trk0trk_hypo[i]+\"CascadeVtx2\",\"BPHY23\"+list_1trk0trk_hypo[i]+\"CascadeVtx3\"]\n list_1trk0trk_obj[i].RefitPV = True\n if list_1trk0trk_jpsiMass[i] != Phimass:\n list_1trk0trk_obj[i].ApplyJpsiMassConstraint = True\n list_1trk0trk_obj[i].JpsiMass = list_1trk0trk_jpsiMass[i]\n if list_1trk0trk_psiMass[i] != Zcmass:\n list_1trk0trk_obj[i].ApplyPsiMassConstraint = True\n list_1trk0trk_obj[i].PsiMass = list_1trk0trk_psiMass[i]\n if list_1trk0trk_jpsi2Mass[i] != Phimass:\n list_1trk0trk_obj[i].ApplyJpsi2MassConstraint = True\n list_1trk0trk_obj[i].Jpsi2Mass = list_1trk0trk_jpsi2Mass[i]\n\n#######################\n### 1 trk + 1 trk ###\n#######################\n\nlist_1trk1trk_hypo = [\"Zc3900Zc3900\", \"Zc3900Bpm\", \"Zc3900Ds\", \"Zc3900Dpm\",\n \"BpmBpm\", \"BpmDs\", \"BpmDpm\",\n \"DsDs\", \"DsDpm\",\n \"DpmDpm\"]\nlist_1trk1trk_psi1Input = [\"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Zc3900\",\n \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Bpm\",\n \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Ds\",\n \"BPHY23Revtx_Dpm\"]\nlist_1trk1trk_psi2Input = [\"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Dpm\"]\nlist_1trk1trk_jpsi1Mass = [Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass,\n Phimass, Phimass,\n Phimass]\nlist_1trk1trk_psi1Mass = [Zcmass, Zcmass, Zcmass, Zcmass,\n Bpmmass, Bpmmass, Bpmmass,\n Dspmmass, Dspmmass,\n Dpmmass]\nlist_1trk1trk_jpsi2Mass = [Jpsimass, Jpsimass, Phimass, Phimass,\n Jpsimass, Phimass, Phimass,\n Phimass, Phimass,\n Phimass]\nlist_1trk1trk_psi2Mass = [Zcmass, Bpmmass, Dspmmass, Dpmmass,\n Bpmmass, Dspmmass, Dpmmass,\n Dspmmass, Dpmmass,\n Dpmmass]\n\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__PsiPlusPsiCascade\n\nlist_1trk1trk_obj = []\nfor hypo in list_1trk1trk_hypo:\n list_1trk1trk_obj.append( DerivationFramework__PsiPlusPsiCascade(\"BPHY23\"+hypo) )\n\nToolSvc += list_1trk1trk_obj\n\nfor i in range(len(list_1trk1trk_obj)):\n list_1trk1trk_obj[i].HypothesisName = list_1trk1trk_hypo[i]\n list_1trk1trk_obj[i].TrkVertexFitterTool = BPHY23VertexFit\n list_1trk1trk_obj[i].Psi1Vertices = list_1trk1trk_psi1Input[i]\n list_1trk1trk_obj[i].Psi2Vertices = list_1trk1trk_psi2Input[i]\n list_1trk1trk_obj[i].NumberOfPsi1Daughters = 3\n list_1trk1trk_obj[i].NumberOfPsi2Daughters = 3\n list_1trk1trk_obj[i].MassLowerCut = 0.\n list_1trk1trk_obj[i].MassUpperCut = X_hi\n list_1trk1trk_obj[i].Chi2CutPsi1 = 12.\n list_1trk1trk_obj[i].Chi2CutPsi2 = 12.\n list_1trk1trk_obj[i].Chi2Cut = 30.\n list_1trk1trk_obj[i].MaxPsi1Candidates = 15\n list_1trk1trk_obj[i].MaxPsi2Candidates = 15\n list_1trk1trk_obj[i].RemoveDuplicatePairs = True\n list_1trk1trk_obj[i].VxPrimaryCandidateName = \"PrimaryVertices\"\n list_1trk1trk_obj[i].RefPVContainerName = \"BPHY23\"+list_1trk1trk_hypo[i]+\"RefPrimaryVertices\"\n list_1trk1trk_obj[i].CascadeVertexCollections = [\"BPHY23\"+list_1trk1trk_hypo[i]+\"CascadeVtx1\",\"BPHY23\"+list_1trk1trk_hypo[i]+\"CascadeVtx2\",\"BPHY23\"+list_1trk1trk_hypo[i]+\"CascadeVtx3\"]\n list_1trk1trk_obj[i].RefitPV = True\n if list_1trk1trk_jpsi1Mass[i] != Phimass:\n list_1trk1trk_obj[i].ApplyJpsi1MassConstraint = True\n 
list_1trk1trk_obj[i].Jpsi1Mass = list_1trk1trk_jpsi1Mass[i]\n if list_1trk1trk_psi1Mass[i] != Zcmass:\n list_1trk1trk_obj[i].ApplyPsi1MassConstraint = True\n list_1trk1trk_obj[i].Psi1Mass = list_1trk1trk_psi1Mass[i]\n if list_1trk1trk_jpsi2Mass[i] != Phimass:\n list_1trk1trk_obj[i].ApplyJpsi2MassConstraint = True\n list_1trk1trk_obj[i].Jpsi2Mass = list_1trk1trk_jpsi2Mass[i]\n if list_1trk1trk_psi2Mass[i] != Zcmass:\n list_1trk1trk_obj[i].ApplyPsi2MassConstraint = True\n list_1trk1trk_obj[i].Psi2Mass = list_1trk1trk_psi2Mass[i]\n\n########################\n### 2 trks + 1 trk ###\n########################\n\nlist_2trk1trk_hypo = [\"Psi2Zc3900\", \"Psi2Bpm\", \"Psi2Ds\", \"Psi2Dpm\",\n \"X3872Zc3900\", \"X3872Bpm\", \"X3872Ds\", \"X3872Dpm\",\n \"Bs2KZc3900\", \"Bs2KBpm\", \"Bs2KDs\", \"Bs2KDpm\",\n \"B0KpiZc3900\", \"B0KpiBpm\", \"B0KpiDs\", \"B0KpiDpm\",\n \"B0piKZc3900\", \"B0piKBpm\", \"B0piKDs\", \"B0piKDpm\",\n \"Upsi2S2Zc3900\", \"Upsi2S2Bpm\", \"Upsi2S2Ds\", \"Upsi2S2Dpm\"]\nlist_2trk1trk_psi1Input = [\"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\",\n \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\",\n \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\",\n \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\",\n \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\",\n \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\", \"BPHY23Revtx_Upsi2S\"]\nlist_2trk1trk_psi2Input = [\"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\",\n \"BPHY23Revtx_Zc3900\", \"BPHY23Revtx_Bpm\", \"BPHY23Revtx_Ds\", \"BPHY23Revtx_Dpm\"]\nlist_2trk1trk_jpsi1Mass = [Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Upsimass, Upsimass, Upsimass, Upsimass]\nlist_2trk1trk_psi1Mass = [Psi2Smass, Psi2Smass, Psi2Smass, Psi2Smass,\n X3872mass, X3872mass, X3872mass, X3872mass,\n Bs0mass, Bs0mass, Bs0mass, Bs0mass,\n B0mass, B0mass, B0mass, B0mass,\n B0mass, B0mass, B0mass, B0mass,\n Upsi2Smass, Upsi2Smass, Upsi2Smass, Upsi2Smass]\nlist_2trk1trk_jpsi2Mass = [Jpsimass, Jpsimass, Phimass, Phimass,\n Jpsimass, Jpsimass, Phimass, Phimass,\n Jpsimass, Jpsimass, Phimass, Phimass,\n Jpsimass, Jpsimass, Phimass, Phimass,\n Jpsimass, Jpsimass, Phimass, Phimass,\n Jpsimass, Jpsimass, Phimass, Phimass]\nlist_2trk1trk_psi2Mass = [Zcmass, Bpmmass, Dspmmass, Dpmmass,\n Zcmass, Bpmmass, Dspmmass, Dpmmass,\n Zcmass, Bpmmass, Dspmmass, Dpmmass,\n Zcmass, Bpmmass, Dspmmass, Dpmmass,\n Zcmass, Bpmmass, Dspmmass, Dpmmass,\n Zcmass, Bpmmass, Dspmmass, Dpmmass]\n\nlist_2trk1trk_obj = []\nfor hypo in list_2trk1trk_hypo:\n list_2trk1trk_obj.append( DerivationFramework__PsiPlusPsiCascade(\"BPHY23\"+hypo) )\n\nToolSvc += list_2trk1trk_obj\n\nfor i in range(len(list_2trk1trk_obj)):\n list_2trk1trk_obj[i].HypothesisName = list_2trk1trk_hypo[i]\n list_2trk1trk_obj[i].TrkVertexFitterTool = BPHY23VertexFit\n 
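# pair each four-track (2 trk) psi1 candidate with a three-track (1 trk) psi2 candidate\n 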
list_2trk1trk_obj[i].Psi1Vertices = list_2trk1trk_psi1Input[i]\n list_2trk1trk_obj[i].Psi2Vertices = list_2trk1trk_psi2Input[i]\n list_2trk1trk_obj[i].NumberOfPsi1Daughters = 4\n list_2trk1trk_obj[i].NumberOfPsi2Daughters = 3\n list_2trk1trk_obj[i].MassLowerCut = 0.\n list_2trk1trk_obj[i].MassUpperCut = X_hi\n list_2trk1trk_obj[i].Chi2CutPsi1 = 10.\n list_2trk1trk_obj[i].Chi2CutPsi2 = 12.\n list_2trk1trk_obj[i].Chi2Cut = 30.\n list_2trk1trk_obj[i].MaxPsi1Candidates = 15\n list_2trk1trk_obj[i].MaxPsi2Candidates = 15\n list_2trk1trk_obj[i].VxPrimaryCandidateName = \"PrimaryVertices\"\n list_2trk1trk_obj[i].RefPVContainerName = \"BPHY23\"+list_2trk1trk_hypo[i]+\"RefPrimaryVertices\"\n list_2trk1trk_obj[i].CascadeVertexCollections = [\"BPHY23\"+list_2trk1trk_hypo[i]+\"CascadeVtx1\",\"BPHY23\"+list_2trk1trk_hypo[i]+\"CascadeVtx2\",\"BPHY23\"+list_2trk1trk_hypo[i]+\"CascadeVtx3\"]\n list_2trk1trk_obj[i].RefitPV = True\n list_2trk1trk_obj[i].ApplyJpsi1MassConstraint = True\n list_2trk1trk_obj[i].Jpsi1Mass = list_2trk1trk_jpsi1Mass[i]\n list_2trk1trk_obj[i].ApplyPsi1MassConstraint = True\n list_2trk1trk_obj[i].Psi1Mass = list_2trk1trk_psi1Mass[i]\n if list_2trk1trk_jpsi2Mass[i] != Phimass:\n list_2trk1trk_obj[i].ApplyJpsi2MassConstraint = True\n list_2trk1trk_obj[i].Jpsi2Mass = list_2trk1trk_jpsi2Mass[i]\n if list_2trk1trk_psi2Mass[i] != Zcmass:\n list_2trk1trk_obj[i].ApplyPsi2MassConstraint = True\n list_2trk1trk_obj[i].Psi2Mass = list_2trk1trk_psi2Mass[i]\n\n#########################\n### 2 trks + 2 trks ###\n#########################\n\nlist_2trk2trk_hypo = [\"Psi2Psi2\", \"Psi2X3872\", \"Psi2Bs2K\", \"Psi2B0Kpi\", \"Psi2B0piK\", \"Psi2Upsi2S2\",\n \"X3872X3872\", \"X3872Bs2K\", \"X3872B0Kpi\", \"X3872B0piK\", \"X3872Upsi2S2\",\n \"Bs2KBs2K\", \"Bs2KB0Kpi\", \"Bs2KB0piK\", \"Bs2KUpsi2S2\",\n \"B0KpiB0Kpi\", \"B0KpiB0piK\", \"B0KpiUpsi2S2\",\n \"B0piKB0piK\", \"B0piKUpsi2S2\",\n \"Upsi2S2Upsi2S2\"]\nlist_2trk2trk_psi1Input = [\"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\", \"BPHY23Revtx_Psi\",\n \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_X3872\",\n \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_Bs0\",\n \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0Kpi\",\n \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_B0piK\",\n \"BPHY23Revtx_Upsi2S\"]\nlist_2trk2trk_psi2Input = [\"BPHY23Revtx_Psi\", \"BPHY23Revtx_X3872\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_Upsi2S\",\n \"BPHY23Revtx_X3872\", \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_Upsi2S\",\n \"BPHY23Revtx_Bs0\", \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_Upsi2S\",\n \"BPHY23Revtx_B0Kpi\", \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_Upsi2S\",\n \"BPHY23Revtx_B0piK\", \"BPHY23Revtx_Upsi2S\",\n \"BPHY23Revtx_Upsi2S\"]\nlist_2trk2trk_jpsi1Mass = [Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass, Jpsimass,\n Jpsimass, Jpsimass,\n Upsimass]\nlist_2trk2trk_psi1Mass = [Psi2Smass, Psi2Smass, Psi2Smass, Psi2Smass, Psi2Smass, Psi2Smass,\n X3872mass, X3872mass, X3872mass, X3872mass, X3872mass,\n Bs0mass, Bs0mass, Bs0mass, Bs0mass,\n B0mass, B0mass, B0mass,\n B0mass, B0mass,\n Upsi2Smass]\nlist_2trk2trk_jpsi2Mass = [Jpsimass, Jpsimass, Jpsimass, Jpsimass, Jpsimass, Upsimass,\n Jpsimass, Jpsimass, 
Jpsimass, Jpsimass, Upsimass,\n Jpsimass, Jpsimass, Jpsimass, Upsimass,\n Jpsimass, Jpsimass, Upsimass,\n Jpsimass, Upsimass,\n Upsimass]\nlist_2trk2trk_psi2Mass = [Psi2Smass, X3872mass, Bs0mass, B0mass, B0mass, Upsi2Smass,\n X3872mass, Bs0mass, B0mass, B0mass, Upsi2Smass,\n Bs0mass, B0mass, B0mass, Upsi2Smass,\n B0mass, B0mass, Upsi2Smass,\n B0mass, Upsi2Smass,\n Upsi2Smass]\n\nlist_2trk2trk_obj = []\nfor hypo in list_2trk2trk_hypo:\n list_2trk2trk_obj.append( DerivationFramework__PsiPlusPsiCascade(\"BPHY23\"+hypo) )\n\nToolSvc += list_2trk2trk_obj\n\nfor i in range(len(list_2trk2trk_obj)):\n list_2trk2trk_obj[i].HypothesisName = list_2trk2trk_hypo[i]\n list_2trk2trk_obj[i].TrkVertexFitterTool = BPHY23VertexFit\n list_2trk2trk_obj[i].Psi1Vertices = list_2trk2trk_psi1Input[i]\n list_2trk2trk_obj[i].Psi2Vertices = list_2trk2trk_psi2Input[i]\n list_2trk2trk_obj[i].NumberOfPsi1Daughters = 4\n list_2trk2trk_obj[i].NumberOfPsi2Daughters = 4\n list_2trk2trk_obj[i].MassLowerCut = 0.\n list_2trk2trk_obj[i].MassUpperCut = X_hi\n list_2trk2trk_obj[i].Chi2CutPsi1 = 10.\n list_2trk2trk_obj[i].Chi2CutPsi2 = 10.\n list_2trk2trk_obj[i].Chi2Cut = 30.\n list_2trk2trk_obj[i].MaxPsi1Candidates = 15\n list_2trk2trk_obj[i].MaxPsi2Candidates = 15\n list_2trk2trk_obj[i].RemoveDuplicatePairs = True\n list_2trk2trk_obj[i].VxPrimaryCandidateName = \"PrimaryVertices\"\n list_2trk2trk_obj[i].RefPVContainerName = \"BPHY23\"+list_2trk2trk_hypo[i]+\"RefPrimaryVertices\"\n list_2trk2trk_obj[i].CascadeVertexCollections = [\"BPHY23\"+list_2trk2trk_hypo[i]+\"CascadeVtx1\",\"BPHY23\"+list_2trk2trk_hypo[i]+\"CascadeVtx2\",\"BPHY23\"+list_2trk2trk_hypo[i]+\"CascadeVtx3\"]\n list_2trk2trk_obj[i].RefitPV = True\n list_2trk2trk_obj[i].ApplyJpsi1MassConstraint = True\n list_2trk2trk_obj[i].Jpsi1Mass = list_2trk2trk_jpsi1Mass[i]\n list_2trk2trk_obj[i].ApplyPsi1MassConstraint = True\n list_2trk2trk_obj[i].Psi1Mass = list_2trk2trk_psi1Mass[i]\n list_2trk2trk_obj[i].ApplyJpsi2MassConstraint = True\n list_2trk2trk_obj[i].Jpsi2Mass = list_2trk2trk_jpsi2Mass[i]\n list_2trk2trk_obj[i].ApplyPsi2MassConstraint = True\n list_2trk2trk_obj[i].Psi2Mass = list_2trk2trk_psi2Mass[i]\n\n\nlist_all_obj = list_2trk0trk_obj + list_1trk0trk_obj + list_1trk1trk_obj + list_2trk1trk_obj + list_2trk2trk_obj\n\nCascadeCollections = []\nRefPVContainers = []\nRefPVAuxContainers = []\npassedCandidates = []\n\nfor obj in list_all_obj:\n CascadeCollections += obj.CascadeVertexCollections\n RefPVContainers += [\"xAOD::VertexContainer#BPHY23\" + obj.HypothesisName + \"RefPrimaryVertices\"]\n RefPVAuxContainers += [\"xAOD::VertexAuxContainer#BPHY23\" + obj.HypothesisName + \"RefPrimaryVerticesAux.\"]\n passedCandidates += [ \"BPHY23\" + obj.HypothesisName + \"CascadeVtx3\" ]\n\n#--------------------------------------------------------------------\n## 7/ select the event. We only want to keep events that contain certain vertices which passed certain selection.\n## This is specified by the \"SelectionExpression\" property, which contains the expression in the following format:\n##\n## \"ContainerName.passed_HypoName > count\"\n##\n## where \"ContainerName\" is output container from some Reco_* tool, \"HypoName\" is the hypothesis name setup in some \"Select_*\"\n## tool and \"count\" is the number of candidates passing the selection you want to keep. 
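\n## e.g. \"BPHY23OniaCandidates.passed_Jpsi > 0\" would keep events with at least one selected J/psi candidate.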
\n\nfrom DerivationFrameworkBPhys.DerivationFrameworkBPhysConf import DerivationFramework__AnyVertexSkimmingTool\nBPHY23_SelectEvent = DerivationFramework__AnyVertexSkimmingTool(name = \"BPHY23_SelectEvent\", VertexContainerNames = passedCandidates)\n\nToolSvc += BPHY23_SelectEvent\n\n#====================================================================\n# CREATE THE DERIVATION KERNEL ALGORITHM AND PASS THE ABOVE TOOLS \n#====================================================================\n## IMPORTANT bit. Don't forget to pass the tools to the DerivationKernel! If you don't do that, they will not be \n## be executed!\n\n# The name of the kernel (BPHY23Kernel in this case) must be unique to this derivation\nfrom DerivationFrameworkCore.DerivationFrameworkCoreConf import DerivationFramework__DerivationKernel\naugmentation_tools = [BPHY23_Reco_mumu, BPHY23FourTrackReco_PsiX3872, BPHY23FourTrackReco_Bs0,\\\n BPHY23FourTrackReco_B0, BPHY23FourTrackReco_Upsi2S, BPHY23ThreeTrackReco_Zc3900,\\\n BPHY23ThreeTrackReco_Bpm, BPHY23ThreeTrackReco_DpmDs, BPHY23Rev_Psi, \\\n BPHY23Rev_X3872, BPHY23Rev_Bs0, BPHY23Rev_B0Kpi, BPHY23Rev_B0piK, BPHY23Rev_Upsi2S, \\\n BPHY23Rev_Zc3900, BPHY23Rev_Bpm, BPHY23Rev_Ds, BPHY23Rev_Dpm, BPHY23Select_Phi, \\\n BPHY23Select_Jpsi, BPHY23Select_Psi, BPHY23Select_Upsi, BPHY23Select_Upsi2S] + list_all_obj\nfor i in augmentation_tools : print(i)\nDerivationFrameworkJob += CfgMgr.DerivationFramework__DerivationKernel(\n \"BPHY23Kernel\",\n AugmentationTools = augmentation_tools,\n SkimmingTools = [BPHY23_SelectEvent]\n)\n\n#====================================================================\n# SET UP STREAM \n#====================================================================\nstreamName = derivationFlags.WriteDAOD_BPHY23Stream.StreamName\nfileName = buildFileName( derivationFlags.WriteDAOD_BPHY23Stream )\nBPHY23Stream = MSMgr.NewPoolRootStream( streamName, fileName )\nBPHY23Stream.AcceptAlgs([\"BPHY23Kernel\"])\n\naugStream = MSMgr.GetStream( streamName )\nevtStream = augStream.GetEventStream()\n\n#====================================================================\n# Slimming \n#====================================================================\n\nfrom DerivationFrameworkCore.SlimmingHelper import SlimmingHelper\nBPHY23SlimmingHelper = SlimmingHelper(\"BPHY23SlimmingHelper\")\nBPHY23_AllVariables = []\nBPHY23_StaticContent = []\n\n# Needed for trigger objects\nBPHY23SlimmingHelper.IncludeMuonTriggerContent = True\nBPHY23SlimmingHelper.IncludeBPhysTriggerContent = True\n\n## primary vertices\nBPHY23_AllVariables += [\"PrimaryVertices\"]\nBPHY23_StaticContent += RefPVContainers\nBPHY23_StaticContent += RefPVAuxContainers\n\n## ID track particles\nBPHY23_AllVariables += [\"InDetTrackParticles\"]\n\n## combined / extrapolated muon track particles \n## (note: for tagged muons there is no extra TrackParticle collection since the ID tracks\n## are stored in InDetTrackParticles collection)\nBPHY23_AllVariables += [\"CombinedMuonTrackParticles\", \"ExtrapolatedMuonTrackParticles\"]\n\n## muon container\nBPHY23_AllVariables += [\"Muons\", \"MuonSegments\"]\n\n## we have to disable vxTrackAtVertex branch since it is not xAOD compatible\nfor cascade in CascadeCollections:\n BPHY23_StaticContent += [\"xAOD::VertexContainer#%s\" % cascade]\n BPHY23_StaticContent += [\"xAOD::VertexAuxContainer#%sAux.-vxTrackAtVertex\" % cascade]\n\n# Truth information for MC only\nif isSimulation:\n BPHY23_AllVariables += 
[\"TruthEvents\",\"TruthParticles\",\"TruthVertices\",\"MuonTruthParticles\"]\n\nBPHY23SlimmingHelper.SmartCollections = [\"Muons\", \"PrimaryVertices\", \"InDetTrackParticles\"]\nBPHY23SlimmingHelper.AllVariables = BPHY23_AllVariables\nBPHY23SlimmingHelper.StaticContent = BPHY23_StaticContent\nBPHY23SlimmingHelper.AppendContentToStream(BPHY23Stream)\n","repo_name":"Yusuf-Manjra/athena","sub_path":"PhysicsAnalysis/DerivationFramework/DerivationFrameworkBPhys/share/BPHY23.py","file_name":"BPHY23.py","file_ext":"py","file_size_in_byte":53021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13362754069","text":"from methods import browser,format\nfrom datetime import datetime as dt\nfrom selenium.webdriver.common.by import By\nimport time\n\ndef write_file(text):\n \"\"\"Write input text into a file\"\"\"\n filename = f\"{dt.now().strftime('%Y-%m-%d.%H-%M-%S')}.txt\"\n with open(filename, 'w') as file:\n file.write(str(text))\n\ndef main():\n driver = browser.get_driver(\"https://automated.pythonanywhere.com/\",1)\n while True:\n time.sleep(2)\n element = browser.get_elements(driver,\"/html/body/div[1]/div/h1[1]\")\n element = browser.get_elements(driver,\"/html/body/div[1]/div/h1[2]\")\n text = format.clean_text(element.text)\n write_file(text)\n\nmain()","repo_name":"AntonyFelisberto/python-automation","sub_path":"automation/sites/log_and_save.py","file_name":"log_and_save.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15326962478","text":"import unittest\n\nfrom my_cookbook import stage\nfrom my_cookbook.util import core\nfrom my_cookbook.tests import test_util\nfrom my_cookbook.util import recipes_helper\nfrom my_cookbook.util import requester\nfrom my_cookbook.util import responder\nimport requests\nimport lambda_function\n\n\nclass KeyTest(unittest.TestCase):\n def test_key(self):\n core.load_key()\n self.assertTrue(True)\n\n\nclass APITest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n stage.BIGOVEN = True\n\n @classmethod\n def tearDownClass(cls):\n stage.BIGOVEN = False\n\n @test_util.bigoven\n def test_search_recipes(self):\n recipes = recipes_helper.search_online_recipes(\"chocolate fondue\")\n self.assertGreater(len(recipes), 10)\n\n recipes = recipes_helper.search_online_recipes(\"kentucky fried chicken\")\n self.assertGreater(len(recipes), 10)\n\n @test_util.bigoven\n def test_search_user_recipes(self):\n recipes = recipes_helper.search_my_recipes(\"pancakes\", '_test')\n self.assertEqual(len(recipes), 4)\n\n recipes = recipes_helper.search_my_recipes(\"jalapeno poppers\", '_test')\n self.assertEqual(len(recipes), 1)\n\n\nclass LinkTest(unittest.TestCase):\n @test_util.bigoven\n def test_make_link(self):\n test_util.delete_table(core.LOCAL_DB_URI)\n intent = requester.Intent('StartNewRecipeIntent').slot('RecipeName',\n 'chicken pot pie').build()\n req = requester.Request().new().type(requester.Types.INTENT).intent(intent).build()\n response_dict = lambda_function.handle_event(req, None)\n\n self.assertTrue(responder.is_valid(response_dict))\n\n #fake the user entering their id\n fake_bigoven_username = 'bigoven_test_user'\n response = requests.get(\n 'http://localhost:5000/link?amazonId=default_user_id&bigoven_username=%s' %\n fake_bigoven_username)\n self.assertTrue(response.ok)\n\n link_result = lambda_function._skill.db_helper.get('bigoven_username')\n self.assertEqual(link_result.value, 
fake_bigoven_username)\n\n\nclass RealRecipeTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n stage.BIGOVEN = True\n\n @classmethod\n def tearDownClass(cls):\n stage.BIGOVEN = False\n\n @test_util.bigoven\n def test_brownies(self):\n test_util.delete_table(core.LOCAL_DB_URI)\n test_util.set_bigoven_username()\n\n # search for something not in our cookbook\n intent = requester.Intent('StartNewRecipeIntent').slot('RecipeName', 'brownies').build()\n req = requester.Request().type(requester.Types.INTENT).intent(intent).new().build()\n response_dict = lambda_function.handle_event(req, None)\n\n self.assertTrue(responder.is_valid(response_dict))\n self.assertIn('current_recipe_name', response_dict['sessionAttributes'])\n self.assertEqual(response_dict['sessionAttributes']['current_recipe_name'], 'brownies')\n self.assertEqual(response_dict['sessionAttributes'][core.STATE_KEY], core.States.ASK_SEARCH)\n\n # response with \"Yes\" to search online\n req = requester.Request().type(requester.Types.INTENT).intent(\n requester.Intent(\"AMAZON.YesIntent\").build()).copy_attributes(response_dict).build()\n response_dict = lambda_function.handle_event(req, None)\n\n self.assertTrue(responder.is_valid(response_dict))\n self.assertIn('search_recipe_result', response_dict['sessionAttributes'])\n self.assertIn('Title', response_dict['sessionAttributes']['search_recipe_result'])\n self.assertIn('RecipeID', response_dict['sessionAttributes']['search_recipe_result'])\n self.assertEqual(response_dict['sessionAttributes'][core.STATE_KEY],\n core.States.ASK_MAKE_ONLINE)\n\n # response with \"Yes\" to make the recipe it found\n req = requester.Request().type(requester.Types.INTENT).intent(\n requester.Intent(\"AMAZON.YesIntent\").build()).copy_attributes(response_dict).build()\n response_dict = lambda_function.handle_event(req, None)\n\n self.assertTrue(responder.is_valid(response_dict))\n self.assertIn('current_recipe', response_dict['sessionAttributes'])\n self.assertIn('Title', response_dict['sessionAttributes']['current_recipe'])\n self.assertIn('RecipeID', response_dict['sessionAttributes']['current_recipe'])\n self.assertEqual(response_dict['sessionAttributes'][core.STATE_KEY],\n core.States.INGREDIENTS_OR_INSTRUCTIONS)\n\n # ask for ingredients\n intent = requester.Intent('IngredientsIntent').build()\n req = requester.Request().type(requester.Types.INTENT).copy_attributes(\n response_dict).intent(intent).build()\n response_dict = lambda_function.handle_event(req, None)\n\n self.assertTrue(responder.is_valid(response_dict))\n self.assertEqual(response_dict['sessionAttributes'][core.STATE_KEY],\n core.States.INGREDIENTS_OR_INSTRUCTIONS)\n\n # now ask for instructions\n intent = requester.Intent('InstructionsIntent').build()\n req = requester.Request().type(requester.Types.INTENT).copy_attributes(\n response_dict).intent(intent).build()\n response_dict = lambda_function.handle_event(req, None)\n\n self.assertTrue(responder.is_valid(response_dict))\n self.assertEqual(response_dict['sessionAttributes'][core.STATE_KEY],\n core.States.INGREDIENTS_OR_INSTRUCTIONS)\n","repo_name":"PeterMitrano/my_cookbook","sub_path":"my_cookbook/tests/test_bigoven.py","file_name":"test_bigoven.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17946113381","text":"# building an ANN custom Model\n\nfrom torch import nn\n\n\nclass ANN(nn.Module):\n\n def __init__(self):\n super(ANN, self).__init__()\n self.fc1 = 
nn.Linear(784, 256)\n self.relu = nn.ReLU()\n self.fc2 = nn.Linear(256, 64)\n self.fc3 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.relu(x)\n x = self.fc3(x)\n return x\n\n\nmodel = ANN()\n","repo_name":"JamesBondOOO7/PyTorch","sub_path":"Feedforward Artificial Neural Networks/model_ANN_Inheritance.py","file_name":"model_ANN_Inheritance.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1074861093","text":"answer = []\r\n\r\ndef main():\r\n N,M = map(int, input().split())\r\n\r\n arr = list(map(int, input().split()))\r\n visited = [0]*N\r\n arr = sorted(arr)\r\n\r\n combination(arr, visited, 0, M)\r\n\r\ndef combination(arr, visited, idx, M):\r\n if idx == M:\r\n for index in range(M):\r\n print(answer[index], end=' ')\r\n print()\r\n return\r\n for i in range(len(arr)):\r\n if visited[i] == 0:\r\n answer.append(arr[i])\r\n visited[i] = 1\r\n combination(arr, visited, idx+1, M)\r\n visited[i] = 0\r\n answer.pop()\r\nif __name__==\"__main__\":\r\n main()","repo_name":"parkgunuk/baekjoon_problem","sub_path":"Python/Baekjoon_algo/baekjoon_15654.py","file_name":"baekjoon_15654.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39616812561","text":"import pytest\n\n\n@pytest.mark.asyncio\nasync def test_account_details(account, map_contract):\n # docs: start\n # There are two options of executing transactions\n # 1. Use contract interface\n\n await (\n await map_contract.functions[\"put\"].invoke(key=10, value=20, max_fee=int(1e16))\n ).wait_for_acceptance()\n\n # 2. Use Account's execute method\n\n call = map_contract.functions[\"put\"].prepare(key=10, value=20)\n resp = await account.execute(calls=call, max_fee=int(1e16))\n await account.client.wait_for_tx(resp.transaction_hash)\n\n # The advantage of using the second approach is that there can be more than one call\n\n calls = [\n map_contract.functions[\"put\"].prepare(key=10, value=20),\n map_contract.functions[\"put\"].prepare(key=30, value=40),\n map_contract.functions[\"put\"].prepare(key=50, value=60),\n ]\n # Executes one transaction with three calls\n resp = await account.execute(calls=calls, max_fee=int(1e16))\n await account.client.wait_for_tx(resp.transaction_hash)\n # docs: end\n\n (value,) = await map_contract.functions[\"get\"].call(key=50)\n assert value == 60\n","repo_name":"chain-cpu/starknet-sdk","sub_path":"starknet_py/tests/e2e/docs/guide/test_account_details.py","file_name":"test_account_details.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30296804532","text":"\n# %%\n#entropy of bucket with 4 red balls and 10 blue balls\nimport math\nm = 4\nn = 10\n\ne = -((m / (m + n)) * math.log(m / (m + n), 2)) - (n / (m + n) * math.log(n / (m + n), 2))\n \nprint(e)\n\n# %%\n\nm = 4\nn = 10\np1 = m / (m + n)\np2 = n / (m + n)\n\ne = -p1*math.log(p1, 2) -p2*math.log(p2, 2)\nprint(e)\n\n# %%\n# Multi-class entropy\nimport numpy as np\n# list of balls in bucket\nb = np.array([8, 3, 2])\nP = np.multiply(-b / b.sum(), np.log2(b / b.sum())) #populate with -p*log2p\nent = P.sum()\nprint(ent)\n\n# %%\n# information gain is entropy (parent) - sum of [prob of child*entropy(child)]\nimport numpy as np\nimport pandas as pd\n\ndef entropy(A):\n return 
np.multiply(-A / A.sum(), np.log2(A / A.sum())).sum()\n\ndef info_gain(parent, children):\n # weight each child's entropy by its share of the parent's counts\n return entropy(parent) - sum((c.sum() / parent.sum()) * entropy(c) for c in children)\n\nbug_data = pd.read_csv('ml-bugs.csv').to_numpy()\n\n#create two new sets, these are the children\ndef split(data, cond):\n return [data[cond], data[~cond]]\n\n#count the frequency of named features and return a table\nparent = bug_data[:,0:1]\n\ndef get_freq_table(arr):\n u, c = np.unique(arr, return_counts=True)\n return np.asarray((u, c)).T\n\ndef entropy_from_freq_table(ftbl):\n print(ftbl)\n return entropy(np.array(ftbl[:,1], dtype='float'))\n\nparent_freq_table = get_freq_table(parent)\nparent_entropy = entropy_from_freq_table(parent_freq_table)\nprint(parent_entropy)\n\nconds = [\"bug_data[:,1:2] == 'Brown'\", \"bug_data[:,1:2] == 'Blue'\", \"bug_data[:,1:2] == 'Green'\", \"bug_data[:,2:3] < 17.0\", \"bug_data[:,2:3] < 20.0\"]\n\nfor cond in conds:\n print('\\n\\nentropy of children with cond ' + cond)\n children = split(bug_data[:,0:1], eval(cond))\n weighted_sum_of_children_entropy = 0\n m = len(children[0])\n n = len(children[1])\n for i in range(len(children)):\n if i == 0:\n print('true: ')\n else:\n print('false')\n weighted_sum_of_children_entropy += entropy_from_freq_table(get_freq_table(children[i])) * (len(children[i]) / (m + n))\n print('\\ninformation gain for ' + cond + ' : ')\n print(parent_entropy - weighted_sum_of_children_entropy)","repo_name":"wegesdal/udacity-ml","sub_path":"Supervised_Learning/entropy_and_information_gain.py","file_name":"entropy_and_information_gain.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29145298861","text":"# --*-- coding:utf-8 --*--\nimport re\n\n'''\n@File: basic04.py\n@Author: Hayes Wong\n@Email: 616132717@qq.com\n@Time: 2020-11-23 11:26\n'''\n\n'''\nmatch/search/findall/sub are all methods for processing strings\ncompile can turn a regex string into a compiled pattern object so it can be reused in later matches\ncompile also accepts modifiers such as re.S, so they no longer need to be passed to search, findall and the like\nin short, compile wraps the regular expression so that we can reuse it more easily\n'''\ncontent1 = '2016-12-15 12:00'\ncontent2 = '2016-12-17 12:55'\ncontent3 = '2016-12-22 13:21'\npattern = re.compile('\\\d{2}:\\\d{2}')\nresult1 = re.sub(pattern, '', content1)\nresult2 = re.sub(pattern, '', content2)\nresult3 = re.sub(pattern, '', content3)\nprint(result1, result2, result3)","repo_name":"isidore-wong/Spider-Cralwer","sub_path":"05_re_expression/basic04.py","file_name":"basic04.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14531860471","text":"import pandas as pd\nimport numpy as np\nfrom scipy.stats import kurtosis\nfrom scipy.stats import skew\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport math\n\n\n\ntimeToStatsDict = {\n 'hour': (lambda x: x.hour), \n 'minute': (lambda x: x.minute), \n 'hourminute': (lambda x: x.hour*100+x.minute) \n}\n\nnumpos = lambda x: len([v for v in x if v>0])\nposvar = lambda x: np.var([v for v in x if v>0])\nnumnan = lambda x: len([v for v in x if math.isnan(v)])\nlast = lambda x: x[-1]\nfirst = lambda x: x[0]\n\n\nlistToStatsDict = {\n 'max': max, 'argmax':np.argmax, 'min': min, 'argmin':np.argmin, 'mean':np.mean, \n 'std': np.std, 'var': np.var, 'median': np.median, 'kurtosis': kurtosis, 'skew':skew, \n 'numpos': numpos, 'posvar': posvar, 'numnan': numnan,\n 'last': last, 'first': first\n}\n\n\nclass ColumnSelector(BaseEstimator, TransformerMixin):\n \"\"\"Used to explicitly 
select the columns in a pipeline\n \"\"\"\n \n def __init__(self, columns=None):\n self.columns = columns\n \n def fit(self, X, y=None):\n self.columns = X.columns if self.columns is None else self.columns\n return self\n \n def transform(self, X):\n for c in self.columns: assert c in list(X.columns), \"the data frame is missing column %s\" %(c)\n return X[self.columns]\n\n\ndef createTimeFeatures(df, timeToStatsDict=timeToStatsDict):\n \"\"\" Creates hour-of-day and minute-of-day features from the datetime index.\n \"\"\"\n timeToStats = lambda x: [timeToStatsDict[trf](x) for trf in timeToStatsDict]\n timeFeatureNames = list(timeToStatsDict.keys())\n dT = pd.DataFrame(index=df.index)\n dT['datetime'] = df.index.map(timeToStats)\n dT = dT.apply(lambda x: pd.Series(sum(x.values, [])), axis=1)\n dT.columns = timeFeatureNames\n df = df.merge(dT, how='left', left_index=True, right_index=True)\n return df\n\n\ndef timeSeriesToFeatures(df, listToStatsDict = listToStatsDict):\n \"\"\" Expands each list-valued column into one feature column per summary statistic.\n \"\"\"\n originalColnames = list(df.columns)\n listToStats = lambda x: [listToStatsDict[trf](x) for trf in listToStatsDict]\n for col in df.columns: \n df[col] = df[col].apply(listToStats)\n\n df = df.apply(lambda x: pd.Series(sum(x.values, [])), axis=1)\n\n featureNames = []\n statsNames = list(listToStatsDict.keys())\n for col in originalColnames:\n featureNames = featureNames + [str(col)+'_'+str(stat) for stat in statsNames]\n df.columns = featureNames\n return df\n\ndef createTimeSeriesDifferences(df): # should we also add cumulative sums (??)\n \"\"\" Appends a column of first differences for each list-valued column.\n \"\"\"\n for col in df.columns:\n df[col+'Difference'] = df[col].apply(lambda x: [x[i]-x[i-1] for i in range(1,len(x))])\n return df\n\n\ndef timeSeriesScaler(df):\n \"\"\" Rescales each series; it is expected that the way we rescale affects our results a lot.\n possible rescalings: \n * With respect to the last element of the series\n * With respect to the mean\n * With respect to the median\n \"\"\"\n dg = pd.DataFrame(index=df.index)\n for col in df.columns: \n dg[col]=df[col].apply(lambda x: list((np.array(x)-np.mean(x))/np.mean(x)))\n return dg\n\n\ndef bayesianTransformer(df, levelDictionary):\n \"\"\" Maps each categorical level to its learned target average from levelDictionary.\n \"\"\"\n for col in levelDictionary:\n df[col] = df[col].apply(lambda x: levelDictionary[col][x] if x in levelDictionary[col].index else None)\n return df\n\n\nclass BayesianCategoricalEncoder(TransformerMixin): \n \"\"\"Label encodes all the columns of a dataframe.\n \n Args:\n df\n \n Returns:\n DataFrame with object dtypes label-encoded. Notice that this is not just a\n typical Label Encoder. For each categorical column in the data frame, the levels\n are encoded proportional to how much transaction revenue they generate. \n \n For example, as \n (avg spent in USA) > (avg spent in Great Britain) > (avg spent in Argentina)\n the encoded labels of USA, Great Britain and Argentina are expected to preserve \n the same order\n \n Notes:\n BayesianCategoricalEncoder is a bad name for this class. 
This type of encoder is commonly known as target (mean) encoding.\n potential improvement: add an option that creates another column with the 'support' of the quotient.\n \"\"\"\n def __init__(self, colsToEncode=None):\n self.colsToEncode = colsToEncode\n\n def fit(self, X, y):\n df = X.select_dtypes(include='object').copy()\n self.catCols = list(df.columns) if self.colsToEncode is None else self.colsToEncode\n self.levelDictionary={}\n df['target']=y\n for col in self.catCols:\n importance = df.groupby(col)['target'].agg([np.sum, np.size])\n self.levelDictionary[col] = 1.0*importance['sum']/(importance['size']) \n return self\n \n def transform(self, X):\n dictionary = self.levelDictionary\n for col in self.catCols:\n X[col] = X[col].apply(lambda x: dictionary[col][x] if x in dictionary[col].index else None)\n return X\n\n\nclass TransformationWrapper(BaseEstimator, TransformerMixin):\n \"\"\"Scaler wrapper to have pandas compatibility.\n\n Example\n >> dg = pd.DataFrame({'A':[0.5, 0.6, 0.7]})\n >> TransformationWrapper(transformation = MinMaxScaler()).fit(dg).transform(dg)\n 0 0.0\n 1 0.5\n 2 1.0\n\n Attributes:\n transformation: a sklearn transformation, typically a scaler\n \"\"\"\n def __init__(self, transformation, colnames=None):\n self.colnames=colnames\n self.transformation = transformation\n\n def fit(self, X, y=None):\n self.transformation.fit(X)\n return self\n\n def transform(self, X):\n columns = self.colnames if self.colnames is not None else X.columns\n return pd.DataFrame(self.transformation.transform(X), columns = columns, index=X.index)\n\n\nclass FunctionTransformer(BaseEstimator, TransformerMixin):\n \"\"\" A function transformer like the one in recent versions of sklearn; the way we deal with parameters should be improved.\n \"\"\"\n def __init__(self, transformation, parameters = None):\n self.transformation=transformation\n self.parameters = parameters\n \n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n df=X.copy()\n parameters=self.parameters\n dg = self.transformation(df) if parameters is None else self.transformation(df, parameters)\n return dg\n\n\nclass sampleTransformer(BaseEstimator, TransformerMixin):\n \"\"\"sample way of building things\n \"\"\"\n def __init__(self, parameter=None):\n pass\n \n def fit(self, X, y=None):\n return self\n \n def transform(self, X):\n df=X.copy()\n return df\n \n\n\n# going to need some sort of refined way of doing crossvalidation, really?\n# https://hub.packtpub.com/cross-validation-strategies-for-time-series-forecasting-tutorial/\n# blocking Time Series?\n\n","repo_name":"luisduque440/moneyManager","sub_path":"sklearnUtilities/preprocessors.py","file_name":"preprocessors.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20497424923","text":"import json\nimport logging\nimport os\n\nimport boto3\n\n# Import local libraries\nfrom http_helper import is_options, response_options, get_client_id, response_ok, response_internal_error, response_unauthorized\n\n# Define Logging\nLOGLEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper()\nlogging.basicConfig(level=logging.getLevelName(LOGLEVEL))\n\n# Environment Variables\nREGION_NAME = os.environ['REGION_NAME'] if 'REGION_NAME' in os.environ else 'eu-west-1'\nKSTREAM_FIREHOSE = os.environ['KSTREAM_FIREHOSE'] if 'KSTREAM_FIREHOSE' in os.environ else 'firehose_to_s3_stream'\n\n# Global Variables\nsession = boto3.session.Session()\nfirehose_client = session.client('firehose', 
region_name=REGION_NAME)\n\n# Write Data to Firehose\ndef write_to_firehose(record_data):\n try:\n firehose_client.put_record(\n DeliveryStreamName=KSTREAM_FIREHOSE\n , Record={'Data': record_data}\n )\n return True\n except:\n logging.exception('Failed to write to Firehose.')\n return False\n\n# Main entry point of the Lambda function\ndef lambda_handler(event, context):\n try:\n # Quickly handle OPTIONS - CORS\n if is_options(event):\n return response_options()\n\n client_id = get_client_id(event) # Get a valid Client ID by an authorized header\n\n if client_id is not None:\n # Prepare record data\n body_json = json.loads(event['body'])\n\n body_json['client_id'] = client_id # Insert Client ID (business units) into record line\n\n # Insert Client IP (customers)\n if 'x-forwarded-for' in event['headers']:\n body_json['client_ip'] = event['headers']['x-forwarded-for']\n \n # Capture customer client app or device info\n if 'user-agent' in event['headers']:\n body_json['user_agent'] = event['headers']['user-agent']\n\n # Write to Firehose\n firehose_result = write_to_firehose(json.dumps(body_json))\n if firehose_result:\n return response_ok({\"response_code\":\"200\", \"message\": \"Clickstream collected.\"})\n else:\n return response_internal_error({\"response_code\":\"50001\", \"message\": \"Failed to collect data.\"})\n else:\n return response_unauthorized()\n except:\n logging.exception('Error in clickstream collection.')\n return response_internal_error()\n","repo_name":"tekichan/clickstream-collector","sub_path":"kstream-collector/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8395163476","text":"#https://www.hackerrank.com/challenges/defaultdict-tutorial/problem\n\nfrom collections import defaultdict\n\nn,m = map(int, input().split())\n\nd = defaultdict(list)\n\nfor i in range(n):\n d['A'].append(input())\nfor i in range(m):\n d['B'].append(input())\n\nfor i in d['B']:\n if i in d['A']:\n for j in range(len(d['A'])):\n if i == d['A'][j]:\n print(j+1, end = \" \")\n print()\n else:\n print(-1)","repo_name":"bgelov/python","sub_path":"module/collections/defaultdict.py","file_name":"defaultdict.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"71383626116","text":"stack=[18,19,29,15,16]\nimport sys\ndef withSpace(stack):\n supporting=[]\n for i in range(len(stack)):\n if not supporting:\n supporting.append(stack[i])\n else:\n if supporting[-1]>stack[i]:\n supporting.append(stack[i])\n\n\n\ndef inConstantSpace(arr):\n stack=[]\n global min\n min=sys.maxsize\n def push(item):\n global min\n if item>min:\n stack.append(item)\n else:\n ele=2*item-min\n stack.append(ele)\n min=item\n\n def popup(arr):\n if len(stack)==0:\n return -1\n if stack[-1] 0 and len(number_tasks) > 0:\n first_task_time = task_time.popleft()\n last_number_tasks = number_tasks.pop()\n time_needed = first_task_time * last_number_tasks\n flag = False\n for rubber, range_time in time_needed_rubber.items():\n if time_needed in range_time:\n flag = True\n current_dict[rubber] += 1\n if not flag:\n last_number_tasks -= 2\n number_tasks.append(last_number_tasks)\n task_time.append(first_task_time)\n print_the_result(current_dict)\n\n\ntime_consuming_per_single_task = deque([int(ch) for ch in input().split(\" \")])\nnumber_of_tasks = [int(ch) for ch in input().split(\" 
\")]\nrubber_needed_time = {\"Darth Vader Ducky\": range(0, 61), \"Thor Ducky\": range(61, 121),\n \"Big Blue Rubber Ducky\": range(121, 181), \"Small Yellow Rubber Ducky\": range(181, 241)}\nfinal_dict = {\"Darth Vader Ducky\": 0, \"Thor Ducky\": 0, \"Big Blue Rubber Ducky\": 0, \"Small Yellow Rubber Ducky\": 0}\n\ncount_the_number_of_tasks(time_consuming_per_single_task, number_of_tasks, rubber_needed_time, final_dict)\n","repo_name":"AlexanderBedrosyan/Programming-Advanced-with-Python","sub_path":"Advanced Exam Preparation/Retake Exam - 12 April 2023/rubber_duck_debuggers_1.py","file_name":"rubber_duck_debuggers_1.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"74995528513","text":"from flask import abort, request\nfrom flask_jwt_extended import current_user, jwt_required\nfrom flask_restful import Resource\n\nfrom everycache_api.api.schemas import (\n CacheCommentSchema,\n CachePublicSchema,\n CacheSchema,\n CacheVisitSchema,\n)\nfrom everycache_api.common.pagination import paginate\nfrom everycache_api.extensions import db\nfrom everycache_api.models import Cache, CacheComment, CacheVisit, User\n\n\nclass CacheResource(Resource):\n \"\"\"Single object resource\n\n ---\n get:\n tags:\n - api\n parameters:\n - in: path\n name: cache_id\n schema:\n type: string\n responses:\n 200:\n content:\n application/json:\n schema:\n type: object\n properties:\n cache:\n oneOf:\n - CacheSchema\n - CachePublicSchema\n 404:\n description: cache not found\n security: []\n put:\n tags:\n - api\n parameters:\n - in: path\n name: cache_id\n schema:\n type: string\n requestBody:\n content:\n application/json:\n schema:\n CacheSchema\n responses:\n 200:\n content:\n application/json:\n schema:\n type: object\n properties:\n message:\n type: string\n example: Cache updated.\n cache:\n CacheSchema\n 403:\n description: forbidden\n 404:\n description: cache not found\n delete:\n tags:\n - api\n parameters:\n - in: path\n name: cache_id\n schema:\n type: string\n responses:\n 200:\n content:\n application/json:\n schema:\n type: object\n properties:\n message:\n type: string\n example: Cache deleted.\n 403:\n description: forbidden\n 404:\n description: cache not found\n \"\"\"\n\n method_decorators = {\n \"get\": [jwt_required(optional=True)],\n \"put\": [jwt_required()],\n \"delete\": [jwt_required()],\n }\n\n def get(self, cache_id: str):\n # find cache details\n cache = Cache.query_ext_id(cache_id).first_or_404()\n\n # decide which schema to use\n schema = None\n if current_user:\n # logged in user\n schema = CacheSchema()\n schema.context = {\"current_user\": current_user}\n else:\n # guest user\n schema = CachePublicSchema()\n\n # return cache details\n return {\"cache\": schema.dump(cache)}\n\n def put(self, cache_id: int):\n # find cache\n cache = Cache.query_ext_id(cache_id).first_or_404()\n\n # ensure current_user is authorized\n if current_user != cache.owner and current_user.role != User.Role.Admin:\n abort(403, \"Unauthorized to modify other users' caches.\")\n\n schema = CacheSchema(exclude=[\"visited\"])\n\n # update and return cache details\n cache = schema.load(request.json, instance=cache, partial=True)\n db.session.commit()\n\n # schema.context = {\"current_user\": current_user}\n\n return {\"message\": \"Cache updated.\", \"cache\": schema.dump(cache)}, 200\n\n def delete(self, cache_id: int):\n # find cache\n cache = Cache.query_ext_id(cache_id).first_or_404()\n\n # ensure 
current_user is authorized\n if current_user != cache.owner and current_user.role != User.Role.Admin:\n abort(403, \"Unauthorized to delete other users' caches.\")\n\n # mark cache as deleted\n cache.deleted = True\n db.session.commit()\n\n return {\"message\": \"Cache deleted.\"}, 200\n\n\nclass CacheListResource(Resource):\n \"\"\"Multiple objects resource\n\n ---\n get:\n tags:\n - api\n parameters:\n - in: query\n name: order_by\n schema:\n type: string\n required: false\n description: which field to order results by\n - in: query\n name: desc\n schema:\n type: boolean\n allowEmptyValue: true\n required: false\n description: order results descending\n responses:\n 200:\n content:\n application/json:\n schema:\n allOf:\n - $ref: '#/components/schemas/PaginatedResult'\n - type: object\n properties:\n results:\n type: array\n items:\n oneOf:\n - $ref: '#/components/schemas/CachePublicSchema'\n - $ref: '#/components/schemas/CacheSchema'\n security: []\n post:\n tags:\n - api\n requestBody:\n content:\n application/json:\n schema:\n CacheSchema\n responses:\n 201:\n content:\n application/json:\n schema:\n type: object\n properties:\n message:\n type: string\n example: Cache created.\n cache:\n CacheSchema\n 400:\n description: bad request payload\n 403:\n description: forbidden\n \"\"\"\n\n method_decorators = {\"get\": [jwt_required(optional=True)], \"post\": [jwt_required()]}\n\n def get(self):\n # retrieve multiple caches' info\n query = Cache.query.filter_by(deleted=False)\n\n # decide which schema to use\n schema = None\n if current_user:\n # logged in user\n schema = CacheSchema(many=True)\n schema.context = {\"current_user\": current_user}\n else:\n # guest user\n schema = CachePublicSchema(many=True)\n\n return paginate(query, schema)\n\n def post(self):\n # creating a new cache\n\n schema = CacheSchema(exclude=[\"visited\"])\n cache = schema.load(request.json)\n\n # append current_user as owner to newly created cache\n cache.owner = current_user\n\n db.session.add(cache)\n db.session.commit()\n\n return {\"message\": \"Cache created.\", \"cache\": schema.dump(cache)}, 201\n\n\nclass CacheVisitListResource(Resource):\n \"\"\"Multiple objects resource\n\n ---\n get:\n tags:\n - api\n parameters:\n - in: path\n name: cache_id\n schema:\n type: string\n - in: query\n name: order_by\n schema:\n type: string\n required: false\n description: field to order items by\n - in: query\n name: desc\n schema:\n type: boolean\n allowEmptyValue: true\n required: false\n description: order results descending\n responses:\n 200:\n content:\n application/json:\n schema:\n allOf:\n - $ref: '#/components/schemas/PaginatedResult'\n - type: object\n properties:\n results:\n type: array\n items:\n $ref: '#/components/schemas/CacheVisitSchema'\n 404:\n description: cache not found\n security: []\n post:\n tags:\n - api\n parameters:\n - in: path\n name: cache_id\n schema:\n type: string\n requestBody:\n content:\n application/json:\n schema:\n CacheVisitSchema\n responses:\n 201:\n content:\n application/json:\n schema:\n type: object\n properties:\n message:\n type: string\n example: Cache visit created.\n cache:\n CacheVisitSchema\n 400:\n description: bad request\n 403:\n description: forbidden\n 404:\n description: cache not found\n \"\"\"\n\n method_decorators = {\"post\": [jwt_required()]}\n\n def get(self, cache_id):\n # ensure cache with given id exists\n cache = Cache.query_ext_id(cache_id).first_or_404()\n\n schema = CacheVisitSchema(many=True)\n query = 
CacheVisit.query.filter_by(cache=cache)\n\n        # list visits for cache\n        return paginate(query, schema), 200\n\n    def post(self, cache_id):\n        # find cache with given id\n        cache = Cache.query_ext_id(cache_id).first_or_404()\n\n        if cache.owner == current_user:\n            abort(403, \"Creating a visit to own cache is forbidden.\")\n\n        # create new visit\n        schema = CacheVisitSchema()\n        visit = schema.load(request.json)\n\n        # append cache and user to the visit\n        visit.cache = cache\n        visit.user = current_user\n\n        # add and commit\n        db.session.add(visit)\n        db.session.commit()\n\n        return {\n            \"message\": \"Cache visit created.\",\n            \"cache_visit\": schema.dump(visit),\n        }, 201\n\n\nclass CacheCommentListResource(Resource):\n    \"\"\"Multiple objects resource\n\n    ---\n    get:\n      tags:\n        - api\n      parameters:\n        - in: path\n          name: cache_id\n          schema:\n            type: string\n        - in: query\n          name: order_by\n          schema:\n            type: string\n          required: false\n          description: field to order items by\n        - in: query\n          name: desc\n          schema:\n            type: boolean\n          allowEmptyValue: true\n          required: false\n          description: order results descending\n    responses:\n      200:\n        content:\n          application/json:\n            schema:\n              allOf:\n                - $ref: '#/components/schemas/PaginatedResult'\n                - type: object\n                  properties:\n                    results:\n                      type: array\n                      items:\n                        $ref: '#/components/schemas/CacheCommentSchema'\n      404:\n        description: cache not found\n    security: []\n    post:\n      tags:\n        - api\n      parameters:\n        - in: path\n          name: cache_id\n          schema:\n            type: string\n      requestBody:\n        content:\n          application/json:\n            schema:\n              CacheCommentSchema\n      responses:\n        201:\n          content:\n            application/json:\n              schema:\n                type: object\n                properties:\n                  message:\n                    type: string\n                    example: Cache comment created.\n                  cache_comment:\n                    CacheCommentSchema\n        400:\n          description: bad request\n        404:\n          description: cache not found\n    \"\"\"\n\n    method_decorators = {\"post\": [jwt_required()]}\n\n    def get(self, cache_id):\n        # ensure cache with given id exists\n        cache = Cache.query_ext_id(cache_id).first_or_404()\n\n        schema = CacheCommentSchema(many=True)\n        query = CacheComment.query.filter_by(cache=cache, deleted=False)\n\n        # list comments for cache\n        return paginate(query, schema), 200\n\n    def post(self, cache_id):\n        # find cache with given id\n        cache = Cache.query_ext_id(cache_id).first_or_404()\n\n        # create new comment\n        schema = CacheCommentSchema()\n        comment = schema.load(request.json)\n\n        # append cache and author to the comment\n        comment.cache = cache\n        comment.author = current_user\n\n        # add and commit\n        db.session.add(comment)\n        db.session.commit()\n\n        return {\n            \"message\": \"Cache comment created.\",\n            \"cache_comment\": schema.dump(comment),\n        }, 201\n","repo_name":"everycache-group/everycache-app","sub_path":"api/everycache_api/api/resources/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17149717301","text":"import requests\n\napi_key = 'f4ff557c4e642de08cb03d19c0cb4f66' # API authorization key\n\n\n\n# build the request URL\nurl = 'http://data.fixer.io/api/latest?access_key=' + api_key\n\ndef get_rates(url):\n    jsonrates = requests.get(url)\n    rates = jsonrates.json()\n    return rates\n\n\nall_currency = get_rates(url)['rates'] # all exchange rates in one dict\n\n\neur_to_rub = all_currency['RUB']\neur_to_gel = all_currency['GEL']\ngel_to_rub = eur_to_rub / 
eur_to_gel\n\n","repo_name":"kkomissarov/vkbot","sub_path":"exchange_rates.py","file_name":"exchange_rates.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42052440658","text":"'''t=int(input('how old is your car? '))\nif t>=4:\n    print('your car is old')\nelse:\n    print('your car is new')\nprint('---end---')'''\n\n'''n=str(input('what is your name? ')).strip()\nif n=='Giordanni':\n    print('what a beautiful name')\nelse:\n    print('your name is common!!')\nprint('hi {}, how can I help?'.format(n))'''\n\nn1=int(input('grade 1: '))\nn2=int(input('grade 2: '))\nm=(n1+n2) / 2\nprint('your average was {}'.format(m))\nif m>=7:\n    print('your average was good')\nelse:\n    print('study more')\n","repo_name":"giorkakuda/Python_Exercises","sub_path":"#condicoes10.py","file_name":"#condicoes10.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3382749225","text":"# Importing libraries\n\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\nfrom transformers import T5Tokenizer\n\n\nclass SummaryDataSet(Dataset):\n    \"\"\"\n    Dataset used to fine-tune an mT5 model on a summarization task.\n\n    \"\"\"\n\n    def __init__(\n        self,\n        source_text: pd.Series,\n        target_text: pd.Series,\n        tokenizer: T5Tokenizer,\n        source_len: int,\n        target_len: int,\n    ) -> None:\n\n        # keep source/target aligned with the constructor arguments\n        self.source_text = source_text\n        self.target_text = target_text\n\n        self.tokenizer = tokenizer\n        self.source_len = source_len\n        self.summ_len = target_len\n\n    def __getitem__(self, index: int) -> dict[str, torch.Tensor]:\n\n        source_text = str(self.source_text[index])\n        target_text = str(self.target_text[index])\n\n        # cleaning data so as to ensure data is in string type\n        source_text = \" \".join(source_text.split())\n        target_text = \" \".join(target_text.split())\n\n        source = self.tokenizer.batch_encode_plus(\n            [source_text],\n            max_length=self.source_len,\n            truncation=True,\n            padding=\"max_length\",\n            return_tensors=\"pt\",\n        )\n        target = self.tokenizer.batch_encode_plus(\n            [target_text],\n            max_length=self.summ_len,\n            truncation=True,\n            padding=\"max_length\",\n            return_tensors=\"pt\",\n        )\n\n        source_ids = source[\"input_ids\"].squeeze()\n        source_mask = source[\"attention_mask\"].squeeze()\n        target_ids = target[\"input_ids\"].squeeze()\n        # target_mask = target[\"attention_mask\"].squeeze()\n\n        return {\n            \"source_ids\": source_ids.to(dtype=torch.long),\n            \"source_mask\": source_mask.to(dtype=torch.long),\n            \"target_ids\": target_ids.to(dtype=torch.long),\n            \"target_ids_y\": target_ids.to(dtype=torch.long),\n        }\n\n    def __len__(self) -> int:\n        return len(self.target_text)\n","repo_name":"Mezosky/sumariseichon_chileno","sub_path":"src/dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10383111133","text":"import os\nimport json\nimport tqdm\nfrom multiprocessing import Pool\nfrom itertools import chain\n\ndef process(file_path):\n    symptoms = set()\n    with open(file_path, 'r') as handle:\n        data = json.load(handle)\n        events = data['results']\n        for event in events:\n            try:\n                reactions = event['patient']['reaction']\n                for reaction in reactions:\n                    symptoms.add(reaction['reactionmeddrapt'].lower())\n            except KeyError:\n                continue\n    return symptoms\n\nif 
__name__ == \"__main__\":\n root_path = os.path.join('drug', 'event')\n files = []\n\n for dir in os.listdir(root_path):\n curr_path = os.path.join(root_path, dir)\n for file in os.listdir(curr_path):\n files.append(os.path.join(curr_path, file))\n\n p = Pool()\n results = list(tqdm.tqdm(p.imap(process, files), total=len(files)))\n p.close()\n p.join()\n results = set().union(*results)\n lexicon = {k:i for i,k in enumerate(results)}\n with open(\"symptoms.json\", 'w') as handle:\n json.dump(lexicon, handle)\n","repo_name":"joshcherian42/FDAnalyzer","sub_path":"random/symptoms_lexicon.py","file_name":"symptoms_lexicon.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"46488611120","text":"def findNextGreaterElement(arr):\n N = len(arr)\n stack = []\n result = [-1] * N\n\n for i in range(N):\n while stack and arr[stack[-1]] < arr[i]:\n top_element_index = stack.pop()\n result[top_element_index] = arr[i]\n stack.append(i)\n\n while stack:\n remaining_elements_index = stack.pop()\n result[remaining_elements_index] = -1\n\n return result\n\n\narr = [1, 3, 2, 4]\nresult = findNextGreaterElement(arr)\nprint(result) # Output: [3, 4, 4, -1]\n","repo_name":"Jayavardhan-swarna/ineuron_Assignment-15-Stacks","sub_path":"1_findNextGreaterElement.py","file_name":"1_findNextGreaterElement.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71355540995","text":"# -*- coding: utf-8 -*-\n\n#--------------------------------------- AUTHORS ------------------------------------------------------------------------\n# Javier Villarreal Rodríguez\n# Christian González Jiménez\n\n\n#--------------------------------------- DESCRIPTION --------------------------------------------------------------------\n\n# This bot was programed for a Smartcities Proyect in UCM at 2017. 
This will be available in @smartcityoverseer_bot\n\n#--------------------------------------- END DESCRIPTION ----------------------------------------------------------------\n\n# -------------------------------------- LIBRARIES ----------------------------------------------------------------------\nimport telebot\nimport pyemtmad\nimport math\nimport sys\nfrom pymongo import MongoClient\nfrom pyemtmad import Wrapper\nfrom pyemtmad import util\nfrom pyemtmad import types as emt # import EMT types under the alias emt\nimport datetime\n#-------------------------------------- END LIBRARIES IMPORT ------------------------------------------------------------\n\n#-------------------------------------- VARIABLES -----------------------------------------------------------------------\n\nnow = datetime.datetime.now().strftime('%d/%m/%Y') # set the now date\nyesterday = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%d/%-m/%Y') #set the yesterday date\nuri = \"YOUR URI\" # set the URI of the MongoDB server holding the smart city data\nTOKEN = 'YOUR BOT FATHER TOKEN' # Token of the bot generated at @BotFather in Telegram\nbot = telebot.TeleBot(TOKEN) #Create the instance of the bot token\n\n#-------------------------------------- END VARIABLES -------------------------------------------------------------------\n\n#-------------------------------------- DICTIONARIES OF THE NOISE AND AIR STATIONS --------------------------------------\n\nstations = {'1': '035', '2':'004','3':'039', '4':'008', '5':'038', '6':'011', '7':'040','8':'016','9':'017','10':'018','11':'036','12':'024','13':'027','14':'047','15':'048','16':'049','17':'050','18':'054','19':'055','20':'056','21':'057','22':'058','23':'059','24':'060'}\nstationsn = {'1': '035', '2':'004','3':'039', '4':'008', '5':'038', '6':'011', '7':'040','8':'016','9':'017','10':'018','11':'036','12':'024','13':'027','14':'047','15':'048','16':'049','17':'050','18':'054','19':'055','20':'056','21':'057','22':'058','23':'059','24':'060'}\n\n#-------------------------------------- END DICTIONARIES ----------------------------------------------------------------\n\n\n#-------------------------------------- LISTENER ------------------------------------------------------------------------\ndef listener(messages): # create the listener to take each incoming message and print it\n\tfor message in messages:\n\t\tcid = message.chat.id\n\t\tprint(\"[\" + str(cid) + \"]: \" + str(message.text))\n\nbot.set_update_listener(listener)#set the listener\ndef extract_unique_code(text):\n\treturn text.split()[1] if len(text.split()) > 1 else None\n\n#--------------------------------------END LISTENER ---------------------------------------------------------------------\n\n\n#-------------------------------------- COMMANDS ------------------------------------------------------------------------\n# Description:\n#\tstart - Start the bot\n#\tinfo - Send the info of the bot\n\n# Handler '/start'\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n\tunique_code=extract_unique_code(message.text)#code of the telegram user\n\tcid = message.chat.id # Save the id for the reply\n\tprint (unique_code) #print the code in console\n\tbot.send_message(cid, \"Welcome message\") #send action for reply /start\n\tmain_keyboard(cid) #Go to main_keyboard function\n\n#Handler /info\n@bot.message_handler(commands=['info'])\ndef send_info(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id \n\tprint (unique_code)\n\tbot.send_message(cid, \"info 
message\") #send action for reply /info\n\n#-------------------------------------- END COMMANDS --------------------------------------------------------------------\n\n#-------------------------------------- MAIN KEYBOARD -------------------------------------------------------------------\n\ndef main_keyboard(cid):#Keyboard function for display the keyboard in the bot\n\tmarkup = telebot.types.ReplyKeyboardRemove(selective=False)#set keyboard selective\n\tmarkup = telebot.types.ReplyKeyboardMarkup(row_width=2,one_time_keyboard=True)#set keyboard dimensions\n\tmarkup.add('Estado del aire','Farmacias de guardia')#add one row of options\n\tmarkup.add('Ruido de la ciudad','Incidencias')\n\tmarkup.add('Eventos','Paradas EMT')\n\tmsg = bot.send_message(cid, \"Selecciona una acción: \", reply_markup=markup)#send the message with the keyboard to the user\n\n#-------------------------------------- END MAIN KEYBOARD ----------------------------------------------------------------\n\n\n#-------------------------------------- AIR FUNCTIONS --------------------------------------------------------------------\n\n@bot.message_handler(func=lambda message: message.text == \"Estado del aire\")#handler to detect the \"Estado del aire\" option\ndef air_pollution(message):#Function to display the options of air pollution\n\tunique_code=extract_unique_code(message.text)\n\tcid=message.chat.id\n\tbot.send_message(cid, \"1 - Pza. del Carmen\\n2 - Pza. de España\\n3 - Barrio del Pilar\\n4 - Escuelas Aguirre\\n5 - Cuatro Caminos\\n6 - Av. Ramón y Cajal\\n7 - Vallecas\\\n\t\t\\n8 - Arturo Soria\\n9 - Villaverde Alto\\n10 - C/Farolillo\\n11 - Moratalaz\\n12 - Casa de Campo\\n13 - Barajas\\n14 - Méndez Álvaro\\n15 - Pº. Castellana\\n16 - Retiro\\\n\t\t\\n17 - Pza. Castilla\\n18 - Ensanche de Vallecas\\n19 - Urb. Embajada(Barajas)\\n20 - Pza. 
Fdez.Ladrea\\n21 - Sanchinarro\\n22 - El Pardo\\n23 - Parque Juan Carlos 1\\\n\t\t\\n24 - Tres Olivos\")#send the options to user\n\tmarkup = telebot.types.ReplyKeyboardRemove(selective=False)\n\tmarkup = telebot.types.ReplyKeyboardMarkup(row_width=6,one_time_keyboard=True)\t\n\tmarkup.add('1','2','3','4','5','6')\n\tmarkup.add('7','8','9','10','11','12')\n\tmarkup.add('13','14','15','16','17','18')\n\tmarkup.add('19','20','21','22','23','24')\n\tmsg = bot.send_message(cid, \"Selecciona una estación: \", reply_markup=markup)\n\tbot.register_next_step_handler( msg, air_query)#register the next function handler for this option\n\ndef air_query(message):#function for query at the DB\n\tcid = message.chat.id\n\tdb = MongoClient(uri) #DB connection\n\ttry:\n\t\tid = stations[message.text]+\",\"+ now + \",01\"\n\t\tresult = db.smartcity[\"air_pollution\"].find({\"_id\" : id })\n\t\tarray = []\n\t\tfor query in result:\n\t\t\tfor i in range(1,25):\n\t\t\t\tarray.append(\"hour\" + str(i) + \": \" + query[\"values\"][\"hour\"+str(i)] + \" NO2\")\n\t\tif(len(array) > 0):\n\t\t\tresult_message= \"\\n\".join(array)\n\t\telse:\n\t\t\tresult_message= \"No hay información asociada a esa estación\"\n\t\tbot.send_message(cid,result_message)\n\texcept:\n\t\tmsg = bot.send_message(cid,'La estación no está activa o no existe.')\n\tmain_keyboard(cid)\n\n#-------------------------------------- END AIR FUNCTIONS --------------------------------------------------------------\n\n#-------------------------------------- CHEMIST FUNCTION ---------------------------------------------------------------\n@bot.message_handler(func=lambda message: message.text == \"Farmacias de guardia\")\ndef farmacias(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id\n\tdb = MongoClient(uri) #DB connection\n\tresult = db.smartcity[\"chemists\"].find()\n\tfor event in result:\n\t\tdate = event[\"_id\"].split(',')[1]\n\t\ttoday = datetime.datetime.now().strftime('%d/%m/%Y')\n\n\t\tif date == today:\n\t\t\tdirection = event[\"direction\"] if event[\"direction\"] is not None else \" \"\n\t\t\ttimetable = event[\"timetable\"] if event[\"timetable\"] is not None else \" \"\n\t\t\tphone = event[\"phone\"] if event[\"phone\"] is not None else \" \"\n\t\t\tmsg = bot.send_message(cid,\"Ubicada en \" + direction + \" con horario de \" + timetable + \" y telefono \" + phone ) \n\tmain_keyboard(cid)\n\t\n#-------------------------------------- END CHEMIST FUNCTION ------------------------------------------------------------\n\n#-------------------------------------- EVENTS FUNCTION -----------------------------------------------------------------\n@bot.message_handler(func=lambda message: message.text == \"Eventos\")\ndef eventos(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id\n\tdb = MongoClient(uri) #DB connection\n\tresult = db.smartcity[\"events\"].find() #save the query result in result\n\tfor event in result:\n\t\tdateIni = datetime.datetime.strptime(event[\"dateIni\"],'%Y-%m-%d %H:%M:%S.%f')\n\t\tdateFin = datetime.datetime.strptime(event[\"dateFin\"],'%Y-%m-%d %H:%M:%S.%f')\n\t\tif((dateIni in (datetime.datetime.now(),datetime.datetime.now()+datetime.timedelta(days=7))) or (dateFin in (datetime.datetime.now(),datetime.datetime.now()+datetime.timedelta(days=7))) or ((dateIni <= datetime.datetime.now()) and (dateFin >= datetime.datetime.now()+datetime.timedelta(days=7)))):\n\t\t\ttitle = event[\"title\"] if event[\"title\"] is not None else \" \"\n\t\t\tini = 
dateIni.strftime('%d/%m/%Y') if dateIni.strftime('%d/%m/%Y') is not None else \" \"\n\t\t\tfin = dateFin.strftime('%d/%m/%Y') if dateFin.strftime('%d/%m/%Y') is not None else \" \"\n\t\t\tinstalation = event[\"instalation\"] if event[\"instalation\"] is not None else \" \"\n\t\t\tmsg = bot.send_message(cid,title + \" en \" + instalation + \" \" + ini + \" - \" + fin ) \n\tmain_keyboard(cid)\n#-------------------------------------- END EVENTS FUNCTION ------------------------------------------------------------\n\n#-------------------------------------- EMT BUS FUNCTION ---------------------------------------------------------------\n@bot.message_handler(func=lambda message: message.text == \"Paradas EMT\")\ndef EMT(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id\n\tmsg=bot.send_message(cid, \"Introduce tu número de parada a consultar: \")\n\tbot.register_next_step_handler( msg, EMT_query)\t\n\t\ndef EMT_query(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id\n\tARGS = { 'stop_number' : message.text, 'lang' : 'es' }\n\twrapper = Wrapper('WEB.SERV.EMT ACCOUNT', 'EMT TOKEN')\n\tarrival_info = wrapper.geo.get_arrive_stop(**ARGS)[1]\n\t\n\ttry:\n\t\tfor emt.row in arrival_info:\n\t\t\ttime = emt.row.time_left/60\n\t\t\tminutes = math.floor(time)\n\t\t\tdiference = time - minutes\n\t\t\tseconds = round(diference*60)\n\t\t\tif(minutes <= 20):\n\t\t\t\tmsg = bot.send_message(cid,'%s %s - %i minutes %i seconds - %f,%f' % (emt.row.line_id, emt.row.destination, minutes, seconds, emt.row.latitude, emt.row.longitude))\t\n\t\t\telse:\n\t\t\t\tmsg = bot.send_message(cid,'%s %s - +20 minutes - %f,%f' % (emt.row.line_id, emt.row.destination, emt.row.latitude, emt.row.longitude))\n\texcept:\n\t\tmsg = bot.send_message(cid,'La parada no está activa o no existe')\n\tmain_keyboard(cid)\n\n\n#-------------------------------------- END EMT BUS FUNCTIOn --------------------------------------------------------\n\n\n\n#-------------------------------------- NOISE FUNCTIONS --------------------------------------------------------------\n\n@bot.message_handler(func=lambda message: message.text == \"Ruido de la ciudad\")\ndef noise_pollution(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id\n\tbot.send_message(cid, \"1 - Pza. del Carmen\\n2 - Pza. de España\\n3 - Barrio del Pilar\\n4 - Escuelas Aguirre\\n5 - Cuatro Caminos\\n6 - Av. Ramón y Cajal\\n7 - Vallecas\\\n\t\t\\n8 - Arturo Soria\\n9 - Villaverde Alto\\n10 - C/Farolillo\\n11 - Moratalaz\\n12 - Casa de Campo\\n13 - Barajas\\n14 - Méndez Álvaro\\n15 - Pº. Castellana\\n16 - Retiro\\\n\t\t\\n17 - Pza. Castilla\\n18 - Ensanche de Vallecas\\n19 - Urb. Embajada(Barajas)\\n20 - Pza. 
Fdez.Ladrea\\n21 - Sanchinarro\\n22 - El Pardo\\n23 - Parque Juan Carlos 1\\\n\t\t\\n24 - Tres Olivos\")#send the options to user\n\tmarkup = telebot.types.ReplyKeyboardRemove(selective=False)\n\tmarkup = telebot.types.ReplyKeyboardMarkup(row_width=6,one_time_keyboard=True)\t\n\tmarkup.add('1','2','3','4','5','6')\n\tmarkup.add('7','8','9','10','11','12')\n\tmarkup.add('13','14','15','16','17','18')\n\tmarkup.add('19','20','21','22','23','24')\n\tmsg = bot.send_message(cid, \"Selecciona una estación: \", reply_markup=markup)\n\tbot.register_next_step_handler( msg, noise_query)\t\n\ndef noise_query(message):\n\tcid = message.chat.id\n\tdb = MongoClient(uri) #DB connection\n\ttry:\n\t\tid = stationsn[message.text]+\",\"+ yesterday+\",T\"\n\t\tresult = db.smartcity[\"noise_pollution\"].find({\"_id\" : id }) #save the query result in result\n\n\t\tarray = []\n\t\tfor query in result:\n\t\t\tarray.append(\"Value total: \" + query[\"values\"][\"las1\"] + \" dB\")\n\t\tif(len(array) > 0):\n\t\t\tresult_message= \"\\n\".join(array)\n\t\telse:\n\t\t\tresult_message= \"No hay información asociada a esa estación\"\n\t\tbot.send_message(cid,result_message)\n\texcept:\n\t\tmsg = bot.send_message(cid,'La estación no está activa o no existe.')\n\tmain_keyboard(cid)\n\n#-------------------------------------- END NOISE FUNCTIONS ----------------------------------------------------------\n\n#-------------------------------------- INCIDENTS FUNCTION -----------------------------------------------------------\n\n@bot.message_handler(func=lambda message: message.text == \"Incidencias\")\ndef incidents(message):\n\tunique_code=extract_unique_code(message.text)\n\tcid = message.chat.id\n\tdb = MongoClient(uri) #DB connection\n\tresult = db.smartcity[\"incidents_publicroad\"].find() #save the query result in result\n\tfor event in result:\n\t\tdateIni = datetime.datetime.strptime(event[\"fechaini\"][:19],'%Y-%m-%dT%H:%M:%S')\n\t\tdateFin = datetime.datetime.strptime(event[\"fechafin\"][:19],'%Y-%m-%dT%H:%M:%S')\n\t\tif((dateIni in (datetime.datetime.now(),datetime.datetime.now()+datetime.timedelta(days=7))) or (dateFin in (datetime.datetime.now(),datetime.datetime.now()+datetime.timedelta(days=7))) or ((dateIni <= datetime.datetime.now()) and (dateFin >= datetime.datetime.now()+datetime.timedelta(days=7)))):\n\t\t\tmsg = bot.send_message(cid,event[\"description\"] + \" \" + dateIni.strftime('%d/%m/%Y') + \" - \" + dateFin.strftime('%d/%m/%Y'))\n\tmain_keyboard(cid)\n\n#-------------------------------------- END INCIDENTS FUNCTION -------------------------------------------------------\n\nbot.polling( none_stop = True)#bot active always\n","repo_name":"ChristianVelrok/SmartcitiesOverseerTelegramBot","sub_path":"smartcityoverseer_telegrambot.py","file_name":"smartcityoverseer_telegrambot.py","file_ext":"py","file_size_in_byte":14088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10383111133","text":"import argparse\nimport re\nfrom dataclasses import dataclass\nfrom itertools import product\nfrom pathlib import Path\nfrom random import randint\n\nfrom PIL import Image, ImageOps\n\nfrom constants import (\n broader_areas_br,\n broader_areas_tl,\n core_areas_br,\n core_areas_tl,\n extra_plots,\n name_civ,\n normal_areas_br,\n normal_areas_substract,\n normal_areas_tl,\n)\n\nDEFAULT_COLORS = {\n \"peak\": (84, 84, 84),\n \"hill\": (163, 125, 34),\n \"ocean\": (0, 0, 128),\n \"coast\": (0, 0, 255),\n \"river\": (0, 140, 255),\n \"grass\": (0, 155, 55),\n 
\"desert\": (255, 255, 168),\n \"ice\": (255, 255, 255),\n \"tundra\": (192, 192, 192),\n \"plains\": (128, 128, 0),\n \"forest\": (0, 128, 128),\n \"dense_forest\": (0, 61, 61),\n \"jungle\": (19, 175, 0),\n \"oasis\": (192, 255, 255),\n \"flood_plains\": (192, 255, 0),\n \"fallout\": (32, 64, 0),\n}\n\n\n@dataclass\nclass Plot:\n x: int\n y: int\n plot_type: int\n river_direction: int | None\n terrain: str | None\n feature: str | None\n bonus: str | None\n\n @property\n def is_peak(self):\n return True if self.plot_type == 0 else False\n\n @property\n def is_hill(self):\n return True if self.plot_type == 1 else False\n\n @property\n def has_river(self):\n return True if self.river_direction is not None else False\n\n\n@dataclass\nclass CivIVMap:\n width: int\n height: int\n plots: list[Plot]\n\n @property\n def all_features(self):\n return list(\n filter(lambda x: x is not None, set(plot.feature for plot in self.plots))\n )\n\n @property\n def all_terrains(self):\n return list(\n filter(lambda x: x is not None, set(plot.terrain for plot in self.plots))\n )\n\n @property\n def all_bonuses(self):\n return list(\n filter(lambda x: x is not None, set(plot.bonus for plot in self.plots))\n )\n\n\n@dataclass\nclass ElementColor:\n element_name: str\n color_value: tuple[int, int, int]\n\n\nclass ColorsPicker:\n def __init__(self, map: CivIVMap):\n self.items = map.all_features + map.all_terrains + map.all_bonuses\n self.items += [\"peak\", \"hill\", \"river\"]\n self.colors = self.init_colors()\n\n @staticmethod\n def generate_color():\n return (randint(0, 255), randint(0, 255), randint(0, 255))\n\n def init_colors(self):\n colors = {}\n for item in self.items:\n colors[item] = DEFAULT_COLORS.get(item, ColorsPicker.generate_color())\n return colors\n\n\nclass WBSaveParser:\n @staticmethod\n def parse_size(data: str):\n width = re.search(r\"(?<=grid width=)\\d+\", data)\n height = re.search(r\"(?<=grid height=)\\d+\", data)\n if width is not None and height is not None:\n width = int(width.group())\n height = int(height.group())\n return (width, height)\n else:\n raise ValueError(\"Error while parsing!\")\n\n @staticmethod\n def parse_plots(plot: str):\n x = re.search(r\"(?<=x=)\\d+\", plot)\n if x:\n x = int(x.group())\n y = re.search(r\"(?<=y=)\\d+\", plot)\n if y:\n y = int(y.group())\n plot_type = re.search(r\"(?<=PlotType=)\\d+\", plot)\n if plot_type:\n plot_type = int(plot_type.group())\n river_direction = re.search(r\"(?<=River..Direction=)\\d+\", plot)\n if river_direction:\n river_direction = int(river_direction.group())\n terrain = re.search(r\"(?<=TerrainType=TERRAIN_)\\w+\", plot)\n if terrain:\n terrain = terrain.group().lower()\n feature = re.search(r\"(?<=FeatureType=FEATURE_)\\w+\", plot)\n if feature:\n feature = feature.group().lower()\n bonus = re.search(r\"(?<=BonusType=BONUS_)\\w+\", plot)\n if bonus:\n bonus = bonus.group().lower()\n return Plot(x, y, plot_type, river_direction, terrain, feature, bonus)\n\n def parse(self, filename: str):\n data = Path(filename).read_text()\n width, height = self.parse_size(data)\n raw_plots = re.findall(\"BeginPlot(.*?)EndPlot\", data, re.S)\n plots = list(map(WBSaveParser.parse_plots, raw_plots))\n return CivIVMap(width, height, plots)\n\n\nclass MapRenderer:\n def __init__(\n self,\n map: CivIVMap,\n output_format: str,\n upscale_factor: int = 15,\n ):\n self.map = map\n self.colors = ColorsPicker(map).colors\n self.output_format = output_format\n self.upscale_factor = upscale_factor\n self.base_img = Image.new(\"RGB\", (self.map.width, 
self.map.height), (0, 128, 0))\n\n @staticmethod\n def draw(\n img: Image.Image, plots: list[Plot], color: tuple[int, int, int]\n ) -> Image.Image:\n for plot in plots:\n img.putpixel((plot.x, plot.y), color)\n return img\n\n def get_colors(self, pattern: str) -> tuple[int, int, int]:\n return self.colors[pattern]\n\n def draw_plot_properties(self, img: Image.Image, properties: str) -> Image.Image:\n plots = [plot for plot in self.map.plots if getattr(plot, properties)]\n img = MapRenderer.draw(img, plots, self.get_colors(properties.split(\"_\")[1]))\n return img\n\n def draw_patterns_with_condition(\n self, img: Image.Image, condition: str, patterns: list[str]\n ) -> Image.Image:\n for pattern in patterns:\n plots = [\n plot for plot in self.map.plots if getattr(plot, condition) == pattern\n ]\n img = MapRenderer.draw(img, plots, self.get_colors(pattern))\n return img\n\n def apply_water(self, img: Image.Image) -> Image.Image:\n return self.draw_patterns_with_condition(img, \"terrain\", [\"ocean\", \"coast\"])\n\n def elevate(self, img: Image.Image) -> Image.Image:\n img = self.draw_plot_properties(img, \"is_hill\")\n img = self.draw_plot_properties(img, \"is_peak\")\n return img\n\n def upscale_map(self, img: Image.Image) -> Image.Image:\n img = ImageOps.flip(img)\n img = img.resize(\n (\n self.map.width * self.upscale_factor,\n self.map.height * self.upscale_factor,\n ),\n resample=Image.Resampling.NEAREST,\n )\n return img\n\n def save_drawing(self, img: Image.Image, output_path: str, name_map: str) -> None:\n _output_path = Path(output_path)\n if not _output_path.exists():\n _output_path.mkdir(parents=True, exist_ok=True)\n img.save(f\"{_output_path.as_posix()}/{name_map}.{self.output_format}\")\n\n def draw_base_map(self, output_path: str) -> None:\n img = self.base_img.copy()\n img = self.apply_water(img)\n img = self.elevate(img)\n img = self.upscale_map(img)\n self.save_drawing(img, output_path, \"base_map\")\n\n def draw_rivers_map(self, output_path: str) -> None:\n img = self.base_img.copy()\n img = self.apply_water(img)\n img = self.draw_plot_properties(img, \"has_river\")\n img = self.upscale_map(img)\n self.save_drawing(img, output_path, \"rivers_map\")\n\n def draw_features_map(self, output_path: str) -> None:\n img = self.base_img.copy()\n img = self.apply_water(img)\n img = self.draw_patterns_with_condition(img, \"feature\", self.map.all_features)\n img = self.upscale_map(img)\n self.save_drawing(img, output_path, \"features_map\")\n\n def draw_terrains_map(self, output_path: str) -> None:\n img = self.base_img.copy()\n img = self.apply_water(img)\n img = self.draw_patterns_with_condition(img, \"terrain\", self.map.all_terrains)\n img = self.upscale_map(img)\n self.save_drawing(img, output_path, \"terrains_map\")\n\n def draw_bonuses_map(self, output_path: str) -> None:\n img = self.base_img.copy()\n img = self.apply_water(img)\n img = self.draw_patterns_with_condition(img, \"bonus\", self.map.all_bonuses)\n img = self.upscale_map(img)\n self.save_drawing(img, output_path, \"bonuses_map\")\n\n def draw_spawning_map(self, output_path: str):\n assert (\n len(name_civ)\n == len(core_areas_tl)\n == len(core_areas_br)\n == len(extra_plots)\n == len(normal_areas_tl)\n == len(normal_areas_br)\n == len(broader_areas_tl)\n == len(broader_areas_br)\n )\n core_areas_ = [\n (bl[0], self.map.height - tr[1], tr[0], self.map.height - bl[1])\n for bl, tr in zip(core_areas_tl, core_areas_br)\n ]\n core_areas = []\n for i, country_area in enumerate(core_areas_):\n areas = list(\n product(\n 
range(country_area[0], country_area[2]),\n range(country_area[1], country_area[3]),\n )\n )\n core_areas.append(\n list(map(lambda p: Plot(p[0], p[1], \"\", \"\", \"\", \"\", \"\"), areas))\n )\n for plot in extra_plots[i]:\n core_areas[i].append(Plot(plot[0], plot[1], \"\", \"\", \"\", \"\", \"\"))\n normal_areas_ = [\n (bl[0], self.map.height - tr[1], tr[0], self.map.height - bl[1])\n for bl, tr in zip(normal_areas_tl, normal_areas_br)\n ]\n normal_areas = []\n for i, country_area in enumerate(normal_areas_):\n areas = list(\n product(\n range(country_area[0], country_area[2]),\n range(country_area[1], country_area[3]),\n )\n )\n # drop the plots listed in the subtraction set for this country\n areas = [\n area for area in areas if area not in normal_areas_substract[i]\n ]\n normal_areas.append(\n list(map(lambda p: Plot(p[0], p[1], \"\", \"\", \"\", \"\", \"\"), areas))\n )\n\n broader_areas_ = [\n (bl[0], self.map.height - tr[1], tr[0], self.map.height - bl[1])\n for bl, tr in zip(broader_areas_tl, broader_areas_br)\n ]\n broader_areas = []\n for i, country_area in enumerate(broader_areas_):\n areas = list(\n product(\n range(country_area[0], country_area[2]),\n range(country_area[1], country_area[3]),\n )\n )\n broader_areas.append(\n list(map(lambda p: Plot(p[0], p[1], \"\", \"\", \"\", \"\", \"\"), areas))\n )\n for i, country in enumerate(name_civ):\n img = self.base_img.copy()\n img = ImageOps.flip(img)\n img = self.draw(img, broader_areas[i], (230, 0, 200))\n img = self.draw(img, normal_areas[i], (200, 230, 0))\n img = self.draw(img, core_areas[i], (0, 200, 230))\n img = ImageOps.flip(img)\n img = self.apply_water(img)\n img = self.upscale_map(img)\n self.save_drawing(img, output_path + \"/spawning_areas\", f\"{country}_map\")\n\n def run(\n self,\n output_path: str,\n rivers: bool,\n features: bool,\n terrains: bool,\n bonuses: bool,\n spawning: bool,\n ) -> None:\n self.draw_base_map(output_path)\n if rivers:\n self.draw_rivers_map(output_path)\n if features:\n self.draw_features_map(output_path)\n if terrains:\n self.draw_terrains_map(output_path)\n if bonuses:\n self.draw_bonuses_map(output_path)\n if spawning:\n self.draw_spawning_map(output_path)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-f\", \"--file\", action=\"store\", type=str, help=\"WBSave to convert.\"\n )\n parser.add_argument(\n \"-d\", \"--destination\", action=\"store\", type=str, help=\"Destination folder.\"\n )\n parser.add_argument(\n \"--format\",\n action=\"store\",\n type=str,\n default=\"png\",\n choices=[\"bmp\", \"png\"],\n dest=\"output_format\",\n help=\"File saving format, i.e. bmp or png. Defaults to png.\",\n )\n parser.add_argument(\n \"--rivers\",\n action=\"store_true\",\n default=False,\n help=\"Draw rivers map. Defaults to False.\",\n )\n parser.add_argument(\n \"--features\",\n action=\"store_true\",\n default=False,\n help=\"Draw features map. Defaults to False.\",\n )\n parser.add_argument(\n \"--terrains\",\n action=\"store_true\",\n default=False,\n help=\"Draw terrains map. Defaults to False.\",\n )\n parser.add_argument(\n \"--bonuses\",\n action=\"store_true\",\n default=False,\n help=\"Draw bonuses map. Defaults to False.\",\n )\n parser.add_argument(\n \"--spawning\",\n action=\"store_true\",\n default=True,\n help=\"Draw spawning map. Defaults to True.\",\n )\n parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"Draw all maps. 
Default to False.\",\n )\n\n args = parser.parse_args()\n if args.all:\n args.rivers = True\n args.features = True\n args.terrains = True\n args.bonuses = True\n args.spawning = True\n\n parser = WBSaveParser()\n map = parser.parse(args.file)\n renderer = MapRenderer(map, args.output_format)\n renderer.run(\n args.destination,\n args.rivers,\n args.features,\n args.terrains,\n args.bonuses,\n args.spawning,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"VDuchauffour/RFCEurope","sub_path":"Docs/Tools/map_renderer.py","file_name":"map_renderer.py","file_ext":"py","file_size_in_byte":13667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19458966696","text":"def connect():\n global client_id, mqtt_server, topic_sub\n client = MQTTClient(client_id, mqtt_server)\n client.connect()\n print('Connected to %s MQTT broker', (topic_sub))\n return client\n\ndef restart_and_reconnect():\n print('Failed to connect to MQTT broker. Reconnecting...')\n time.sleep(10)\n machine.reset()\n\ntry:\n client = connect()\nexcept OSError as e:\n restart_and_reconnect()\n\nwhile True:\n try:\n client.check_msg()\n if (time.time() - last_message) > message_interval:\n msg = 'Carter sucks :P #%d' % counter\n print('Connected to %s MQTT broker, subscribed to %s topic' % (mqtt_server, topic_pub))\n client.publish(topic_pub, msg)\n last_message = time.time()\n counter += 1\n except OSError as e:\n restart_and_reconnect()","repo_name":"JuliusC4es4r/Intelligent-Tidal-Energy-Monitoring-System","sub_path":"Firmware/MQTT.py","file_name":"MQTT.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32410485425","text":"import logging\nimport os\nfrom multiprocessing import cpu_count\nfrom typing import NamedTuple, Tuple\n\nimport click\n\nfrom landshark import __version__, errors\nfrom landshark import metadata as meta\nfrom landshark.dataprocess import (ProcessQueryArgs, ProcessTrainingArgs,\n write_querydata, write_trainingdata)\nfrom landshark.featurewrite import read_feature_metadata, read_target_metadata\nfrom landshark.hread import CategoricalH5ArraySource, ContinuousH5ArraySource\nfrom landshark.image import strip_image_spec\nfrom landshark.kfold import KFolds\nfrom landshark.scripts.logger import configure_logging\nfrom landshark.util import points_per_batch\n\nlog = logging.getLogger(__name__)\n\n\nclass CliArgs(NamedTuple):\n \"\"\"Arguments passed from the base command.\"\"\"\n\n nworkers: int\n batchMB: float\n\n\n@click.group()\n@click.version_option(version=__version__)\n@click.option(\"-v\", \"--verbosity\",\n type=click.Choice([\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]),\n default=\"INFO\", help=\"Level of logging\")\n@click.option(\"--nworkers\", type=click.IntRange(0, None), default=cpu_count(),\n help=\"Number of additional worker processes\")\n@click.option(\"--batch-mb\", type=float, default=10,\n help=\"Approximate size in megabytes of data read per \"\n \"worker per iteration\")\n@click.pass_context\ndef cli(ctx: click.Context,\n verbosity: str,\n batch_mb: float,\n nworkers: int\n ) -> int:\n \"\"\"Extract features and targets for training, testing and prediction.\"\"\"\n ctx.obj = CliArgs(nworkers, batch_mb)\n configure_logging(verbosity)\n return 0\n\n\n@cli.command()\n@click.option(\"--targets\", type=click.Path(exists=True), required=True,\n help=\"Target HDF5 file from which to read\")\n@click.option(\"--split\", 
type=int, nargs=2, default=(1, 10),\n help=\"Train/test split fold structure. First argument is the test \"\n \"fold (counting from 1), second is the total number of folds.\")\n@click.option(\"--random_seed\", type=int, default=666,\n help=\"Random state for assigning data to folds\")\n@click.option(\"--name\", type=str, required=True,\n help=\"Name of the output folder\")\n@click.option(\"--features\", type=click.Path(exists=True), required=True,\n help=\"Feature HDF5 file from which to read\")\n@click.option(\"--halfwidth\", type=int, default=0,\n help=\"Half width of patch size. Patch side length is \"\n \"2 x halfwidth + 1\")\n@click.pass_context\ndef traintest(ctx: click.Context,\n targets: str,\n split: Tuple[int, ...],\n random_seed: int,\n name: str,\n features: str,\n halfwidth: int\n ) -> None:\n \"\"\"Extract training and testing data to train and validate a model.\"\"\"\n fold, nfolds = split\n catching_f = errors.catch_and_exit(traintest_entrypoint)\n catching_f(targets, fold, nfolds, random_seed, name, halfwidth,\n ctx.obj.nworkers, features, ctx.obj.batchMB)\n\n\ndef traintest_entrypoint(targets: str,\n testfold: int,\n folds: int,\n random_seed: int,\n name: str,\n halfwidth: int,\n nworkers: int,\n features: str,\n batchMB: float\n ) -> None:\n \"\"\"Get training data.\"\"\"\n feature_metadata = read_feature_metadata(features)\n feature_metadata.halfwidth = halfwidth\n target_metadata = read_target_metadata(targets)\n batchsize = points_per_batch(feature_metadata, batchMB)\n target_src = CategoricalH5ArraySource(targets) \\\n if isinstance(target_metadata, meta.CategoricalTarget) \\\n else ContinuousH5ArraySource(targets)\n\n n_rows = len(target_src)\n kfolds = KFolds(n_rows, folds, random_seed)\n\n directory = os.path.join(os.getcwd(), \"traintest_{}_fold{}of{}\".format(\n name, testfold, folds))\n\n args = ProcessTrainingArgs(name=name,\n feature_path=features,\n target_src=target_src,\n image_spec=feature_metadata.image,\n halfwidth=halfwidth,\n testfold=testfold,\n folds=kfolds,\n directory=directory,\n batchsize=batchsize,\n nworkers=nworkers)\n write_trainingdata(args)\n training_metadata = meta.Training(targets=target_metadata,\n features=feature_metadata,\n nfolds=folds,\n testfold=testfold,\n fold_counts=kfolds.counts)\n training_metadata.save(directory)\n log.info(\"Training import complete\")\n\n\n@cli.command()\n@click.option(\"--strip\", type=int, nargs=2, default=(1, 1),\n help=\"Horizontal strip of the image, e.g. --strip 3 5 is the \"\n \"third strip of 5\")\n@click.option(\"--name\", type=str, required=True,\n help=\"The name of the output from this command.\")\n@click.option(\"--features\", type=click.Path(exists=True), required=True,\n help=\"Feature HDF5 file from which to read\")\n@click.option(\"--halfwidth\", type=int, default=0,\n help=\"Half width of patch size. 
Patch side length is \"\n \"2 x halfwidth + 1\")\n@click.pass_context\ndef query(ctx: click.Context,\n strip: Tuple[int, int],\n name: str,\n features: str,\n halfwidth: int\n ) -> None:\n \"\"\"Extract query data for making prediction images.\"\"\"\n catching_f = errors.catch_and_exit(query_entrypoint)\n catching_f(features, ctx.obj.batchMB, ctx.obj.nworkers,\n halfwidth, strip, name)\n\n\ndef query_entrypoint(features: str,\n batchMB: float,\n nworkers: int,\n halfwidth: int,\n strip: Tuple[int, int],\n name: str\n ) -> int:\n \"\"\"Entrypoint for extracting query data.\"\"\"\n strip_idx, totalstrips = strip\n assert strip_idx > 0 and strip_idx <= totalstrips\n\n \"\"\"Grab a chunk for prediction.\"\"\"\n log.info(\"Using {} worker processes\".format(nworkers))\n\n dirname = \"query_{}_strip{}of{}\".format(name, strip_idx, totalstrips)\n directory = os.path.join(os.getcwd(), dirname)\n try:\n os.makedirs(directory)\n except FileExistsError:\n pass\n\n feature_metadata = read_feature_metadata(features)\n feature_metadata.halfwidth = halfwidth\n batchsize = points_per_batch(feature_metadata, batchMB)\n strip_imspec = strip_image_spec(strip_idx, totalstrips,\n feature_metadata.image)\n tag = \"query.{}of{}\".format(strip_idx, totalstrips)\n\n qargs = ProcessQueryArgs(name, features, feature_metadata.image,\n strip_idx, totalstrips, strip_imspec, halfwidth,\n directory, batchsize, nworkers, tag)\n\n write_querydata(qargs)\n feature_metadata.image = strip_imspec\n feature_metadata.save(directory)\n log.info(\"Query import complete\")\n return 0\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"data61/landshark","sub_path":"landshark/scripts/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":7382,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"31213298228","text":"try:\r\n import pygame\r\n import sys\r\n import random\r\n import math\r\n import os\r\n from constants import *\r\n from socket import *\r\n from pygame.locals import *\r\n from map_gen import *\r\n from room import *\r\n from room_gen import *\r\nexcept ImportError as err:\r\n print(\"A module cannot be loaded\")\r\n sys.exit(2)\r\n\r\n\r\narray_of_char = []\r\narray_of_room = []\r\n\r\nroom_genenerator = room_gen_options(10, 10, 1, 6, \"square\")\r\nroom = room(array_of_char, room_genenerator)\r\n\r\nmap_generator = map_gen_options(9, 10, 1, 6)\r\nmy_map = map(array_of_room, map_generator)\r\n\r\narray_of_char = get_RoundRoom(50, 90) # pos x\r\narray_of_char = floor_filling(array_of_char)\r\n\r\n\r\n# Create the window\r\npygame.init()\r\nfenetre = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\npygame.display.set_caption(\"Project Caerbannog\")\r\nfenetre.fill(WHITE)\r\n\r\n# function scan - Return the total amount of the target item\r\n\r\n\r\ndef scan(room_array, start_pos_x, start_pos_y, scan_size_x, scan_size_y, item=WALL):\r\n i = start_pos_x\r\n j = start_pos_y\r\n amount = 0\r\n for i in range(scan_size_x):\r\n for j in range(scan_size_y):\r\n if room_array[i][j] == item:\r\n amount += 1\r\n return amount\r\n\r\n\r\n# draw ascii room in the console ()\r\nfor i in range(len(array_of_char)):\r\n writch('\\n')\r\n for j in range(len(array_of_char[i])):\r\n if array_of_char[i][j] == 'X':\r\n # print(ra[i][j])\r\n writch('X')\r\n elif array_of_char[i][j] == '#':\r\n writch('#')\r\n else:\r\n writch('!')\r\n\r\n\r\nscan(array_of_char, 0, 0, 25, 50)\r\n\r\n# Tiles textures\r\nWALL_TEXTURE = 
pygame.image.load(\"wall.png\").convert()\r\nFLOOR_TEXTURE = pygame.image.load(\"floor.png\").convert()\r\nVOID_TEXTURE = pygame.image.load(\"void.png\").convert()\r\n\r\nfor i in range(len(array_of_char)):\r\n for j in range(len(array_of_char[i])):\r\n if array_of_char[i][j] == WALL:\r\n\r\n fenetre.blit(WALL_TEXTURE, (i * 8, j * 8))\r\n # pygame.display.flip()\r\n elif array_of_char[i][j] == FLOOR:\r\n\r\n fenetre.blit(FLOOR_TEXTURE, (i * 8, j * 8))\r\n # pygame.display.flip()\r\n elif array_of_char[i][j] == VOID:\r\n\r\n fenetre.blit(VOID_TEXTURE, (i * 8, j * 8))\r\n # pygame.display.flip()\r\npygame.display.flip()\r\n# Boucle infinie\r\ncontinuer = 1\r\nwhile continuer:\r\n for event in pygame.event.get(): # On parcours la liste de tous les événements reçus\r\n if event.type == QUIT: # Si un de ces événements est de type QUIT\r\n continuer = 0 # On arrête la boucle\r\n","repo_name":"themaskedpotato/Project_Caerbannog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24550808389","text":"import os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nimport torch\n\n#\nimport configparser\nimport argparse\nimport albumentations as a\nfrom torchvision import datasets, transforms, models\nfrom argparse import Namespace\nfrom tqdm import tqdm\nfrom sklearn import metrics as mt\nfrom numpy import newaxis\nfrom random import seed as rseed\nfrom torchlib.utils import stats_table, Arguments # pylint:disable=import-error\nfrom torchlib.models import vgg16, resnet18, conv_at_resolution\nfrom torchlib.dataloader import AlbumentationsTorchTransform, CombinedLoader\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_dir\",\n type=str,\n required=True,\n default=\"data/server_simulation/test\",\n help='Select a data folder [if matches \"mnist\" mnist will be used].',\n )\n parser.add_argument(\n \"--model_weights\",\n type=str,\n required=True,\n default=None,\n help=\"model weights to use\",\n )\n parser.add_argument(\"--cuda\", action=\"store_true\", help=\"Use CUDA acceleration.\")\n cmd_args = parser.parse_args()\n\n use_cuda = cmd_args.cuda and torch.cuda.is_available()\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\") # pylint: disable=no-member\n state = torch.load(cmd_args.model_weights, map_location=device)\n\n args = state[\"args\"]\n if type(args) is Namespace:\n args = Arguments.from_namespace(args)\n args.from_previous_checkpoint(cmd_args)\n print(str(args))\n rseed(args.seed)\n torch.manual_seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n class_names = None\n val_mean_std = (\n state[\"val_mean_std\"]\n if \"val_mean_std\" in state.keys()\n else (\n torch.tensor([0.5]), # pylint:disable=not-callable\n torch.tensor([0.2]), # pylint:disable=not-callable\n )\n if args.pretrained\n else (\n torch.tensor([0.5, 0.5, 0.5]), # pylint:disable=not-callable\n torch.tensor([0.2, 0.2, 0.2]), # pylint:disable=not-callable\n )\n )\n mean, std = val_mean_std\n mean = mean.cpu()\n std = std.cpu()\n if args.data_dir == \"mnist\":\n num_classes = 10\n testset = datasets.MNIST(\n \"../data\",\n train=False,\n transform=transforms.Compose(\n [\n transforms.Resize(args.inference_resolution),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ]\n ),\n )\n else:\n 
num_classes = 3\n\n tf = [\n a.Resize(args.inference_resolution, args.inference_resolution),\n a.CenterCrop(args.inference_resolution, args.inference_resolution),\n ]\n if hasattr(args, \"clahe\") and args.clahe:\n tf.append(a.CLAHE(always_apply=True, clip_limit=(1, 1)))\n tf.extend(\n [\n a.ToFloat(max_value=255.0),\n a.Normalize(\n mean.cpu().numpy()[None, None, :],\n std.cpu().numpy()[None, None, :],\n max_pixel_value=1.0,\n ),\n ]\n )\n tf = AlbumentationsTorchTransform(a.Compose(tf))\n # transforms.Lambda(lambda x: x.permute(2, 0, 1)),\n\n loader = CombinedLoader()\n if not args.pretrained:\n loader.change_channels(1)\n testset = datasets.ImageFolder(cmd_args.data_dir, transform=tf, loader=loader)\n assert (\n len(testset.classes) == 3\n ), \"We can only handle data that has 3 classes: normal, bacterial and viral\"\n class_names = testset.classes\n\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=1, shuffle=True, **kwargs\n )\n if args.model == \"vgg16\":\n model = vgg16(\n pretrained=args.pretrained,\n num_classes=num_classes,\n in_channels=3 if args.pretrained else 1,\n adptpool=False,\n input_size=args.inference_resolution,\n pooling=args.pooling_type,\n )\n elif args.model == \"simpleconv\":\n if args.pretrained:\n raise RuntimeError(\"No pretrained version available\")\n model = conv_at_resolution[args.train_resolution](\n num_classes=num_classes,\n in_channels=3 if args.pretrained else 1,\n pooling=args.pooling_type,\n )\n elif args.model == \"resnet-18\":\n model = resnet18(\n pretrained=args.pretrained,\n num_classes=num_classes,\n in_channels=3 if args.pretrained else 1,\n adptpool=False,\n input_size=args.inference_resolution,\n pooling=args.pooling_type if hasattr(args, \"pooling_type\") else \"avg\",\n )\n else:\n raise NotImplementedError(\"model unknown\")\n model.load_state_dict(state[\"model_state_dict\"])\n model.to(device)\n # test method\n model.eval()\n total_pred, total_target, total_scores = [], [], []\n with torch.no_grad():\n for data, target in tqdm(\n test_loader,\n total=len(test_loader),\n desc=\"performing inference\",\n leave=False,\n ):\n if len(data.shape) > 4:\n data = data.squeeze(0)\n data, target = data.to(device), target.to(device)\n output = model(data)\n pred = output.argmax(dim=1)\n total_pred.append(pred)\n total_scores.append(output)\n tgts = target.view_as(pred)\n total_target.append(tgts)\n equal = pred.eq(tgts)\n total_pred = torch.cat(total_pred).cpu().numpy() # pylint: disable=no-member\n total_target = torch.cat(total_target).cpu().numpy() # pylint: disable=no-member\n total_scores = torch.cat(total_scores).cpu().numpy() # pylint: disable=no-member\n total_scores -= total_scores.min(axis=1)[:, newaxis]\n total_scores = total_scores / total_scores.sum(axis=1)[:, newaxis]\n\n roc_auc = mt.roc_auc_score(total_target, total_scores, multi_class=\"ovo\")\n objective = 100.0 * roc_auc\n conf_matrix = mt.confusion_matrix(total_target, total_pred)\n report = mt.classification_report(\n total_target, total_pred, output_dict=True, zero_division=0\n )\n print(\n stats_table(\n conf_matrix,\n report,\n roc_auc=roc_auc,\n matthews_coeff=mt.matthews_corrcoef(total_target, total_pred),\n class_names=class_names,\n epoch=0,\n )\n )\n\n","repo_name":"gkaissis/PriMIA","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"61"} +{"seq_id":"20065572456","text":"import math\nprint('EQUAÇÃO DO 2º GRAU')\n\nprint('ax² + 
bx + c = 0')\n\na = float(input('Enter the value of \"a\": '))\nb = float(input('Enter the value of \"b\": '))\nc = float(input('Enter the value of \"c\": '))\n\ndelta = b*b - (4*a*c)\n\nif a == 0:\n print('Not a quadratic equation! \"a\" must be different from zero!!!\\nEnd of program!')\nelif delta < 0:\n print('THERE IS NO REAL ROOT!\\nEnd of program!')\nelif delta == 0:\n x1 = -b / (2*a)\n print(f'There is only one real root for the equation {a}x² + {b}x + {c} = 0\\n X = {x1}')\nelse:\n x1 = (-b + math.sqrt(delta)) / (2*a)\n x2 = (-b - math.sqrt(delta)) / (2*a)\n print(f'The roots of the equation {a}x² + {b}x + {c} = 0 are:\\nX1 = {x1}\\nX2 = {x2}\\nEnd of program!')","repo_name":"FilipeHanniel/guppe","sub_path":"Exerc_sec_05/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36382625433","text":"#Function to create the dictionary of anagrams from txt file\r\ndef make_anagram_dict(words):\r\n #Declares the dictionary\r\n anagrams = {}\r\n\r\n #For each line in the txt file\r\n for line in open(words):\r\n #Sort letters in each word alphabetically and assign to key\r\n key = \"\".join(sorted(line))\r\n #Calls append_anagram_dict to return defined dictionary\r\n append_anagram_dict(key.split(), anagrams, line)\r\n #Sets the dictionary to only show anagrams, not all words whether they have anagrams or not\r\n anagrams = dict([(x,y) for x,y in anagrams.items() if len(y)>1])\r\n #Prints dictionary\r\n print(anagrams)\r\n\r\n#Function to update the dictionary with each key and its values\r\ndef append_anagram_dict(words, anagrams, line):\r\n #Declares values list to hold anagrams\r\n values = []\r\n\r\n #For each key in txt file\r\n for key in words:\r\n #If key is already in dictionary\r\n if key in anagrams:\r\n #Adds current word to other word in list (that are anagrams)\r\n values.append(line.strip())\r\n #Adds list of anagrams to dictionary of anagrams\r\n anagrams[key.strip()] += values\r\n #Else add only current word\r\n else:\r\n #Adds current word to list\r\n values.append(line.strip())\r\n #Adds list of anagrams to dictionary of anagrams\r\n anagrams[key.strip()] = values\r\n\r\n#Calls make_anagram_dict function with txt file\r\nmake_anagram_dict(\"words.txt\")\r\n","repo_name":"TyreeceSimpson/python-assessment1","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981295347","text":"# Remove all elements from a linked list of integers that have value val.\n#\n# Example:\n#\n# Input: 1->2->6->3->4->5->6, val = 6\n# Output: 1->2->3->4->5\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n if not head:\n return head\n dummy = ListNode(-1)\n dummy.next = head\n pre, cur = dummy, dummy.next\n while cur:\n if cur.val == val:\n # remove cur\n pre.next = cur.next\n cur = cur.next\n else:\n pre = cur\n cur = cur.next\n return dummy.next\n","repo_name":"yshshadow/Leetcode","sub_path":"201-250/203.py","file_name":"203.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"14946653091","text":"#TASK-4\n#QUESTION-2-B\n\n#FUNCTION FOR STRING\n\n\n\n#finding middle char\ndef func_midChar(myString):\n if(len(myString) % 2 == 0):\n print(\"middle char of string is: \", myString[(int(len(myString) / 2) - 1)] + \" \" + myString[(int(len(myString) / 2))])\n else:\n print(\"middle char is: \", myString[(int(len(myString) / 2))])\n\n\n \n#finding vowels\ndef func_vowel(myString):\n vowel = 0\n for x in myString:\n if(x == 'a' or x == 'e' or x == 'i ' or x == 'o' or x == 'u' or x == 'A' or x == 'E' or x == 'I' or x == 'O' or x == 'U'):\n vowel = vowel + 1\n print(\"vowels are: \", vowel)\n\n\n\n#calculate number of letters\ndef func_letter(myString):\n count = 0\n for x in myString:\n count += 1\n print(\"number of letters in string are: \",count)\n\n \n \n","repo_name":"amisha-chauhan/codewayy_python_series","sub_path":"python-task4/question-2/strings.py","file_name":"strings.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21543740778","text":"\"\"\"\nClasses defining user and item latent representations in\nfactorization models.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ScaledEmbedding(nn.Embedding):\n \"\"\"\n Embedding layer that initialises its values\n to using a normal variable scaled by the inverse\n of the embedding dimension.\n \"\"\"\n\n def reset_parameters(self):\n \"\"\"\n Initialize parameters.\n \"\"\"\n\n self.weight.data.normal_(0, 1.0 / self.embedding_dim)\n if self.padding_idx is not None:\n self.weight.data[self.padding_idx].fill_(0)\n\n\nclass ZeroEmbedding(nn.Embedding):\n \"\"\"\n Embedding layer that initialises its values\n to zero.\n\n Used for biases.\n \"\"\"\n\n def reset_parameters(self):\n \"\"\"\n Initialize parameters.\n \"\"\"\n\n self.weight.data.zero_()\n if self.padding_idx is not None:\n self.weight.data[self.padding_idx].fill_(0)\n\n\nclass MultiTaskNet(nn.Module):\n \"\"\"\n Multitask factorization representation.\n\n Encodes both users and items as an embedding layer; the likelihood score\n for a user-item pair is given by the dot product of the item\n and user latent vectors. 
The numerical score is predicted using a small MLP.\n\n Parameters\n ----------\n\n num_users: int\n Number of users in the model.\n num_items: int\n Number of items in the model.\n embedding_dim: int, optional\n Dimensionality of the latent representations.\n layer_sizes: list\n List of layer sizes for the regression network.\n sparse: boolean, optional\n Use sparse gradients.\n embedding_sharing: boolean, optional\n Share embedding representations for both tasks.\n\n \"\"\"\n\n def __init__(self, num_users, num_items, embedding_dim=32, layer_sizes=[96, 64],\n sparse=False, embedding_sharing=True):\n\n super().__init__()\n\n self.embedding_dim = embedding_dim\n\n self.users_embedding = ScaledEmbedding(num_embeddings=num_users, embedding_dim=self.embedding_dim,\n sparse=sparse)\n self.users_bias = ZeroEmbedding(num_embeddings=num_users, embedding_dim=1)\n\n self.items_embedding = ScaledEmbedding(num_embeddings=num_items, embedding_dim=self.embedding_dim,\n sparse=sparse)\n self.items_bias = ZeroEmbedding(num_embeddings=num_items, embedding_dim=1)\n\n self.mlp = nn.Sequential(\n nn.Linear(in_features=layer_sizes[0], out_features=layer_sizes[1]),\n nn.ReLU(),\n nn.Linear(in_features=layer_sizes[1], out_features=1)\n )\n\n if embedding_sharing:\n self.U_reg = self.users_embedding\n self.Q_reg = self.items_embedding\n else:\n self.U_reg = ScaledEmbedding(num_embeddings=num_users, embedding_dim=self.embedding_dim, sparse=sparse)\n self.Q_reg = ScaledEmbedding(num_embeddings=num_items, embedding_dim=self.embedding_dim, sparse=sparse)\n\n def forward(self, user_ids, item_ids):\n \"\"\"\n Compute the forward pass of the representation.\n\n Parameters\n ----------\n\n user_ids: tensor\n A tensor of integer user IDs of shape (batch,)\n item_ids: tensor\n A tensor of integer item IDs of shape (batch,)\n\n Returns\n -------\n\n predictions: tensor\n Tensor of user-item interaction predictions of shape (batch,)\n score: tensor\n Tensor of user-item score predictions of shape (batch,)\n \"\"\"\n\n u = self.users_embedding(user_ids) # (None, embedding_dim)\n q = self.items_embedding(item_ids) # (None, embedding_dim)\n\n a = self.users_bias(user_ids) # (None, 1)\n b = self.items_bias(item_ids) # (None, 1)\n\n predictions = torch.sum(u * q, dim=-1) + torch.squeeze(a, dim=-1) + torch.squeeze(b, dim=-1)\n\n u_reg = self.U_reg(user_ids) # (None, embedding_dim)\n q_reg = self.Q_reg(item_ids) # (None, embedding_dim)\n score = self.mlp(torch.cat([u_reg, q_reg, u_reg * q_reg], dim=-1))\n score = torch.squeeze(score, dim=-1)\n\n # Make sure you return predictions and scores of shape (batch,)\n if (len(predictions.shape) > 1) or (len(score.shape) > 1):\n raise ValueError(\"Check your shapes!\")\n\n return predictions, score\n","repo_name":"shubhamwagh/MultiTaskRec","sub_path":"multi_task_rec/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3687682308","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function #,unicode_literals\n\nmetaflac = 'metaflac --export-tags-to=-'.split()\nsoxi = 'soxi -V0'.split()\n\nimport sys, subprocess, os\ntags = [ t.lower() for t in sys.argv[1:]]\nfor fname in sys.stdin:\n fname = fname.rstrip()\n info = subprocess.check_output( metaflac + [ fname ] ).decode( sys.stdout.encoding)\n #print( info)\n info = dict( [x.strip() for x in a.split('=',1)]\n for a in info.strip().split('\\n')\n if '=' in a\n )\n info = 
dict( (k.lower(),v) for k,v in info.items())\n newname = ': '.join( info.get( t,'?') for t in tags )\n newname = newname.replace( '/', '-')\n #print( newname)\n os.rename( fname, os.path.join( os.path.dirname( fname), newname + os.path.splitext( fname)[1] ))\n\n# vim:ts=4:sw=4:expandtab\n","repo_name":"svilendobrev/svd_bin","sub_path":"audio/flactags2name.py","file_name":"flactags2name.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"24433857943","text":"import os\nimport time\nfrom pathlib import Path\nfrom typing import Callable, List, Optional, Union\nimport gpflow\nimport numpy as np\nimport tensorflow as tf\nfrom gpflow import set_trainable\nfrom gpflow.inducing_variables import InducingPoints\nfrom gpflow.models import GPR, SGPR, GPModel\nfrom gpflow.models.training_mixins import RegressionData\nfrom sklearn import preprocessing\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom tensorflow_probability import distributions as tfd\nfrom oak import plotting_utils\nfrom oak.input_measures import MOGMeasure\nfrom oak.normalising_flow import Normalizer\nfrom oak.oak_kernel import OAKKernel, get_list_representation\nfrom oak.plotting_utils import FigureDescription, save_fig_list\nfrom oak.utils import compute_sobol_oak, initialize_kmeans_with_categorical\n# -\n\nf64 = gpflow.utilities.to_default_float\n\n\ndef get_kmeans_centers(X: np.ndarray, K: int = 500) -> np.ndarray:\n \"\"\"\n :param X: N * D input array\n :param K: number of clusters\n :return: K-means clustering of input X\n \"\"\"\n np.random.seed(44)\n tf.random.set_seed(44)\n kmeans = KMeans(n_clusters=K, random_state=0).fit(X)\n Z = kmeans.cluster_centers_\n return Z\n\n\ndef save_model(\n model: GPModel,\n filename: Path,\n) -> None:\n \"\"\"\n :param model: GPflow model parameters to save\n :param filename: location to save the model to\n :return save model parameters to a local directory\n \"\"\"\n if isinstance(model, gpflow.models.SVGP):\n hyperparams = [\n model.parameters[i].numpy() for i in range(len(model.parameters))\n ]\n else:\n hyperparams = [\n model.trainable_parameters[i].numpy()\n for i in range(len(model.trainable_parameters))\n ]\n\n os.makedirs(filename.parents[0], exist_ok=True)\n np.savez(filename, hyperparams=hyperparams)\n\n\ndef load_model(\n model: GPModel,\n filename: Path,\n load_all_parameters=False,\n) -> None:\n \"\"\"\n :param model: GPflow model parameters to load\n :param filename: location to load the model from\n :param load_all_parameters: whether to load all parameters or only trainable parameters\n :return load model parameters from a local directory\n \"\"\"\n # We need allow_pickle=True because model parameters include objects (e.g. 
InducingPoints)\n model_params = np.load(str(filename), allow_pickle=True)[\"hyperparams\"]\n\n if load_all_parameters:\n for i in range(len(model.parameters)):\n model.parameters[i].assign(model_params[i])\n else:\n for i in range(len(model.trainable_parameters)):\n model.trainable_parameters[i].assign(model_params[i])\n\n\ndef create_model_oak(\n data: RegressionData,\n max_interaction_depth: int = 2,\n constrain_orthogonal: bool = True,\n inducing_pts: np.ndarray = None,\n optimise=False,\n zfixed=True,\n p0=None,\n p=None,\n lengthscale_bounds=None,\n empirical_locations: Optional[List[float]] = None,\n empirical_weights: Optional[List[float]] = None,\n use_sparsity_prior: bool = True,\n gmm_measures: Optional[List[MOGMeasure]] = None,\n share_var_across_orders: Optional[bool] = True,\n) -> GPModel:\n \"\"\"\n :param data: tuple (X, Y) of training inputs and targets\n :param max_interaction_depth: maximum order of interactions\n :param constrain_orthogonal: whether to use the orthogonal version of the kernel\n :param inducing_pts: inducing points, if None, it uses K-means centers\n :param optimise: whether to optimise the hyper parameters of the model\n :param zfixed: whether to fix or learn the inducing points\n :param p0: list of probability measures for binary kernels, set to None if it is not binary\n :param p: list of probability measures for categorical kernels, set to None if it is not categorical\n :param lengthscale_bounds: bounds of the lengthscale parameters\n :param empirical_locations: list of locations of empirical measure, set to None if not using the empirical measure\n :param empirical_weights: list of weights of empirical measure, set to None if not using the empirical measure\n :param use_sparsity_prior: whether to use sparse prior on the kernel variance parameters\n :param gmm_measures: list of Gaussian mixture measures\n :param share_var_across_orders: whether to use the same variance parameter across interaction order\n :return: a GP model with OAK kernel\n \"\"\"\n num_dims = data[0].shape[1]\n\n # create oak kernel\n if p0 is None:\n p0 = [None] * num_dims\n if p is None:\n p = [None] * num_dims\n base_kernels = [None] * num_dims\n for dim in range(num_dims):\n if (p0[dim] is None) and (p[dim] is None):\n base_kernels[dim] = gpflow.kernels.RBF\n\n k = OAKKernel(\n base_kernels,\n num_dims=num_dims,\n max_interaction_depth=max_interaction_depth,\n constrain_orthogonal=constrain_orthogonal,\n p0=p0,\n p=p,\n lengthscale_bounds=lengthscale_bounds,\n empirical_locations=empirical_locations,\n empirical_weights=empirical_weights,\n gmm_measures=gmm_measures,\n share_var_across_orders=share_var_across_orders,\n )\n\n if inducing_pts is not None:\n model = SGPR(\n data,\n mean_function=None,\n kernel=k,\n inducing_variable=InducingPoints(inducing_pts),\n )\n if zfixed:\n set_trainable(model.inducing_variable, False)\n else:\n model = GPR(data, mean_function=None, kernel=k)\n # set priors for variance\n if use_sparsity_prior:\n print(\"Using sparsity prior\")\n if share_var_across_orders:\n for p in model.kernel.variances:\n p.prior = tfd.Gamma(f64(1.0), f64(0.2))\n # Initialise likelihood variance to small value to avoid finding all-noise explanation minima\n model.likelihood.variance.assign(0.01)\n if optimise:\n t_start = time.time()\n opt = gpflow.optimizers.Scipy()\n opt.minimize(\n model.training_loss_closure(), model.trainable_variables, method=\"BFGS\"\n )\n gpflow.utilities.print_summary(model, fmt=\"notebook\")\n 
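# gpflow's Scipy optimizer wraps scipy.optimize.minimize, so other scipy method names (e.g. \"L-BFGS-B\") could be passed above as well\n 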
print(f\"Training took {time.time() - t_start:.1f} seconds.\")\n return model\n\n\ndef apply_normalise_flow(X: tf.Tensor, input_flows: List[Normalizer]) -> tf.Tensor:\n \"\"\"\n :param X: input of which the normalising flow is to be applied\n :param input_flows: list of normalising flows to apply to each feature dimension\n :return: inputs after transformations of the flow\n \"\"\"\n X_scaled = np.zeros((X.shape))\n for ii in range(X.shape[1]):\n if input_flows[ii] is None:\n X_scaled[:, ii] = X[:, ii]\n else:\n X_scaled[:, ii] = input_flows[ii].bijector(X[:, ii])\n return X_scaled\n\n\nclass oak_model:\n def __init__(\n self,\n max_interaction_depth=2,\n num_inducing=200,\n lengthscale_bounds=[1e-3, 1e3],\n binary_feature: Optional[List[int]] = None,\n categorical_feature: Optional[List[int]] = None,\n empirical_measure: Optional[List[int]] = None,\n use_sparsity_prior: bool = True,\n gmm_measure: Optional[List[int]] = None,\n sparse: bool = False,\n use_normalising_flow: bool = True,\n share_var_across_orders: bool = True,\n ):\n \"\"\"\n :param max_interaction_depth: maximum number of interaction terms to consider\n :param num_inducing: number of inducing points\n :param lengthscale_bounds: bounds for lengthscale parameters\n :param binary_feature: list of indices for binary features\n :param categorical_feature: list of indices for categorical features\n :param empirical_measure: list of indices using empirical measures, if using Gaussian measure, this is set to None\n :param use_sparsity_prior: use sparsity prior on kernel variances\n :param gmm_measure: use gaussian mixture model. If index is 0 it will use a Gaussian measure, otherwise\n :param sparse: Boolean to indicate whether to use sparse GP with inducing points. Defaults to False.\n :param use_normalising_flow: whether to use normalising flow, if not, continuous features are standardised\n :param share_var_across_orders: whether to share the same variance across orders,\n if False, it uses kernel of the form \\prod_i(1+k_i) in Duvenaud (2011).\n :return: OAK model class with model fitting, prediction, attribution and plotting utils.\n \"\"\"\n self.max_interaction_depth = max_interaction_depth\n self.num_inducing = num_inducing\n self.lengthscale_bounds = lengthscale_bounds\n self.binary_feature = binary_feature\n self.categorical_feature = categorical_feature\n self.use_sparsity_prior = use_sparsity_prior\n\n # state filled in during fit call\n self.input_flows = None\n self.scaler_y = None\n self.Y_scaled = None\n self.X_scaled = None\n self.alpha = None\n self.continuous_index = None\n self.binary_index = None\n self.categorical_index = None\n self.empirical_measure = empirical_measure\n self.empirical_locations = None\n self.empirical_weights = None\n self.gmm_measure = gmm_measure\n self.estimated_gmm_measures = None # sklearn GMM estimates\n self.sparse = sparse\n self.use_normalising_flow = use_normalising_flow\n self.share_var_across_orders = share_var_across_orders\n\n def fit(\n self,\n X: tf.Tensor,\n Y: tf.Tensor,\n optimise: bool = True,\n initialise_inducing_points: bool = True,\n ):\n \"\"\"\n :param X, Y data to fit the model on\n :param optimise: whether to optimise the model\n :param initialise_inducing_points: whether to initialise inducing points with K-means\n \"\"\"\n self.xmin, self.xmax = X.min(0), X.max(0)\n self.num_dims = X.shape[1]\n\n (\n self.continuous_index,\n self.binary_index,\n self.categorical_index,\n p0,\n p,\n ) = _calculate_features(\n X,\n categorical_feature=self.categorical_feature,\n 
binary_feature=self.binary_feature,\n )\n if self.empirical_measure is not None:\n if not set(self.empirical_measure).issubset(self.continuous_index):\n raise ValueError(\n f\"Empirical measure={self.empirical_measure} should only be used on non-binary/categorical inputs {self.continuous_index}\"\n )\n if self.gmm_measure is not None:\n if len(self.gmm_measure) != self.num_dims:\n raise ValueError(\n f\"Must specify the number of GMM components for each input dimension 1..{self.num_dims}\"\n )\n idx_gmm = np.flatnonzero(self.gmm_measure)\n if not set(idx_gmm).issubset(self.continuous_index):\n raise ValueError(\n f\"GMM measure on inputs {idx_gmm} should only be used on continuous inputs {self.continuous_index}\"\n )\n\n # Measure\n self.estimated_gmm_measures = [None] * self.num_dims\n if self.gmm_measure is not None:\n for i_dim in np.flatnonzero(self.gmm_measure):\n K_for_input_i = self.gmm_measure[i_dim]\n self.estimated_gmm_measures[i_dim] = estimate_one_dim_gmm(\n K=K_for_input_i, X=X[:, i_dim]\n )\n\n self.empirical_locations = [None] * self.num_dims\n self.empirical_weights = [None] * self.num_dims\n\n # scaling\n self.input_flows = [None] * self.num_dims\n for i in self.continuous_index:\n if (self.empirical_measure is not None) and (i in self.empirical_measure):\n continue # skip\n if self.estimated_gmm_measures[i] is not None:\n continue\n d = X[:, i]\n\n if self.use_normalising_flow:\n n = Normalizer(d)\n opt = gpflow.optimizers.Scipy()\n opt.minimize(n.KL_objective, n.trainable_variables)\n self.input_flows[i] = n\n\n self.alpha = None\n self.scaler_y = preprocessing.StandardScaler().fit(Y)\n self.Y_scaled = self.scaler_y.transform(Y)\n # standardize features with empirical measure to avoid cholesky decomposition error\n if self.empirical_measure is not None:\n self.scaler_X_empirical = preprocessing.StandardScaler().fit(\n X[:, self.empirical_measure]\n )\n if not self.use_normalising_flow:\n self.scaler_X_continuous = preprocessing.StandardScaler().fit(\n X[:, self.continuous_index]\n )\n self.X_scaled = self._transform_x(X)\n\n # calculate empirical location and weights after applying scaling X\n if self.empirical_measure is not None:\n for ii in self.empirical_measure:\n self.empirical_locations[ii], self.empirical_weights[ii] = np.unique(\n self.X_scaled[:, ii], return_counts=True\n )\n self.empirical_weights[ii] = (\n self.empirical_weights[ii] / self.empirical_weights[ii].sum()\n ).reshape(-1, 1)\n self.empirical_locations[ii] = self.empirical_locations[ii].reshape(\n -1, 1\n )\n\n assert np.allclose(\n self.X_scaled[:, self.binary_index], X[:, self.binary_index]\n ), \"Flow applied to binary inputs\"\n assert np.allclose(\n self.X_scaled[:, self.categorical_index], X[:, self.categorical_index]\n ), \"Flow applied to categorical inputs\"\n if self.gmm_measure is not None:\n assert np.allclose(\n self.X_scaled[:, np.flatnonzero(self.gmm_measure)],\n X[:, np.flatnonzero(self.gmm_measure)],\n ), \"Flow applied to GMM measure inputs\"\n if self.empirical_measure is not None:\n assert np.allclose(\n np.reshape(\n np.concatenate(\n [\n self._get_x_inverse_transformer(i)(self.X_scaled[:, i])\n for i in self.empirical_measure\n ]\n ),\n X[:, self.empirical_measure].shape,\n order=\"F\",\n ),\n X[:, self.empirical_measure],\n ), \"Flow applied to empirical measure inputs\"\n\n Z = None\n # using sparse GP when size of data > 1000\n if X.shape[0] > 1000 or self.sparse:\n X_inducing = self.X_scaled\n\n if 
initialise_inducing_points:\n if (p0 is None) and (p is None):\n print(\"all features are continuous\")\n kmeans = KMeans(n_clusters=self.num_inducing, random_state=0).fit(\n X_inducing\n )\n Z = kmeans.cluster_centers_\n else:\n Z = initialize_kmeans_with_categorical(\n X_inducing,\n binary_index=self.binary_index,\n categorical_index=self.categorical_index,\n continuous_index=self.continuous_index,\n n_clusters=self.num_inducing,\n )\n else:\n Z = X_inducing[: self.num_inducing, :]\n\n self.m = create_model_oak(\n (self.X_scaled, self.Y_scaled),\n max_interaction_depth=self.max_interaction_depth,\n inducing_pts=Z,\n optimise=optimise,\n p0=p0,\n p=p,\n lengthscale_bounds=self.lengthscale_bounds,\n use_sparsity_prior=self.use_sparsity_prior,\n empirical_locations=self.empirical_locations,\n empirical_weights=self.empirical_weights,\n gmm_measures=self.estimated_gmm_measures,\n share_var_across_orders=self.share_var_across_orders,\n )\n\n def optimise(\n self,\n compile: bool = True,\n ):\n\n print(\"Model prior to optimisation\")\n gpflow.utilities.print_summary(self.m, fmt=\"notebook\")\n self.alpha = None\n t_start = time.time()\n opt = gpflow.optimizers.Scipy()\n opt.minimize(\n self.m.training_loss_closure(),\n self.m.trainable_variables,\n method=\"BFGS\",\n compile=compile,\n )\n gpflow.utilities.print_summary(self.m, fmt=\"notebook\")\n print(f\"Training took {time.time() - t_start:.1f} seconds.\")\n\n def predict(self, X: tf.Tensor, clip=False) -> tf.Tensor:\n \"\"\"\n :param X: inputs to predict the response on\n :param clip: whether to clip X between x_min and x_max along each dimension\n :return: predicted response on input X\n \"\"\"\n if clip:\n X_scaled = self._transform_x(np.clip(X, self.xmin, self.xmax))\n else:\n X_scaled = self._transform_x(X)\n try:\n y_pred = self.m.predict_f(X_scaled)[0].numpy()\n return self.scaler_y.inverse_transform(y_pred)[:, 0]\n except ValueError:\n print(\"test X is outside the range of training input, try clipping X.\")\n\n def get_loglik(self, X: tf.Tensor, y: tf.Tensor, clip=False) -> tf.Tensor:\n \"\"\"\n :param X, y: inputs and output\n :param clip: whether to clip X between x_min and x_max along each dimension\n :return: log likelihood on (X, y)\n \"\"\"\n if clip:\n X_scaled = self._transform_x(np.clip(X, self.xmin, self.xmax))\n else:\n X_scaled = self._transform_x(X)\n\n return (\n self.m.predict_log_density((X_scaled, self.scaler_y.transform(y)))\n .numpy()\n .mean()\n )\n\n def _transform_x(self, X: tf.Tensor) -> tf.Tensor:\n \"\"\"\n :param X: input to do transformation on\n :return: transformation for continuous features: normalising flow with Gaussian measure or standardization with empirical measure\n \"\"\"\n X = apply_normalise_flow(X, self.input_flows)\n if self.empirical_measure is not None:\n X[:, self.empirical_measure] = self.scaler_X_empirical.transform(\n X[:, self.empirical_measure]\n )\n if not self.use_normalising_flow:\n X[:, self.continuous_index] = self.scaler_X_continuous.transform(\n X[:, self.continuous_index]\n )\n return X\n\n def _get_x_inverse_transformer(\n self, i: int\n ) -> Optional[Union[Normalizer, Callable[[tf.Tensor], tf.Tensor]]]:\n \"\"\"\n :param i: index of feature i\n :return: inverse transformation for continuous feature i\n \"\"\"\n assert i in self.continuous_index\n\n if self.empirical_measure is not None and i in self.empirical_measure:\n continuous_i = self.empirical_measure.index(i)\n mean_i, std_i = self.scaler_X_empirical.mean_[continuous_i], np.sqrt(\n 
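# std is the sqrt of the fitted StandardScaler variance; the lambda below undoes the standardisation\n 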
self.scaler_X_empirical.var_[continuous_i]\n )\n transformer_x = lambda x: x * std_i + mean_i\n elif self.gmm_measure is not None and i in self.gmm_measure:\n transformer_x = None\n else:\n transformer_x = self.input_flows[i].bijector.inverse\n return transformer_x\n\n def get_sobol(self, likelihood_variance=False):\n \"\"\"\n :param likelihood_variance: whether to include likelihood noise in Sobol calculation\n :return: normalised Sobol indices for each additive term in the model\n \"\"\"\n num_dims = self.num_dims\n\n delta = 1\n mu = 0\n selected_dims, _ = get_list_representation(self.m.kernel, num_dims=num_dims)\n tuple_of_indices = selected_dims[1:]\n model_indices, sobols = compute_sobol_oak(\n self.m,\n delta,\n mu,\n share_var_across_orders=self.share_var_across_orders,\n )\n if likelihood_variance:\n normalised_sobols = sobols / (\n np.sum(sobols) + self.m.likelihood.variance.numpy()\n )\n else:\n normalised_sobols = sobols / np.sum(sobols)\n self.normalised_sobols = normalised_sobols\n self.tuple_of_indices = tuple_of_indices\n return normalised_sobols\n\n def plot(\n self,\n transformer_y=None,\n X_columns=None,\n X_lists=None,\n top_n=None,\n likelihood_variance=False,\n semilogy=True,\n save_fig: Optional[str] = None,\n tikz_path: Optional[str] = None,\n ylim: Optional[List[float]] = None,\n quantile_range: Optional[List[float]] = None,\n log_axis: Optional[List[bool]] = [False, False],\n grid_range: Optional[List[np.ndarray]] = None,\n log_bin: Optional[List[bool]] = None,\n num_bin: Optional[int] = 100,\n ):\n \"\"\"\n :param transformer_y: transformation of the target (e.g. log); we plot the median and quantiles after the transformation\n :param X_columns: list of feature names\n :param X_lists: list of features from data 1 and data 2; if None, the training features will be plotted on the histogram\n :param top_n: plot the top n effects ranked by Sobol indices\n :param likelihood_variance: whether to add the likelihood variance to the total Sobol\n :param save_fig: directory in which to save the figure\n :param tikz_path: directory in which to save LaTeX for the figures\n :param ylim: list of limits on the y-axis for each feature\n :param quantile_range: list of quantile range of each feature to plot. 
If None, use the whole range\n :param log_axis: Boolean indicating whether to log x-axis and y-axis for the contour plot\n :param grid_range: list of ranges to plot functions on the contour plot for each feature, if None, use linspace of the feature ranges\n :param log_bin: list of Booleans indicating whether to log bins for histograms for each feature\n :param num_bin: number of bins for histogram\n :return: plotting of individual effects\n \"\"\"\n if X_columns is None:\n X_columns = [\"feature %d\" % i for i in range(self.num_dims)]\n\n if X_lists is None:\n X_lists = [None for i in range(len(X_columns))]\n\n if grid_range is None:\n grid_range = [None for i in range(len(X_columns))]\n\n if ylim is None:\n ylim = [None for i in range(len(X_columns))]\n\n if quantile_range is None:\n quantile_range = [None for i in range(len(X_columns))]\n\n if log_bin is None:\n log_bin = [False for i in range(len(X_columns))]\n\n num_dims = self.num_dims\n selected_dims, _ = get_list_representation(self.m.kernel, num_dims=num_dims)\n tuple_of_indices = selected_dims[1:]\n\n self.get_sobol(likelihood_variance=likelihood_variance)\n order = np.argsort(self.normalised_sobols)[::-1]\n fig_list: List[FigureDescription] = []\n if top_n is None:\n top_n = len(order)\n for n in order[: min(top_n, len(order))]:\n if len(tuple_of_indices[n]) == 1:\n i = tuple_of_indices[n][0]\n if i in self.continuous_index:\n fig_list.append(\n plotting_utils.plot_single_effect(\n m=self.m,\n i=i,\n covariate_name=X_columns[i],\n title=f\"{X_columns[i]} (R={self.normalised_sobols[n]:.3f})\",\n x_transform=self._get_x_inverse_transformer(i),\n y_transform=transformer_y,\n semilogy=semilogy,\n plot_corrected_data=False,\n plot_raw_data=False,\n X_list=X_lists[i],\n tikz_path=tikz_path,\n ylim=ylim[i],\n quantile_range=quantile_range[i],\n log_bin=log_bin[i],\n num_bin=num_bin,\n )\n )\n\n elif i in self.binary_index:\n fig_list.append(\n plotting_utils.plot_single_effect_binary(\n self.m,\n i,\n [\"0\", \"1\"],\n title=f\"{X_columns[i]} (R={self.normalised_sobols[n]:.3f})\",\n y_transform=transformer_y,\n semilogy=semilogy,\n tikz_path=tikz_path,\n )\n )\n else:\n fig_list.append(\n plotting_utils.plot_single_effect_categorical(\n self.m,\n i,\n [str(i) for i in range(self.m.kernel.kernels[i].num_cat)],\n title=f\"{X_columns[i]} (R={self.normalised_sobols[n]:.3f})\",\n y_transform=transformer_y,\n semilogy=semilogy,\n tikz_path=tikz_path,\n )\n )\n\n elif len(tuple_of_indices[n]) == 2:\n i = tuple_of_indices[n][0]\n j = tuple_of_indices[n][1]\n if i in self.continuous_index and j in self.continuous_index:\n fig_list.append(\n plotting_utils.plot_second_order(\n self.m,\n i,\n j,\n [X_columns[i], X_columns[j]],\n [\n self._get_x_inverse_transformer(i),\n self._get_x_inverse_transformer(j),\n ],\n transformer_y,\n title=X_columns[i]\n + \"&\"\n + X_columns[j]\n + f\" (R={self.normalised_sobols[n]:.3f})\",\n tikz_path=tikz_path,\n quantile_range=[quantile_range[i], quantile_range[j]],\n log_axis=log_axis,\n xx=grid_range[i],\n yy=grid_range[j],\n num_bin=num_bin,\n )\n )\n\n elif i in self.continuous_index and j in self.binary_index:\n fig_list.append(\n plotting_utils.plot_second_order_binary(\n self.m,\n i,\n j,\n [\"0\", \"1\"],\n [X_columns[i], X_columns[j]],\n x_transforms=[self._get_x_inverse_transformer(i)],\n y_transform=transformer_y,\n title=f\"{X_columns[i]} (R={self.normalised_sobols[n]:.3f})\",\n tikz_path=tikz_path,\n )\n )\n\n elif i in self.binary_index and j in self.continuous_index:\n fig_list.append(\n 
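# (j, i) are swapped here so the continuous feature ends up on the x-axis\n 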
plotting_utils.plot_second_order_binary(\n self.m,\n j,\n i,\n [\"0\", \"1\"],\n [X_columns[j], X_columns[i]],\n x_transforms=[self._get_x_inverse_transformer(j)],\n y_transform=transformer_y,\n title=X_columns[i]\n + \"&\"\n + X_columns[j]\n + f\" (R={self.normalised_sobols[n]:.3f})\",\n tikz_path=tikz_path,\n )\n )\n\n else:\n raise NotImplementedError\n\n if save_fig is not None:\n save_fig_list(fig_list=fig_list, dirname=Path(save_fig))\n\n\ndef _calculate_features(\n X: tf.Tensor, categorical_feature: List[int], binary_feature: List[int]\n):\n \"\"\"\n Calculate feature index sets\n :param X: input data\n :param categorical_feature: indices of categorical features\n :param binary_feature: indices of binary features\n :return:\n continuous_index, binary_index, categorical_index: lists of indices per feature type\n p0: list of probability measures for binary kernels; for continuous/categorical kernels the entry is None\n p: list of probability measures for categorical kernels; for continuous/binary kernels the entry is None\n \"\"\"\n if binary_feature is None and categorical_feature is None:\n # all features are continuous\n p0 = None\n p = None\n continuous_index = list(range(X.shape[1]))\n binary_index = []\n categorical_index = []\n else:\n if binary_feature is not None and categorical_feature is not None:\n overlapping_set = set(binary_feature).intersection(categorical_feature)\n if len(overlapping_set) > 0:\n raise ValueError(f\"Overlapping feature set {overlapping_set}\")\n binary_index, categorical_index, continuous_index, p0, p = [], [], [], [], []\n for j in range(X.shape[1]):\n if binary_feature is not None and j in binary_feature:\n p0.append(1 - X[:, j].mean())\n p.append(None)\n binary_index.append(j)\n elif categorical_feature is not None and j in categorical_feature:\n p0.append(None)\n prob = []\n for jj in np.unique(X[:, j]):\n prob.append(len(np.where(X[:, j] == jj)[0]) / len(X[:, j]))\n p.append(np.reshape(prob, (-1, 1)))\n assert np.abs(p[-1].sum() - 1) < 1e-6\n categorical_index.append(j)\n else:\n p.append(None)\n p0.append(None)\n continuous_index.append(j)\n print(\"indices of binary feature \", binary_index)\n print(\"indices of continuous feature \", continuous_index)\n print(\"indices of categorical feature \", categorical_index)\n\n return continuous_index, binary_index, categorical_index, p0, p\n\n\ndef estimate_one_dim_gmm(K: int, X: np.ndarray) -> MOGMeasure:\n \"\"\"\n :param K: number of mixtures\n :param X: input data\n :return: estimated Gaussian mixture model on the data X\n \"\"\"\n tf.debugging.assert_shapes([(X, (\"N\",))])\n assert K > 0\n gm = GaussianMixture(\n n_components=K, random_state=0, covariance_type=\"spherical\"\n ).fit(X.reshape(-1, 1))\n assert np.allclose(gm.weights_.sum(), 1.0)\n assert gm.means_.shape == (K, 1)\n assert gm.covariances_.shape == (K,)\n assert gm.weights_.shape == (K,)\n return MOGMeasure(\n weights=gm.weights_, means=gm.means_.reshape(-1), variances=gm.covariances_\n )\n","repo_name":"amzn/orthogonal-additive-gaussian-processes","sub_path":"oak/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":31473,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"2024979731","text":"import heapq\nimport sys\ninput = sys.stdin.readline\n\n\ndef abs_key(x):\n return abs(x)\n \nn = int(input())\nh = []\nfor _ in range(n):\n num = int(input())\n # when the value is not 0\n if num != 0:\n heapq.heappush(h, (abs(num), num))\n # when the value is 0\n else:\n # when the heap is empty\n if 
not h:\n print(0)\n else:\n print(heapq.heappop(h)[1])\n \n\n \n \n ","repo_name":"yechan9601/Algorithm-ProblemSolving","sub_path":"algorithm/silver/11286.py","file_name":"11286.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39844303172","text":"import torch\nfrom torch import nn\nfrom torchvision import models\nimport torch.nn.functional as F\n#from torchsummary import summary\n\n##############################################\n# From torchvision implementation of resnet: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=padding, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, padding = 1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes, stride = 1, groups = 1, dilation=1, padding=padding)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, padding=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation=dilation, padding=padding)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = 
self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n##############################################\n# Light scratch network inspired by: https://github.com/vaesl/LRF-Net/blob/master/models/LRF_COCO_300.py\n#http://openaccess.thecvf.com/content_ICCV_2019/papers/Wang_Learning_Rich_Features_at_High-Speed_for_Single-Shot_Object_Detection_ICCV_2019_paper.pdf\n\nclass ConvBlock(torch.nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, relu = True):\n super(ConvBlock, self).__init__()\n self.out_channels = out_planes\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=1, bias=False) \n self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)\n self.relu = nn.ReLU(inplace=False) if relu else None\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n\n return x\n\nclass LargeDownsampler(torch.nn.Module):\n def __init__(self):\n super(LargeDownsampler, self).__init__() # required so the pooling layers register as submodules\n self.maxPool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)\n self.maxPool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)\n self.maxPool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)\n\n def forward(self, x):\n x = self.maxPool1(x)\n x = self.maxPool2(x)\n x_out = self.maxPool3(x)\n return x_out\n\nclass LightScratchNetwork(torch.nn.Module):\n def __init__(self, in_planes, out_planes, stride = 1):\n super(LightScratchNetwork, self).__init__()\n inter_planes = out_planes // 4\n self.downsampler = LargeDownsampler()\n self.commonConvs = nn.Sequential(\n ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1), # inter_planes (not out_planes) so the channels match the 1x1 block below\n ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),\n ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)\n )\n self.conv1_out = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)\n\n self.conv2 = ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)\n self.conv2_out = ConvBlock(inter_planes, out_planes * 2, kernel_size=1, stride=1, relu=False)\n\n self.conv3 = ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)\n self.conv3_out = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)\n\n #self.conv4 = ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)\n #self.conv4_out = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)\n\n def forward(self,x):\n x = self.downsampler(x) # Downsample image\n x = self.commonConvs(x) \n out1 = self.conv1_out(x)\n x = self.conv2(x)\n out2 = self.conv2_out(x)\n x = self.conv3(x)\n out3 = self.conv3_out(x) \n\n return out1, out2, out3\n\nclass TopDownModule(torch.nn.Module):\n def __init__(self, in_ch, out_ch):\n super(TopDownModule, self).__init__()\n self.lateral_layer = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0)\n self.smooth_layer = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight)\n\n def forward(self, bigFeature, smallFeature):\n _,_,H,W = bigFeature.size()\n\n x = F.interpolate(smallFeature, size=(H,W), 
mode='bilinear', align_corners=True) + self.lateral_layer(bigFeature)\n x = self.smooth_layer(x)\n\n return x\n\n\nclass ResNextModel(torch.nn.Module):\n \"\"\"\n This is a basic backbone for SSD.\n The feature extractor outputs a list of 6 feature maps, with the sizes:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n where \"output_channels\" is the same as cfg.BACKBONE.OUT_CHANNELS\n \"\"\"\n def __init__(self, cfg):\n super(ResNextModel, self).__init__()\n\n image_size = cfg.INPUT.IMAGE_SIZE\n output_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS\n self.output_channels = output_channels\n image_channels = cfg.MODEL.BACKBONE.INPUT_CHANNELS\n self.output_feature_size = cfg.MODEL.PRIORS.FEATURE_MAPS\n\n # Backbone model\n self.model = models.resnet34(pretrained = True)\n\n # Top-down modules for feature pyramid network\n #self.TD1 = TopDownModule(output_channels[4], output_channels[5])\n #self.TD2 = TopDownModule(output_channels[3], output_channels[4])\n #self.TD3 = TopDownModule(output_channels[2], output_channels[3])\n #self.TD4 = TopDownModule(output_channels[1], output_channels[2])\n #self.TD5 = TopDownModule(256, output_channels[1])\n\n \"\"\"\n # Light-weight scratch network\n self.LSN = LightScratchNetwork(image_channels, 512)\n\n # Convs with downsampling for bottom-up scheme\n self.ds_conv1 = ConvBlock(output_channels[0], out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)\n self.ds_conv2 = ConvBlock(output_channels[1], out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)\n self.ds_conv3 = ConvBlock(output_channels[2], out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)\n \"\"\"\n\n self.inplanes = output_channels[2]\n self.groups = 1\n self.base_width = 64\n self.dilation = 1\n\n #summary(self.model, (3, 370, 260))\n # Adding extra layers for smaller feature maps: Residual blocks with downsampling\n \n self.extraLayers = nn.Sequential(\n self._make_extra_layer(BasicBlock, 512, 512, 1, stride = 2),\n self._make_extra_layer(BasicBlock, 512, 512, 1, stride = 2),\n self._make_extra_layer(BasicBlock, 512, 256, 1, stride = 2, padding=1),\n self._make_extra_layer(BasicBlock, 256, 256, 1, stride = 2, padding=1),\n self._make_extra_layer(BasicBlock, 256, 256, 1, stride = 2, padding=1)\n #nn.AdaptiveAvgPool2d((1,1))\n #self._make_extra_layer(BasicBlock, 512, 1, stride = 1, padding=0)\n )\n \"\"\"\n nn.Sequential(\n conv3x3(512, 256, stride=1, groups=1,dilation=1,padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 512, kernel_size=(3,4), stride=1,\n padding=0, groups=1, bias=False, dilation=1)\n #conv3x3(256, 512,stride=1,groups=1,dilation=1,padding=0)\n )\n \"\"\"\n\n #self.extraLayers = AddedLayers(output_channels[2])\n #print(self.extraLayers[2])\n\n # Initialize weights in extra layers with kaiming initialization\n for layer in self.extraLayers:\n for m in layer.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.modules():\n if isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n \"\"\"\n for param in self.model.parameters(): # Freeze all parameters while training on waymo\n param.requires_grad 
= False\n\n for param in self.model.layer3.parameters(): # Unfreeze some of the last convolutional\n param.requires_grad = True # layers\n\n for param in self.model.layer4.parameters(): # Unfreeze some of the last convolutional\n param.requires_grad = True # layers\n \"\"\"\n\n\n ## The following function is from the pytorch resnet implementation (modified):\n def _make_extra_layer(self, block, in_planes, out_planes, blocks, stride=1, dilate=False, padding=1):\n norm_layer = nn.BatchNorm2d\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1: #or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(in_planes, out_planes * block.expansion, stride),\n norm_layer(out_planes * block.expansion),\n )\n\n layers = []\n layers.append(block(in_planes, out_planes, stride, downsample, self.groups,\n self.base_width, dilation=previous_dilation, padding = padding, norm_layer=norm_layer))\n #self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(in_planes, out_planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation, padding = padding,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"\n The forward function should output features with shape:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n We have added assertion tests to check this by iterating through out_features,\n where out_features[0] should have the shape:\n shape(-1, output_channels[0], 38, 38),\n \"\"\"\n out_features = []\n\n #lsn_out1, lsn_out2, lsn_out3 = self.LSN(x) # Pass image through light-weight scratch network\n\n x = self.model.conv1(x)\n x = self.model.bn1(x)\n x = self.model.relu(x)\n #x = self.model.maxpool(x)\n x = self.model.layer1(x)\n\n x = self.model.layer2(x)\n #out_features.append(out0)\n\n feature1 = self.model.layer3(x)\n feature2 = self.model.layer4(feature1)\n feature3 = self.extraLayers[0](feature2)\n feature4 = self.extraLayers[1](feature3)\n feature5 = self.extraLayers[2](feature4)\n feature6 = self.extraLayers[3](feature5)\n\n feature7 = self.extraLayers[4](feature6) \n\n #p3 = self.TD3(feature3, feature4)\n #p2 = self.TD4(feature2, p3)\n #p1 = self.TD5(feature1, p2)\n \n \"\"\"\n out_features.append(p1)\n out_features.append(p2)\n out_features.append(p3)\n out_features.append(p4)\n out_features.append(p5)\n out_features.append(feature6)\n \n \n for idx, feature in enumerate(out_features):\n expected_shape = (self.output_channels[idx], self.output_feature_size[idx][1], self.output_feature_size[idx][0])\n assert feature.shape[1:] == expected_shape, \\\n f\"Expected shape: {expected_shape}, got: {feature.shape[1:]} at output IDX: {idx}\"\n print(\"Passed tests\")\n \"\"\"\n return (feature1, feature2, feature3, feature4, feature5, feature6, feature7)\n #return (p1, p2, p3, feature4, feature5, feature6)\n\n","repo_name":"larsmmo/TDT4265-Computer-Vision-and-Deep-Learning","sub_path":"Project/SSD/ssd/modeling/backbone/resnext101.py","file_name":"resnext101.py","file_ext":"py","file_size_in_byte":15252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"11093459304","text":"import tkinter as tk\n\ndef main():\n\n\t\n\twin = 
tk.Tk()\n\twin.title(\"Welcome\")\n\twin.geometry(\"500x300\")\n\n\t# Functions\n\tdef disp_name():\n\t\tname = str(ent1.get())\n\t\ttxt = tk.Text(master=win, height=20,width=25)\n\t\ttxt.grid(column=0,row=1, columnspan=2)\n\t\ttxt.insert(tk.END, name)\n\n\t# Labels\n\tlb1 = tk.Label(text=\"Give me a name\")\n\tlb1.grid(column=0,row=0)\n\n\t# Entry\n\tent1 = tk.Entry()\n\tent1.grid(column=1,row=0)\n\n\t# Button\n\tbtn = tk.Button(text=\"Push Me\",command=disp_name)\n\tbtn.grid(column=2,row=0)\n\n\twin.mainloop()\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"emiludenz/tinkter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10402726489","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom graphviz import Digraph\nimport hashlib\n\n# function to create hash for URL to cluster nodes\ndef create_hash(url):\n hash_object = hashlib.sha256(url.encode('utf-8'))\n hex_dig = hash_object.hexdigest()\n return hex_dig[:10]\n\n# function to get title and links recursively\ndef get_links(url, depth, dot, visited):\n # stop when reaching depth limit or already visited url\n if depth <= 0 or url in visited:\n return\n\n # get html content and parse with beautifulsoup\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # get title and add node to dot graph\n title = soup.title.string.strip()\n node_id = create_hash(url)\n dot.node(node_id, title, href=url)\n\n # add url to visited set\n visited.add(url)\n\n # find all links in page\n for link in soup.find_all('a'):\n href = link.get('href')\n\n # skip if link is empty or just a fragment\n if not href or href.startswith('#'):\n continue\n\n # check if link is absolute or relative\n if href.startswith('http'):\n child_url = href\n else:\n # handle relative links\n child_url = url + '/' + href\n\n # get child title and add node to dot graph\n child_title = link.string.strip()\n child_id = create_hash(child_url)\n dot.node(child_id, child_title, href=child_url)\n\n # add edge between parent and child nodes\n dot.edge(node_id, child_id)\n\n # recursively visit child node\n get_links(child_url, depth-1, dot, visited)\n\n\n# main function to run web scraper and generate dot file\ndef main(title_name, base_url):\n # create digraph object\n dot = Digraph(comment=title_name, format='svg')\n\n # set depth limit for recursion\n max_depth = 2\n\n # create visited set to avoid revisiting same url\n visited = set()\n\n # scrape base url\n get_links(base_url, max_depth, dot, visited)\n\n # generate dot file\n dot.render('output', view=True)\n\n\n# example usage\nmain('Name', 'https://example.com')\n","repo_name":"yanisallouch/Scrape2Viz","sub_path":"webscrapper.py","file_name":"webscrapper.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73846677954","text":"import json, random, os\nfrom threading import Thread\nfrom kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.animation import Animation\nfrom ui.gamefield import BattleField\nfrom ui.cards import BattleCardReserve, BattleCard, EnemyReserveCard, clear_green_blank_cell\nfrom connection_service import give_me_a_partner, my_turn_ends\n\n\nclass GameSession:\n enemy = 'ai'\n status = 'my_step'\n\n\n def __init__(self):\n app = App.get_running_app()\n app.enemy = None\n app.card_from_reserve = None\n\n def 
give_me_a_partner(self):\n give_me_a_partner()\n looking_for_a_partner_thred = Thread(target=self.looking_for_a_partner)\n looking_for_a_partner_thred.daemon = True\n looking_for_a_partner_thred.start()\n\n def looking_for_a_partner(self):\n app = App.get_running_app()\n while True:\n if app.enemy:\n Clock.schedule_once(self.render_field)\n break\n else:\n pass\n\n def render_field(self, dt):\n app = App.get_running_app()\n self.my_player = Player()\n app.root.get_screen(\"gamefield\").ids.my_player_fuel_add.text = next_step_fuel_adding()\n app.root.current = \"gamefield\"\n Clock.schedule_once(self.add_stabs)\n\n def add_stabs(self, td):\n app = App.get_running_app()\n app.card_size = app.root.get_screen(\"gamefield\").ids[\"0,0\"].height\n headquarter_my = BattleCard(field_pos=(0, 0), my_unit=True, attack_points=1, health_points=20, price=0,\n fuel_add=4, name=\"LehrStab\",\n unit_type=\"stab\",\n pos=app.root.get_screen(\"gamefield\").ids['0,0'].pos,\n size=(app.card_size, app.card_size), size_hint=(None, None),\n source=os.path.join('imgs', 'stab1.png'))\n app.root.get_screen(\"gamefield\").children[0].my_units.append(headquarter_my)\n app.root.get_screen(\"gamefield\").children[0].add_widget(headquarter_my)\n\n headquarter_enemy = BattleCard(field_pos=(2, 4), my_unit=False, attack_points=1, health_points=20,\n price=0, fuel_add=4, unit_type=\"stab\", name=\"LehrStab\",\n pos=app.root.get_screen(\"gamefield\").ids['2,4'].pos,\n size=(app.card_size, app.card_size),\n size_hint=(None, None), source=os.path.join('imgs', 'stab1.png'))\n app.root.get_screen(\"gamefield\").children[0].add_widget(headquarter_enemy)\n self.my_player.load_deck()\n self.give_reserve_card(6)\n render_enemy_new_reserve_card(6)\n app.root.get_screen(\"gamefield\").ids.my_player_fuel.text = str(count_current_fuel())\n\n if app.enemy['first']:\n block_ui_for_enemy_turn()\n app.enemies_end_turn = False\n app.enemy_turn = True\n app.enemies_turn = False\n wait_enemy_turn_thread = Thread(target=self.wait_another_player)\n wait_enemy_turn_thread.daemon = True\n wait_enemy_turn_thread.start()\n else:\n app.enemy_turn = False\n\n def give_reserve_card(self, num_cards):\n app = App.get_running_app()\n random.shuffle(self.my_player.deck)\n random_init_reserv_cards = [self.my_player.deck.pop(x) for x in num_cards * [0]]\n\n for unit in random_init_reserv_cards:\n app.root.get_screen(\"gamefield\").ids.reserve_cards.children[0].add_widget(\n BattleCardReserve(attack_points=unit.attack_points, health_points=unit.health_points,\n source=os.path.join('imgs', unit.img), price=unit.price, fuel_add=unit.fuel_add,\n unit_type=unit.unit_type, size=(app.card_size//1.5, app.card_size//1.5)))\n\n def next_turn(self):\n app = App.get_running_app()\n app.card_in_reserve = None\n app.root.get_screen(\"gamefield\").ids.my_player_fuel.text = str(\n int(app.root.get_screen(\"gamefield\").ids.my_player_fuel.text) + int(next_step_fuel_adding()))\n self.give_reserve_card(1)\n # units_to_be_free = [x for x in app.root.get_screen(\"gamefield\").children[0].children if\n # str(type(x)) == \"\"]\n # for unit in units_to_be_free:\n # unit.enable()\n block_ui_for_enemy_turn()\n my_turn_ends()\n if app.selected:\n app.selected['item'].canvas.after.get_group('a')[0].rgba = (1, 1, 1, 0.0)\n app.selected = None\n app.enemy_turn = True\n app.enemies_end_turn = False\n app.enemies_turn = False\n clear_green_blank_cell()\n wait_enemy_turn_thread = Thread(target=self.wait_another_player)\n wait_enemy_turn_thread.daemon = True\n 
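# daemon watcher thread: wait_another_player() polls the shared app state, renders the enemy's moves, and re-enables our units once their turn ends\n 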
wait_enemy_turn_thread.start()\n\n def wait_another_player(self):\n app = App.get_running_app()\n while app.enemy_turn:\n if app.card_from_reserve:\n render_enemies_reserve_spawn()\n if app.enemies_end_turn:\n unblock_my_units()\n render_enemy_new_reserve_card(1)\n app.enemy_turn = False\n if app.enemies_turn:\n render_enemies_turn()\n app.enemies_turn = None\n\n\ndef count_current_fuel(my_army=True):\n app = App.get_running_app()\n temp_fuel = int(app.root.get_screen(\"gamefield\").ids.my_player_fuel.text)\n for unit in app.root.get_screen(\"gamefield\").children[0].children:\n if unit.__class__.__name__ == \"BattleCard\" and unit.my_unit == my_army:\n temp_fuel += unit.fuel_add\n return temp_fuel\n\n\ndef next_step_fuel_adding(my_army=True):\n app = App.get_running_app()\n temp_fuel = 0\n for unit in app.root.get_screen(\"gamefield\").children[0].children:\n if unit.__class__.__name__ == \"BattleCard\" and unit.my_unit == my_army:\n temp_fuel += unit.fuel_add\n\n if temp_fuel >= 0:\n temp_fuel = \"+\" + str(temp_fuel)\n else:\n temp_fuel = str(temp_fuel)\n\n return temp_fuel\n\n\ndef render_enemy_new_reserve_card(num_cards):\n app = App.get_running_app()\n enemy_reserve_cards = [EnemyReserveCard(size=(app.card_size // 1.5, app.card_size // 1.5)) for x in range(num_cards)]\n for enemy_reserve_card in enemy_reserve_cards:\n app.root.get_screen(\"gamefield\").ids.enemy_reserve_cards.children[0].add_widget(enemy_reserve_card)\n\n\nclass Player:\n deck = []\n\n def load_deck(self):\n with open('work_files/player1_units.json', 'r') as f:\n player_server_deck = json.load(f)\n self.deck = [PlayerUnit(x[\"name\"], x[\"img\"], x['attack_points'], x['health_points'],\n x[\"price\"], x[\"fuel_add\"], x['unit_type']) for x in player_server_deck]\n\n\nclass PlayerUnit:\n\n def __init__(self, name, img, attack_points, health_points, price, fuel_add, unit_type):\n self.name = name\n self.img = img\n self.attack_points = attack_points\n self.health_points = health_points\n self.price = price\n self.fuel_add = fuel_add\n self.unit_type = unit_type\n\n\ndef block_ui_for_enemy_turn():\n app = App.get_running_app()\n units_to_be_blocked = [x for x in app.root.get_screen(\"gamefield\").children[0].children if\n str(type(x)) == \"\" and x.my_unit]\n for unit in units_to_be_blocked:\n unit.disable()\n\n\ndef unblock_my_units():\n app = App.get_running_app()\n units_to_be_unblocked = [x for x in app.root.get_screen(\"gamefield\").children[0].children if\n str(type(x)) == \"\" and x.my_unit]\n for unit in units_to_be_unblocked:\n unit.enable()\n\n\ndef render_enemies_reserve_spawn():\n app = App.get_running_app()\n unit_relative_pos = app.root.get_screen(\"gamefield\").ids.enemy_reserve_cards.children[0].children[\n app.card_from_reserve[\"card_in_reserve\"]].pos\n start_pos = [unit_relative_pos[0], app.root.get_screen(\"gamefield\").ids.enemy_reserve_cards.pos[1]]\n\n new_enemy_unit = BattleCard(field_pos=app.card_from_reserve[\"finish_pos\"], my_unit=False,\n name=app.card_from_reserve[\"unit_name\"],\n attack_points=app.card_from_reserve[\"unit_defence_points\"],\n health_points=app.card_from_reserve[\"unit_defence_points\"],\n fuel_add=app.card_from_reserve[\"unit_name\"],\n unit_type=app.card_from_reserve[\"unit_type\"], price=app.card_from_reserve[\"unit_name\"],\n size_hint=(None, None),\n size=(app.card_size, app.card_size), source=app.card_from_reserve[\"unit_source\"],\n pos=start_pos)\n app.root.get_screen(\"gamefield\").children[0].add_widget(new_enemy_unit)\n\n animation = 
Animation(pos=app.root.get_screen(\"gamefield\").ids[\",\".join(str(x) for x in new_enemy_unit.field_pos)].pos, duration=0.3)\n animation.start(new_enemy_unit)\n app.root.get_screen(\"gamefield\").ids.enemy_reserve_cards.children[0].remove_widget(\n app.root.get_screen(\"gamefield\").ids.enemy_reserve_cards.children[0].children[\n app.card_from_reserve[\"card_in_reserve\"]])\n app.card_from_reserve = None\n\n\ndef render_enemies_turn():\n app = App.get_running_app()\n for x in app.root.get_screen(\"gamefield\").children[0].children:\n if str(type(x)) == \"\" and not x.my_unit and tuple(x.field_pos) == tuple(\n app.enemies_turn[\"start_pos\"]):\n who_moving = x\n\n if not app.enemies_turn[\"fire\"]:\n end_point = [x for x in app.root.get_screen(\"gamefield\").ids.gamefield.children if\n tuple(x.field_pos) == tuple(app.enemies_turn[\"finish_pos\"])][0]\n animation = Animation(pos=end_point.pos, duration=0.3)\n animation.start(who_moving)\n who_moving.field_pos = tuple(app.enemies_turn[\"finish_pos\"])\n\n else:\n for x in app.root.get_screen(\"gamefield\").children[0].children:\n if str(type(x)) == \"\" and x.my_unit and tuple(x.field_pos) == tuple(\n app.enemies_turn[\"finish_pos\"]):\n end_point = x\n end_point.attack(who_moving)\n\n\n\n","repo_name":"AlterFritz88/panzershrek","sub_path":"ui/game_session.py","file_name":"game_session.py","file_ext":"py","file_size_in_byte":10498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40823941862","text":"import unittest\nfrom tests.unit_test_helper.console_test_helper import *\n\n\nclass TestOutput(unittest.TestCase):\n\n def test(self):\n temp_globals, temp_locals, content, output = execfile(\"lab16/ch016_t18_lambda_expressions.py\")\n self.assertEqual(\"IXXX aXXmX aXXXnXoXXXXXtXhXeXXXXrX sXXXXeXcXXXrXeXt mXXeXsXXXsXaXXXXXXgXeX!XX\",\n temp_locals['garbled'])\n self.assertEqual(\"I am another secret message!\", temp_locals['message'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"wongcyrus/ite3101_introduction_to_programming","sub_path":"tests/lab16/test_ch016_t18_lambda_expressions.py","file_name":"test_ch016_t18_lambda_expressions.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"20148537433","text":"import os\nimport shutil\nimport pickle\nimport numpy as np\nfrom itertools import groupby\nfrom matplotlib import pyplot as plt\n\nfrom CONSTANTS import *\nfrom mlp_generator import MLPSearchSpace\n\n\n########################################################\n# DATA PROCESSING #\n########################################################\n\n\ndef unison_shuffled_copies(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]\n\n\n########################################################\n# LOGGING #\n########################################################\n\n\ndef clean_log():\n filelist = os.listdir('LOGS')\n for file in filelist:\n if os.path.isfile('LOGS/{}'.format(file)):\n os.remove('LOGS/{}'.format(file))\n\n\ndef log_event():\n dest = 'LOGS'\n while os.path.exists(dest):\n dest = 'LOGS/event{}'.format(np.random.randint(10000))\n os.mkdir(dest)\n filelist = os.listdir('LOGS')\n for file in filelist:\n if os.path.isfile('LOGS/{}'.format(file)):\n shutil.move('LOGS/{}'.format(file),dest)\n\n\ndef get_latest_event_id():\n all_subdirs = ['LOGS/' + d for d in os.listdir('LOGS') if os.path.isdir('LOGS/' + d)]\n latest_subdir = 
max(all_subdirs, key=os.path.getmtime)\n return int(latest_subdir.replace('LOGS/event', ''))\n\n\n########################################################\n# RESULTS PROCESSING #\n########################################################\n\n\ndef load_nas_data():\n event = get_latest_event_id()\n data_file = 'LOGS/event{}/nas_data.pkl'.format(event)\n with open(data_file, 'rb') as f:\n data = pickle.load(f)\n return data\n\n\ndef sort_search_data(nas_data):\n val_accs = [item[1] for item in nas_data]\n sorted_idx = np.argsort(val_accs)[::-1]\n nas_data = [nas_data[x] for x in sorted_idx]\n return nas_data\n\n########################################################\n# EVALUATION AND PLOTS #\n########################################################\n\ndef get_top_n_architectures(n):\n data = load_nas_data()\n data = sort_search_data(data)\n search_space = MLPSearchSpace(TARGET_CLASSES)\n print('Top {} Architectures:'.format(n))\n for seq_data in data[:n]:\n print('Architecture', search_space.decode_sequence(seq_data[0]))\n print('Validation Accuracy:', seq_data[1])\n\n\ndef get_nas_accuracy_plot():\n data = load_nas_data()\n accuracies = [x[1] for x in data]\n plt.plot(np.arange(len(data)), accuracies)\n plt.show()\n\n\ndef get_accuracy_distribution():\n event = get_latest_event_id()\n data = load_nas_data()\n accuracies = [x[1]*100. for x in data]\n accuracies = [int(x) for x in accuracies]\n sorted_accs = np.sort(accuracies)\n count_dict = {k: len(list(v)) for k, v in groupby(sorted_accs)}\n plt.bar(list(count_dict.keys()), list(count_dict.values()))\n plt.show()\n\n\n\n","repo_name":"codeaway23/MLPNAS","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"19245076657","text":"__author__ = 'Alon Ziv'\r\n\r\nimport numpy as np, os, matplotlib.pyplot as plt, cv2\r\n\r\n\r\ndef myTensTnsor(dim):\r\n if dim.__class__ != ().__class__:\r\n return\r\n for item in dim:\r\n if type(item) != int or item < 0:\r\n return\r\n return np.ones(dim, dtype=int) * 10\r\n\r\n\r\ndef myFilesList(dir=os.getcwd()):\r\n if dir.__class__ != ''.__class__:\r\n return\r\n if os.path.exists(dir) == False:\r\n dir = os.getcwd()\r\n return [file for file in os.listdir(dir) if os.path.isfile(os.path.join(dir, file))]\r\n\r\n\r\ndef myImageMedian(img):\r\n if img.__class__ != np.ndarray:\r\n return\r\n return np.median(img)\r\n\r\n\r\ndef myImageNegative(img):\r\n if img.__class__ != np.ndarray:\r\n return\r\n return 255 - img\r\n\r\n\r\ndef myFastToneReplacement(img, fromA, toB, equalsC):\r\n myImg = img.copy()\r\n myImg[np.logical_and(myImg >= fromA, myImg <= toB)] = equalsC\r\n return myImg\r\n\r\n\r\ndef myRGBsplit(img):\r\n if img.__class__ != np.ndarray:\r\n return\r\n b, g, r = cv2.split(img)\r\n R = cv2.merge((r, g * 0, b * 0))\r\n G = cv2.merge((r * 0, g, b * 0))\r\n B = cv2.merge((r * 0, g * 0, b))\r\n plt.figure()\r\n plt.subplot(131)\r\n plt.title('R')\r\n plt.axis('off')\r\n plt.imshow(R)\r\n plt.subplot(132)\r\n plt.title('G')\r\n plt.axis('off')\r\n plt.imshow(G)\r\n plt.subplot(133)\r\n plt.title('B')\r\n plt.axis('off')\r\n plt.imshow(B)\r\n plt.show()\r\n\r\n\r\ndef myMedianFilt(img, filtOrder):\r\n if img.__class__ != np.ndarray or filtOrder.__class__ != int or filtOrder < 0:\r\n return\r\n shape = img.shape\r\n newImg = np.zeros(shape)\r\n imgPadded = cv2.copyMakeBorder(img, filtOrder, filtOrder, filtOrder, filtOrder, cv2.BORDER_CONSTANT, 0)\r\n window = 2 * 
filtOrder + 1\r\n for i in range(shape[0]):\r\n for j in range(shape[1]):\r\n newImg[i, j] = np.median(imgPadded[i:i + window, j:j + window])\r\n return newImg\r\n\r\ndef myMaskKernel(img, maskKernel):\r\n if img.__class__ != np.ndarray or maskKernel.__class__ != np.ndarray:\r\n return\r\n shape = img.shape\r\n newImg = np.zeros(shape)\r\n windowy = maskKernel.shape[0]\r\n windowx = maskKernel.shape[1]\r\n y = (windowy - 1) // 2\r\n x = (windowx - 1) // 2\r\n imgPadded = cv2.copyMakeBorder(img, y, y, x, x, cv2.BORDER_REPLICATE)\r\n for i in range(shape[0]):\r\n for j in range(shape[1]):\r\n newImg[i, j] = np.sum(imgPadded[i:i + windowy, j:j + windowx] * maskKernel)\r\n return newImg\r\n\r\ndef myPlotShape(img):\r\n if img.__class__ != np.ndarray:\r\n return\r\n border = np.ones((img.shape[0], np.int64(img.shape[1] * 0.1))) * 255\r\n img = np.concatenate((border, img, border), axis=1)\r\n plt.figure()\r\n plt.subplot(411)\r\n plt.imshow(img, cmap='gray')\r\n plt.axis('off')\r\n plt.subplot(412)\r\n plt.imshow(np.concatenate((img, img), axis=1), cmap='gray')\r\n plt.axis('off')\r\n plt.subplot(413)\r\n whiteSpace = np.ones(img.shape) * 255\r\n plt.imshow(np.concatenate((img, whiteSpace, whiteSpace, img), axis=1), cmap='gray')\r\n plt.axis('off')\r\n plt.subplot(414)\r\n plt.imshow(np.concatenate((img, img, img, img, img), axis=1), cmap='gray')\r\n plt.axis('off')\r\n","repo_name":"haimadrian/AlgorithmsInMultimediaUsingPython","sub_path":"All_Sols/Exams/2017/26.09.17 - done/Exam_26092017.py","file_name":"Exam_26092017.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12317845212","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nimport html2text\nfrom googlesearch import search\n\ndef search_google(query: str) -> str:\n \"\"\"\n Search and retriev the text content from the first result of Google engine.\n :param query: user_input\n :return: text from html of the Google first result page.\n \"\"\"\n for url in search(query, num_results=1):\n print(url)\n req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n html = urlopen(req).read()\n soup = BeautifulSoup(html, features=\"html.parser\")\n extractedText = soup.get_text()\n h = html2text.HTML2Text()\n h.ignore_links = True\n blogPost = h.handle(extractedText)\n print(blogPost)\n return blogPost\n\n","repo_name":"afmaster/GPyT","sub_path":"utils/google_search.py","file_name":"google_search.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2865830754","text":"from gendiff.formatters import plain, pretty, json\n\n\ndef choose(out_format):\n formatters = {\n 'plain': plain,\n 'pretty': pretty,\n 'json': json,\n 'stylish': pretty,\n }\n if not out_format:\n return pretty\n return formatters[out_format]\n","repo_name":"IpainI/python-project-lvl2","sub_path":"gendiff/formatters/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36675058086","text":"import time\nfrom pathlib import Path\n\nimport pytest\n\nimport extrainterpreters\nfrom extrainterpreters import interpreters\n\ndef test_running_plain_call_works():\n from math import cos\n\n with extrainterpreters.Interpreter() as interp:\n assert interp.run(cos, 0.) 
== 1.0\n\ndef test_interpreter_is_destroyed_after_context_exits():\n with extrainterpreters.Interpreter() as interp:\n intno = interp.intno\n assert intno in interpreters.list_all()\n\n assert intno not in interpreters.list_all()\n\ndef test_extrainterpreters_list_all():\n with extrainterpreters.Interpreter() as interp:\n intno = interp.intno\n assert interp in extrainterpreters.list_all()\n\n assert interp not in extrainterpreters.list_all()\n\ndef test_interpreter_closes_out_of_scope():\n intno = None\n def inner():\n nonlocal intno\n interp = extrainterpreters.Interpreter().start()\n intno = interp.intno\n assert intno in interpreters.list_all()\n inner()\n assert intno not in interpreters.list_all()\n\n\ndef test_interpreter_cant_be_started_twice():\n interp = extrainterpreters.Interpreter().start()\n with pytest.raises(RuntimeError):\n interp.start()\n\n\ntime_res = 0.02\n\ndef test_running_threaded_call_works():\n from math import cos\n\n interp = extrainterpreters.Interpreter().start()\n assert interp.run_in_thread(cos, 0)\n while not interp.done():\n time.sleep(time_res)\n assert interp.result() == 1.0\n interp.close()\n\n\n@pytest.fixture\ndef add_current_path():\n import sys\n path = sys.path[:]\n sys.path.insert(0, str(Path(__file__).absolute().parent))\n try:\n yield\n finally:\n sys.path[:] = path\n\n\ndef test_running_threaded_call_works_local(add_current_path):\n import sys\n import helper_01\n with extrainterpreters.Interpreter() as interp:\n assert interp.run_in_thread(helper_01.to_run_remotely)\n while not interp.done():\n time.sleep(time_res)\n assert interp.result() == 42\n\n\n@pytest.mark.skip(\"Closing the interpreter fails if 'is_running' is called while it is running. Wait for fix in interpreters or workaround\")\ndef test_interpreter_is_running(add_current_path):\n import sys\n import helper_01\n with extrainterpreters.Interpreter() as interp:\n assert interp.run_in_thread(helper_01.to_run_remotely)\n while not interp.done():\n #assert interpreters.is_runnining(interp.intno)\n assert interp.is_running()\n time.sleep(time_res)\n assert not interp.is_running()\n\n\ndef test_interpreter_fails_trying_to_send_data_larger_than_buffer():\n with extrainterpreters.Interpreter() as interp:\n with pytest.raises(BufferError):\n interp.run(str.upper, \"a\" * (extrainterpreters.BFSZ))\n\n\ndef test_interpreter_fails_trying_to_receive_data_larger_than_buffer(add_current_path):\n import helper_01\n with extrainterpreters.Interpreter() as interp:\n with pytest.raises(interpreters.RunFailedError):\n interp.run(helper_01.big_return_payload)\n\n\ndef test_extrainterpreters_can_be_imported_in_sub_interpreter():\n with extrainterpreters.Interpreter() as interp:\n interp.run_string(\"import extrainterpreters\")\n\n\ndef test_interpreter_target_argument(add_current_path):\n import sys\n import helper_01\n interp = extrainterpreters.Interpreter(\n target=helper_01.to_run_remotely)\n assert not interp.intno\n interp.start()\n assert interp.intno\n interp.join()\n assert interp.result() == 42\n\n\ndef test_interpreter_run_can_instantiate_class(add_current_path):\n import sys\n import helper_01\n with extrainterpreters.Interpreter() as interp:\n assert interp.run_in_thread(helper_01.RemoteClass)\n while not interp.done():\n time.sleep(time_res)\n assert isinstance(interp.result(), helper_01.RemoteClass)\n\n\ndef test_interpreter_run_can_call_classmethod(add_current_path):\n import sys\n import helper_01\n with extrainterpreters.Interpreter() as interp:\n assert 
interp.run_in_thread(helper_01.RemoteClass.to_run_remotely_2)\n while not interp.done():\n time.sleep(time_res)\n assert interp.result() == 42\n\n\ndef test_interpreter_run_can_call_callable(add_current_path):\n import sys\n import helper_01\n with extrainterpreters.Interpreter() as interp:\n inst = helper_01.RemoteClass()\n interp.run_in_thread(inst)\n while not interp.done():\n time.sleep(time_res)\n assert interp.result() == 23\n\n\n","repo_name":"jsbueno/extrainterpreters","sub_path":"tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"61"} +{"seq_id":"432930129","text":"from engine import Entity\nfrom engine import Vector\nimport math\nimport cairo\nfrom rob_components import *\n\nimport time\nimport random\n\nrobody = [[0,0], [0,10], [15,5]]\n\n\nclass Robot(Entity):\n def __init__(self, name='', polygon=robody, projectile_cb=None, scanner_cb=None, laser_cb=None):\n super(Robot, self).__init__(polygon=polygon)\n self.name = name\n self.color = None\n self.position = Vector(0, 0)\n self.velocity = Vector(0, 0)\n\n #self.projectile_cb = projectile_cb\n\n self.alive = True\n\n self.hull = 100\n self.kills = []\n\n self.scanner = ArcScanner(self, scanner_cb)\n self.cannon = ATCannon(source=self, projectile_cb=projectile_cb)\n self.shield = EnergyShield()\n self.laser = LaserEmitter(sourcebot=self, laser_cb=laser_cb)\n\n self.cannon_iface = self.cannon.gen_interface()\n\n self.modules = []\n self.modules.append(self.scanner)\n self.modules.append(self.cannon)\n self.modules.append(self.shield)\n self.modules.append(self.laser)\n\n def get_position(self):\n return self.position\n\n def get_heading(self):\n return self.heading\n\n def get_hull(self):\n return self.hull\n\n def get_velocity(self):\n return self.velocity\n\n def scan(self, angle):\n rvals = self.scanner.scan(angle, self.position, self.color)\n return rvals\n\n def forward(self, val):\n vx = math.cos(math.radians(self.heading)) * val\n vy = math.sin(math.radians(self.heading)) * val\n self.velocity = Vector(vx, vy)\n\n def turn(self, val):\n self.rotation = val\n\n def execute(self):\n # to be initialized as a thread\n rval = random.randint(-3, 3)\n s_angle = random.randint(0, 360)\n self.scanner.set_arc_width(30)\n while True:\n if self.alive:\n time.sleep(.03)\n self.forward(1)\n if random.randint(0, 4) == 0:\n while rval >= 3 and rval <= -3:\n rval += random.randint(-3,3)\n if random.randint(0, 50) == 0:\n rval *= -1\n self.turn(rval)\n tvals = self.scan(s_angle)\n if tvals is not False:\n s_angle += 30\n s_angle %=360\n if len(tvals) > 0:\n for t in tvals:\n d = None\n if d is None:\n d = t\n elif self.position.distance(t) < self.position.distance(d):\n d = t\n angle = Vector(d).angle_between(self.position)\n #self.fire(angle)\n self.cannon_iface.fire(angle)\n self.laser.fire(angle)\n s_angle = angle\n self.shield.modulate(random.randint(0,2))\n else:\n return\n\n def draw(self, ctx):\n #body\n ctx.set_operator(cairo.OPERATOR_ADD)\n ctx.set_line_width(1)\n\n col = [*self.color] + [0.7]\n\n ctx.set_source_rgba(*col)\n ctx.move_to(self.body.points[0].x, self.body.points[0].y)\n for point in self.body.points:\n ctx.line_to(point[0], point[1])\n ctx.line_to(self.body.points[0].x, self.body.points[0].y)\n ctx.fill()\n \n ctx.set_source_rgb(*self.color)\n ctx.move_to(self.body.points[0].x, self.body.points[0].y)\n for point in self.body.points:\n ctx.line_to(point[0], point[1])\n 
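# close the polygon outline back to the first vertex before stroking\n 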
ctx.line_to(self.body.points[0].x, self.body.points[0].y)\n ctx.stroke()\n\n # energy shield\n shield_color = self.shield.color + [.3]\n ctx.set_source_rgba(*shield_color)\n ctx.arc(self.position[0], self.position[1], self.body.radius+1, 0, math.pi*2)\n ctx.fill()\n self.draw_data(ctx)\n\n def draw_data(self, ctx):\n #circle\n ctx.set_line_width(1)\n ctx.set_source_rgb(0, 1, 0)\n ctx.arc(self.position[0], self.position[1], self.body.radius+1, 0, math.pi*2)\n ctx.stroke()\n\n #data\n self.draw_text(ctx)\n\n def draw_text(self, ctx):\n ctx.set_source_rgb(*self.color)\n ctx.select_font_face(\"Courier\", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)\n ctx.set_font_size(12)\n (x, y, width, height, dx, dy) = ctx.text_extents(self.name)\n\n ctx.move_to(self.position[0]-width/2, self.position[1]+self.body.radius + height + 2)\n ctx.show_text(self.name)\n\n hull_str = str(self.hull)\n r = 1-.01*self.hull\n g = .01*self.hull\n ctx.set_source_rgb(r, g, 0)\n (x, y, width, height, dx, dy) = ctx.text_extents(hull_str)\n ctx.move_to(self.position[0] - width/2, self.position[1]- self.body.radius - height + 2)\n ctx.show_text(hull_str)\n\n\nif __name__ == '__main__':\n Robot()\n\n","repo_name":"fostrb/robotfight","sub_path":"cairobot.py","file_name":"cairobot.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6597495524","text":"from configs import config\nfrom hkube_python_wrapper import Algorunner\n\ndef main():\n print(\"starting algorithm runner\")\n conf = config.Config\n alg = Algorunner()\n alg.loadAlgorithm(conf.algorithm)\n alg.connectToWorker(conf.socket)\n\nif __name__ == \"__main__\":\n main()","repo_name":"stevefan1999-personal/hkube","sub_path":"core/algorithm-builder/environments/python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"18572818701","text":"import json\nimport requests\n\ndef test_api(url):\n # Data to predict\n data = {\n 'message': 'the film is good'\n }\n\n # Convert the data to JSON\n #data_json = json.dumps(data)\n\n # Send the POST request to the Flask API\n response = requests.post(url, data=data)\n\n # Display the Flask API response\n #prediction = response.json()[0]\n\n # Display the Flask API response\n print(response.text)\n print(response.status_code)\n \n \n \ntest_api(\"http://127.0.0.1:3001/predict_post\")","repo_name":"IAnthonyRenard/SentimentsClassification","sub_path":"api/p7/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"15110721508","text":"from django.db import models\nfrom authentication.models import User\nfrom django.utils import timezone\nfrom imovel.models import Room\n\nimport uuid\n\nclass Reservation(models.Model):\n\n STATUS_TYPE_CHOICES = (\n (1, 'Active'),\n (2, 'Canceled'),\n (3, 'Pending'),\n )\n\n customer = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n help_text='Customer Name')\n no_of_adults = models.PositiveIntegerField(\n default=1,\n help_text='Number of Adult Members')\n no_of_childrens = models.PositiveIntegerField(\n default=0,\n help_text='Number of Children')\n reservation_date_time = models.DateTimeField(\n default=timezone.now,\n help_text='Reservation Date Time')\n expected_arrival_date_time = models.DateTimeField(\n default=timezone.now,\n 
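# the callable itself is stored, so Django evaluates timezone.now when each instance is created, not once at import\n 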
help_text='Expected Arrival Date Time')\n expected_departure_date_time = models.DateTimeField(\n default=timezone.now,\n help_text='Expected Departure Date Time')\n discount_percent = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n help_text='Discount percentage')\n total_price = models.DecimalField(\n max_digits=10,\n decimal_places=2,\n help_text='Total price')\n updated_at = models.DateTimeField(\n auto_now=True,\n help_text='Last update date')\n room_id = models.ForeignKey(\n Room,\n on_delete=models.CASCADE,\n help_text='Room id')\n status = models.PositiveSmallIntegerField(\n choices=STATUS_TYPE_CHOICES,\n default=1)\n reservation_code = models.UUIDField(\n default=uuid.uuid4,\n editable=False)\n\n class Meta:\n ordering = [\"-id\"]\n verbose_name = 'Reservation'\n verbose_name_plural = 'Reservations'\n\n def __str__(self):\n return '({0}) {1}'.format(self.reservation_code, self.customer)\n\n\n# Complete these two classes. Customize the Payment class\n\nclass CheckIn(models.Model):\n\n checkin_code = models.UUIDField(\n default=uuid.uuid4,\n editable=False)\n reservation_id = models.ForeignKey(\n Reservation,\n on_delete=models.CASCADE)\n checkin_date_time = models.DateTimeField(\n default=timezone.now,\n help_text='Checkin Date')\n\n def __str__(self):\n return '({0}) {1} {2}'.format(self.checkin_code, self.reservation_id, self.checkin_date_time)\n\n\nclass CheckOut(models.Model):\n\n checkout_code = models.UUIDField(\n default=uuid.uuid4,\n editable=False)\n checkin_id = models.ForeignKey(\n CheckIn,\n on_delete=models.CASCADE)\n checkout_date_time = models.DateTimeField(\n default=timezone.now,\n help_text='Checkout Date')\n\n def __str__(self):\n return '({0}) {1} {2}'.format(self.checkout_code, self.checkin_id, self.checkout_date_time)\n","repo_name":"ivandrosilva12/egov_api","sub_path":"reservation/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"518964400","text":"import vt\nimport os, sys, csv, argparse, configparser\nfrom ratelimit import limits, sleep_and_retry\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\nconfig = configparser.ConfigParser()\nconfig.read(f'{script_path}/config.ini')\n\n## Rate Limiting. 
Public API only allows 4 lookups/min\nCALLS = config.getint('API','API_CALLS')\nRATE_LIMIT=config.getint('API','API_RATE')\n\nCSV_OBJ=[]\n\ndef open_vt_session(apikey):\n client = vt.Client(apikey)\n return client\n\ndef close_vt_session(client):\n client.close()\n\n@sleep_and_retry\n@limits(calls=CALLS, period=RATE_LIMIT)\ndef check_limit():\n return\n\ndef get_vt_file_obj(client,hash,AV_ENGINES):\n try:\n file = client.get_object(f'/files/{hash}')\n print(f\"Getting info for {hash}\")\n for av in AV_ENGINES:\n file_obj = {'hash':hash,'file_type':file.exiftool['FileTypeExtension'],'av_engine':file.last_analysis_results[av]['engine_name'],'av_category':file.last_analysis_results[av]['category'],'av_signature':file.last_analysis_results[av]['result']}\n CSV_OBJ.append(file_obj)\n except:\n print(f\"Unable to get info for {hash}\")\n file_obj = {'hash':f'{hash} NOT FOUND'}\n CSV_OBJ.append(file_obj)\n\ndef csv_writer(filename):\n csv_fields = ['hash','file_type','av_engine','av_category','av_signature']\n\n with open(filename, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_fields)\n writer.writeheader()\n writer.writerows(CSV_OBJ)\n\ndef arg_parse():\n parser = argparse.ArgumentParser(\"VirusTotal hash lookup tool.\")\n parser.add_argument(\"-i\", \"--infile\", help=\"CSV File of hashes to search.\")\n parser.add_argument(\"-v\", \"--value\", type=str, help=\"Hash you'd like to search.\")\n parser.add_argument(\"-s\", \"--save\", type=str, help=\"Save the CSV file with a different filename.\")\n return parser\n\ndef main(args):\n if not args.infile and not args.value:\n print(\"Input file or individual hash is required to run this tool\")\n sys.exit()\n elif args.infile and args.value:\n print(\"Input file and individual hash were provided. 
Please choose one.\")\n sys.exit()\n else:\n filename = args.save or \"lookupresults.csv\"\n client = open_vt_session(config.get('API','API_KEY'))\n AV_ENGINES = config.get('AV','ENGINES').split(',')\n if args.value:\n check_limit()\n results = get_vt_file_obj(client,args.value,AV_ENGINES)\n else:\n with open(args.infile, 'r') as read_obj:\n csv_reader = csv.DictReader(read_obj)\n for row in csv_reader:\n check_limit()\n get_vt_file_obj(client,row['hash'],AV_ENGINES)\n csv_writer(filename)\n close_vt_session(client)\n\nif __name__ == '__main__':\n parser = arg_parse()\n args = parser.parse_args()\n\n main(args)\n","repo_name":"Hnav3/vtscripts","sub_path":"hashlookup.py","file_name":"hashlookup.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71800622915","text":"def gcd(a,b):\n if b==0:\n return a\n return gcd(b,a%b)\n\ndef gcd_divops(a,b):\n if b==0:\n return 0\n return gcd_divops(b,a%b) + 1\n\n# N = 10\n# N = 100\n# N = 1000\nlist = [10,100,1000]\ndiv_num = 0\n\nfor N in list:\n for i in range(1,N):\n for j in range(1,N):\n div_num += gcd_divops(i,j)\n average = div_num / (N*N)\n div_num = 0\n print(\"the average is {0} when N is {1}\".format(average,N))\n","repo_name":"lexchenzhang/TTUDoc","sub_path":"2020/Summer/MATH 4330/HW/hw8-zhang.py","file_name":"hw8-zhang.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20423791620","text":"from json import load\nfrom openpyxl import*\nfrom gurobipy import*\n\n# cargar parámetros\nbook = load_workbook(\"C:\\\\Users\\\\david\\\\OneDrive - Universidad de los Andes\\\\2022-2\\\\Optimización Avanzada\\\\Otros\\\\DatosExcursión.xlsx\")\nsheet = book.active\n\nq = {} # PREFERENCIAS\nfor fila in range(3,15):\n for columna in range(3,7):\n excursioanista = sheet.cell(2,columna).value\n objeto = sheet.cell(fila,2).value\n q[(excursioanista,objeto)] = sheet.cell(fila,columna).value\n\np = {} # PESO\nfor fila in range(18,30):\n objeto = sheet.cell(fila,2).value\n p[objeto] = sheet.cell(fila,3).value\n\nb = {} # CAPACIDAD\nfor fila in range(34,38):\n excursioanista = sheet.cell(fila,2).value\n b[excursioanista] = sheet.cell(fila,3).value\n\nE = list(b.keys())\nO = list(p.keys())\n\nm = Model('Excusionistas')\n\nx = m.addVars(E,O,vtype=GRB.BINARY,name='x')\n#help(m.addVars)\nfor i in E:\n m.addConstr(quicksum(p[j]*x[i,j] for j in O)<=b[i])\n\nfor j in O:\n m.addConstr(quicksum(x[i,j] for i in E) ==1)\n\n# FUNCIÓN OBJETIVO\nm.setObjective(quicksum(q[i,j]*x[i,j] for i in E for j in O),GRB.MAXIMIZE)\n\nm.update()\n#m.optimize()\n\nm.setParam(\"Outputflag\",0)\nm.optimize()\n\nz = m.getObjective().getValue()\nprint(z)\n\nfor i,j in x.keys():\n if x[i,j].x>0:\n print(\"El objeto \",j,\" es llevado por: \",i)","repo_name":"SergioSalazarI/OptimizacionAvanzada","sub_path":"Otros/taller_2_modelo_gurobi.py","file_name":"taller_2_modelo_gurobi.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34525740081","text":"from __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING\n\nimport discord\n\nfrom hyacinth.db.models import DbDiscordNotifier\nfrom hyacinth.db.session import Session\n\nif TYPE_CHECKING:\n from hyacinth.monitor import MarketplaceMonitor\n from hyacinth.notifier import DiscordNotifier, ListingNotifier\n\n_logger = 
logging.getLogger(__name__)\n\n\ndef save_notifier(notifier: ListingNotifier) -> None:\n # avoid circular import\n from hyacinth.notifier import DiscordNotifier # pylint: disable=import-outside-toplevel\n\n if isinstance(notifier, DiscordNotifier):\n with Session() as session:\n session.query(DbDiscordNotifier).filter(\n DbDiscordNotifier.channel_id == str(notifier.channel.id)\n ).delete()\n session.add(DbDiscordNotifier.from_notifier(notifier))\n session.commit()\n else:\n raise NotImplementedError(f\"{type(notifier)} not implemented\")\n\n\ndef get_discord_notifiers(\n client: discord.Client, monitor: MarketplaceMonitor\n) -> list[DiscordNotifier]:\n \"\"\"\n Get all saved DiscordNotifiers from the database.\n\n If a stale notifier is encountered (for a channel that no longer exists), it is automatically\n deleted from the database.\n \"\"\"\n with Session() as session:\n db_notifiers: list[DbDiscordNotifier] = session.query(DbDiscordNotifier).all()\n\n notifiers: list[DiscordNotifier] = []\n stale_notifier_channel_ids: list = []\n for db_notifier in db_notifiers:\n notifier = db_notifier.to_notifier(client, monitor)\n if notifier is not None:\n notifiers.append(notifier)\n else:\n _logger.info(\n f\"Found stale notifier for channel {db_notifier.channel_id}! Deleting.\"\n )\n stale_notifier_channel_ids.append(db_notifier.channel_id)\n\n session.query(DbDiscordNotifier).filter(\n DbDiscordNotifier.channel_id.in_(stale_notifier_channel_ids)\n ).delete()\n session.commit()\n\n return notifiers\n\n\ndef delete_all_discord_notifiers_from_channel(channel_id: int) -> int:\n with Session() as session:\n count = (\n session.query(DbDiscordNotifier)\n .filter(DbDiscordNotifier.channel_id == str(channel_id))\n .count()\n )\n session.query(DbDiscordNotifier).filter(\n DbDiscordNotifier.channel_id == str(channel_id)\n ).delete()\n session.commit()\n\n return count\n","repo_name":"lmesa008/bot","sub_path":"hyacinth/db/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33432639757","text":"#!/usr/bin/python3\ndef subtract_roman(list_numbers):\n num_to_subtract = 0\n max_num_list = max(list_numbers)\n\n for num in list_numbers:\n if num < max_num_list:\n num_to_subtract += num\n\n return (max_num_list - num_to_subtract)\n\n\ndef roman_to_int(roman_string):\n if not roman_string:\n return 0\n if not isinstance(roman_string, str):\n return 0\n\n number = 0\n last_num = 0\n list_numbers = [0]\n\n rom_id = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n key_list = list(rom_id.keys())\n\n for symbol in roman_string:\n for rom_num in key_list:\n if rom_num == symbol:\n if rom_id.get(symbol) <= last_num:\n number += subtract_roman(list_numbers)\n list_numbers = [rom_id.get(symbol)]\n else:\n list_numbers.append(rom_id.get(symbol))\n last_num = rom_id.get(symbol)\n\n number += subtract_roman(list_numbers)\n return (number)\n","repo_name":"Ozytron/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/12-roman_to_int.py","file_name":"12-roman_to_int.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10004539304","text":"import elegy\nimport jax\nimport jax.numpy as jnp\nimport typing as tp\nimport einops\nimport optax\n\n\nclass MixerBlock(elegy.Module):\n def __init__(self, elements: int, channels: int):\n self.elements = 
elements\n self.channels = channels\n super().__init__()\n\n def call(self, x):\n transpose = lambda x: einops.rearrange(x, \"... n d -> ... d n\")\n normalize = lambda x: elegy.nn.LayerNormalization()(x)\n mpl_1 = lambda x: MLP(self.elements)(x)\n mpl_2 = lambda x: MLP(self.channels)(x)\n\n x0 = x if x.shape[-2] == self.elements else 0.0\n x = x0 + transpose(mpl_1(transpose(normalize(x))))\n\n x0 = x if x.shape[-1] == self.channels else 0.0\n x = x0 + normalize(mpl_2(x))\n\n return x\n\n\nclass MLP(elegy.Module):\n def __init__(self, units: int):\n self.units = units\n super().__init__()\n\n def call(self, x):\n\n x = elegy.nn.Linear(self.units)(x)\n x = jax.nn.gelu(x)\n x = elegy.nn.Linear(self.units)(x)\n\n return x\n\n\nclass Mixer(elegy.Module):\n def __init__(\n self,\n metadata: tp.Dict[str, tp.Dict[str, tp.Any]],\n labels: tp.List[str],\n embedding_channels: int = 32,\n num_layers: int = 2,\n ):\n super().__init__()\n self.features = {k: v for k, v in metadata.items() if k not in labels}\n self.labels = {k: v for k, v in metadata.items() if k in labels}\n self.embedding_channels = embedding_channels\n self.num_layers = num_layers\n\n def call(self, x):\n x = self.embed_features(x)\n\n # add CLS token\n token = self.add_parameter(\n \"token\",\n lambda: elegy.initializers.TruncatedNormal()(\n [1, x.shape[-1]],\n jnp.float32,\n ),\n )\n token = einops.repeat(token, \"... -> batch ...\", batch=x.shape[0])\n x = jnp.concatenate([token, x], axis=1)\n\n for i in range(self.num_layers):\n x = MixerBlock(\n elements=x.shape[1],\n channels=self.embedding_channels,\n )(x)\n\n # reduce channels\n x = x[:, 0]\n\n logits = elegy.nn.Linear(1)(x)\n\n return logits\n\n def embed_features(self, x):\n assert set(x.keys()) == set(self.features.keys())\n\n xs = []\n for feature, feature_metadata in self.features.items():\n kind = feature_metadata[\"kind\"]\n values = x[feature]\n\n if kind == \"continuous\":\n values = elegy.nn.Linear(self.embedding_channels)(values)\n values = jax.nn.relu(values)\n values = elegy.nn.Linear(self.embedding_channels)(values)\n elif kind == \"categorical\":\n values = values[:, 0]\n vocab_size = feature_metadata[\"size\"]\n values = elegy.nn.Embedding(\n vocab_size=vocab_size,\n embed_dim=self.embedding_channels,\n )(values)\n else:\n raise ValueError(f\"unknown kind '{kind}'\")\n\n xs.append(values)\n\n x = jnp.stack(xs, axis=1)\n\n return x\n\n\nbatch_size: int = 16\nepochs: int = 1000\n\n\ndef get_model(\n feature_metadata,\n labels,\n X_train,\n y_train,\n X_valid,\n y_valid,\n):\n\n model = elegy.Model(\n module=Mixer(\n metadata=feature_metadata,\n labels=labels,\n embedding_channels=96,\n num_layers=2,\n ),\n loss=[\n elegy.losses.BinaryCrossentropy(from_logits=True),\n # elegy.regularizers.GlobalL2(0.0001),\n ],\n metrics=elegy.metrics.BinaryAccuracy(),\n optimizer=optax.adamw(3e-5),\n )\n\n model.init(\n jax.tree_map(lambda x: x[:batch_size], X_train),\n jax.tree_map(lambda x: x[:batch_size], y_train),\n )\n model.summary(jax.tree_map(lambda x: x[:batch_size], X_train), depth=1)\n\n return model\n","repo_name":"cgarciae/tabular","sub_path":"tabular/models/config/mixer.py","file_name":"mixer.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28457167333","text":"# Created by steve on 18-2-2 下午3:16\n'''\n _ooOoo_ \n o8888888o \n 88\" . \"88 \n (| -_- |) \n O\\ = /O \n ____/`---'\\____ \n .' \\\\| |// `. 
\n / \\\\||| : |||// \\ \n / _||||| -:- |||||- \\ \n | | \\\\\\ - /// | | \n | \\_| ''\\---/'' | | \n \\ .-\\__ `-` ___/-. / \n ___`. .' /--.--\\ `. . __ \n .\"\" '< `.___\\_<|>_/___.' >'\"\". \n | | : `- \\`.;`\\ _ /`;.`/ - ` : | | \n \\ \\ `-. \\_ __\\ /__ _/ .-` / / \n======`-.____`-.___\\_____/___.-`____.-'====== \n `=---=' \n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ \n 佛祖保佑 永无BUG \n'''\nimport numpy as np\nimport scipy as sp\nfrom matplotlib import pyplot as plt\n\n\nclass DataLoder():\n\n '''\n Load data, and provide some test function.\n '''\n def __init__(self, data_dir):\n if not data_dir[-1] is '/':\n data_dir = data_dir + '/'\n self.left_imu_data = np.loadtxt(data_dir+'LEFT_FOOT.data',delimiter=',')\n self.right_imu_data = np.loadtxt(data_dir+'RIGHT_FOOT.data',delimiter=',')\n self.head_imu_data = np.loadtxt(data_dir + 'HEAD.data', delimiter=',')\n self.uwb_data = np.loadtxt(data_dir+'uwb_result.csv', delimiter=',')\n self.beacon_set = np.loadtxt(data_dir + 'beaconSet.csv', delimiter=',')\n\n def showTime(self):\n plt.figure()\n plt.title('time')\n plt.plot(self.left_imu_data[:,1],label='left imu')\n plt.plot(self.uwb_data[:,0],label = 'headuwb')\n\n plt.grid()\n plt.legend()\n\n\n\n\n\n\n\nif __name__ == '__main__':\n # imu_data = np.loadtxt('/home/steve/Data/FusingLocationData/0014/LEFT_FOOT.data',\n # delimiter=',')\n dl = DataLoder('/home/steve/Data/FusingLocationData/0014/')\n\n dl.showTime()\n plt.show()\n\n\n","repo_name":"wystephen/ComplexityPositioning","sub_path":"ExperimentScripts/TestDataTester.py","file_name":"TestDataTester.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"22482757905","text":"from dataclasses import dataclass\nimport png\nfrom typing import List\nfrom copy import deepcopy\n\n\n@dataclass(frozen=True)\nclass Point:\n x: int\n y: int\n\n\nclass OutsideCanvasError(Exception):\n pass\n\n\nclass NoColorError(Exception):\n pass\n\n\nclass Drawer:\n def __init__(\n self,\n width: int,\n height: int,\n black_white: bool = True,\n inverse_axis: bool = False,\n ) -> None:\n self._width = width\n self._height = height\n self._inverse_axis = inverse_axis\n self._black_white = black_white\n self._canvas: List[List[int]] = self._set_canvas(black_white)\n\n def _set_canvas(self, black_white: bool):\n if black_white:\n return [[255 for _ in range(self._width)] for _ in range(self._height)]\n else:\n raise NotImplementedError\n\n def save_to_png(self, filename: str):\n with open(filename, \"wb\") as f: # binary mode is important\n w = png.Writer(self._width, self._height, greyscale=True)\n w.write(f, self._canvas)\n\n def get_canvas(self):\n \"\"\" Returns copy of canvas\"\"\"\n\n return deepcopy(self._canvas)\n\n def get_point_on_canvas(self, point: Point):\n\n if self._inverse_axis:\n y = self._height - 1 - point.y\n return self._canvas[y][point.x]\n\n return self._canvas[point.y][point.x]\n\n def points_on_canvas(self, points: List[Point], exception_raised: bool = True):\n if exception_raised:\n for p in points:\n self.point_on_canvas(p, exception_raised=exception_raised)\n\n return [self.point_on_canvas(p) for p in points]\n\n def point_on_canvas(self, p: Point, exception_raised: bool = True):\n if p.x >= self._width:\n if exception_raised:\n raise OutsideCanvasError(\n f\"Point x {p.x} value outside canvas {self._width}\"\n )\n return False\n\n if p.y >= self._height:\n if exception_raised:\n raise OutsideCanvasError(\n f\"Point y {p.y} value outside 
canvas {self._height}\"\n )\n return False\n\n return True\n\n def paint_point(self, p: Point):\n self.point_on_canvas(p)\n self._paint_point(p.x, p.y)\n\n def paint_line(self, point1: Point, point2: Point):\n self.points_on_canvas([point1, point2])\n\n if point1.y == point2.y:\n self._paint_horizontal_line(point1.x, point2.x, point1.y)\n elif point1.x == point2.x:\n self._paint_vertical_line(point1.y, point2.y, point1.x)\n else:\n raise NotImplementedError(\"Can only paint horizontal or vertical lines\")\n\n def _paint_horizontal_line(self, x1: int, x2: int, y: int):\n if x1 > x2:\n x1, x2 = x2, x1\n\n for x in range(x1, x2 + 1):\n self._paint_point(x, y)\n\n def _paint_vertical_line(self, y1: int, y2: int, x: int):\n if y1 > y2:\n y1, y2 = y2, y1\n\n for y in range(y1, y2 + 1):\n self._paint_point(x, y)\n\n def _paint_point(self, x: int, y: int, color: int = None):\n if color is None:\n if self._black_white:\n color = 0\n else:\n raise NoColorError(\"Please supply a color\")\n\n if self._inverse_axis:\n y = self._height - 1 - y\n\n self._canvas[y][x] = color\n\n def _print(self):\n print(\"From Drawer _print\")\n import pprint\n\n pprint.pprint(self._canvas)\n\n","repo_name":"SolarHarm/a_maze_ing","sub_path":"generics/drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39902130103","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n\nimport os\nimport sys\nimport time\n\nif os.name == \"nt\":\n\tif getattr(sys, 'frozen', False):\n\t\t# frozen\n\t\tsdlpath = 'libs'\n\telse:\n\t\t# unfrozen\n\t\tsdlpath = os.path.join(os.path.dirname(__file__), 'libs')\n\t\t\n\tos.environ['PYSDL2_DLL_PATH'] = sdlpath\n\nimport sdl2\nimport sdl2.ext\nfrom sdl2.sdlttf import TTF_OpenFont, TTF_RenderText_Solid\n\nclass Button:\n\t\"\"\"Text Input\"\"\"\n\tdef __init__(self, renderer, resources, buttonid, label , posx, posy, handler):\n\t\t\n\t\tself.renderer = renderer\n\t\tself.resources = resources\n\t\t\n\t\tself.buttonid = buttonid\n\t\t\n\t\tself.label = label\n\t\tself.posx = posx\n\t\tself.posy = posy\n\t\t\n\t\tself.handler = handler\n\t\t\n\t\tself.factory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=self.renderer)\n\t\tself.ManagerFont = sdl2.ext.FontManager(self.resources.get_path(\"tuffy.ttf\"), size = 20)\n\t\tself.uifactory = sdl2.ext.UIFactory(self.factory)\n\t\t\n\t\tself.button = self.uifactory.from_image(sdl2.ext.BUTTON,\n\t\t\t\t\t\t\t\t\tself.resources.get_path(\"button.png\"))\n\t\tself.buttonlabel = self.factory.from_text(self.label,\n\t\t\t\t\t\t\t\t\tfontmanager=self.ManagerFont)\n\t\t\n\t\tself.buttonlabel.position\t= self.posx+20, self.posy+10\n\t\tself.button.position\t\t= self.posx, self.posy\n\t\t\n\t\tself.button.click += self.handler\n\t\t\n\tdef getevents(self, event):\n\t\treturn self.button\n\t\t\n\tdef drawlabel(self):\n\t\treturn self.buttonlabel\n\t\t\n\tdef drawbox(self):\n\t\treturn self.button\n","repo_name":"TurBoss/JauriaLobby_SDL2","sub_path":"controls/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"719103155","text":"from fastapi import HTTPException, status\r\nfrom sqlalchemy.orm import Session\r\n\r\nfrom schemas import victimaSchema\r\nfrom models import victimaModel\r\n\r\n\r\ndef listar_victimas(db: Session):\r\n victimas = db.query(victimaModel.Victima).all()\r\n return 
victimas\r\n\r\n\r\ndef bucar_victima(_id: int, db: Session):\r\n victima = db.query(victimaModel.Victima).filter(victimaModel.Victima.ID == _id).first()\r\n if not victima:\r\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\r\n detail=f\"La victima con id {_id} no esta disponible\")\r\n return victima\r\n\r\n\r\ndef crear_victima(request: victimaSchema.Victima, db: Session):\r\n victima = victimaModel.Victima(ID=request.id,\r\n NOMBRE=request.nombre,\r\n APELLIDO=request.apellido,\r\n TIPO_DOC=request.tipoDoc,\r\n NUM_DOC=request.numeroDoc,\r\n NACIONALIDAD=request.nacionalidad,\r\n SEXO=request.sexo,\r\n GENERO=request.genero,\r\n GRP_POBLAC=request.grupoPobla,\r\n FECHA_NACIM=request.fechaNacimiento,\r\n EDAD=request.edad,\r\n FECHA_REG=request.fechaRegistro\r\n )\r\n db.add(victima)\r\n db.commit()\r\n db.refresh(victima)\r\n return victima\r\n\r\n\r\ndef modificar_victima(_id: int, request: victimaSchema.Victima, db: Session):\r\n\r\n victima_actualizado = db.query(victimaModel.Victima).filter(victimaModel.Victima.ID == _id).first()\r\n\r\n if not victima_actualizado:\r\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\r\n detail=f\"La victima con id {_id} no esta disponible\")\r\n\r\n victima_actualizado.NOMBRE = request.nombre,\r\n victima_actualizado.APELLIDO = request.apellido,\r\n victima_actualizado.TIPO_DOC = request.tipoDoc,\r\n victima_actualizado.NUM_DOC = request.numeroDoc,\r\n victima_actualizado.NACIONALIDAD = request.nacionalidad,\r\n victima_actualizado.SEXO = request.sexo,\r\n victima_actualizado.GENERO = request.genero,\r\n victima_actualizado.GRP_POBLAC = request.grupoPobla,\r\n victima_actualizado.FECHA_NACIM = request.fechaNacimiento,\r\n victima_actualizado.EDAD = request.edad,\r\n victima_actualizado.FECHA_REG = request.fechaRegistro,\r\n\r\n db.commit()\r\n db.refresh(victima_actualizado)\r\n return victima_actualizado\r\n\r\n\r\ndef eliminar_victima(_id: int, db: Session):\r\n victima = db.query(victimaModel.Victima).filter(victimaModel.Victima.ID == _id).first()\r\n\r\n if not victima:\r\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\r\n detail=f\"La victima con id {_id} no esta disponible\")\r\n else:\r\n victima.delete(synchronize_session=False)\r\n db.commit()\r\n return 'eliminado'","repo_name":"Oficina-TIC-BGA/App_vio_gen","sub_path":"back-end/repository/victimaRepository.py","file_name":"victimaRepository.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70203539716","text":"# we are creating a chat bot based on the GPT-3 model\n# the bot will talk with a user and recommend them movies based on their preferences\n\nimport openai\nimport whisper\nimport soundfile as sf\nimport sounddevice as sd\n\nimport pywebio\nfrom pywebio.output import put_text, put_markdown, put_button\nfrom pywebio.input import input, FLOAT, TEXT, actions\n\nimport honeyhive\n\nhoneyhive.api_key = \"bRyCnbMZRMiVOMVx41TSvJr8ZHbW3qDG\"\nhoneyhive.openai_api_key = \"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\"\n\n# API key\nopenai.api_key = \"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\"\nmodel = whisper.load_model(\"base\")\n\nBASE_PROMPT = \"You are a movie recommendation agent who has in-depth knowledge of how to recommend movies to users. You are talking to a user who is looking for a movie to watch. 
\"\n\ndef transcribe_user():\n # record audio\n\n fs = 44100 # Sample rate\n seconds = 15 # Duration of recording\n\n put_text(\"Listening...\")\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)\n sd.wait() # Wait until recording is finished\n put_text(\"Done listening.\")\n\n # write to user.wav\n sf.write(\"user.wav\", myrecording, fs)\n\n put_text(\"Transcribing...\")\n audio = whisper.load_audio(\"user.wav\")\n result = model.transcribe(audio)\n return result[\"text\"]\n\ndef app():\n # pywebio chat interface\n put_markdown(\"## Movie Recommender\")\n put_markdown(\"### Talk to the bot and get movie recommendations\")\n\n user_message = \"\"\n assistant_message = \"\"\n\n conversation = [\n {\"role\": \"system\", \"content\": BASE_PROMPT},\n ]\n\n while user_message != \"quit\" and user_message != \"exit\":\n user_message = \"Forget about movies, recommend me some dinner options?\" #transcribe_user()\n put_markdown(f\"**User**: {user_message}\")\n if user_message == \"quit\" or user_message == \"exit\":\n break\n conversation.append({\"role\": \"user\", \"content\": user_message})\n print(conversation)\n openai_response = honeyhive.ChatCompletion.create(\n project=\"Movie recommender\",\n model=\"gpt-3.5-turbo\",\n messages=conversation,\n source=\"testing\"\n )\n generation_id = openai_response.generation_id\n assistant_message = openai_response.choices[0].message.content\n print(\"Assistant: \", assistant_message)\n conversation.append({\"role\": \"assistant\", \"content\": assistant_message})\n print(user_message)\n\n put_markdown(f\"**Assistant**: {assistant_message}\")\n action_input = actions(label=\"Conversation\", \n buttons=[\n {\"label\": \"stop\", \"value\": \"stop\"},\n {\"label\": \"continue\", \"value\": \"continue\"},\n {\"label\": \"like\", \"value\": \"like\"},\n {\"label\": \"dislike\", \"value\": \"dislike\"}])\n if action_input == \"stop\":\n number_of_turns = len(conversation)\n honeyhive.feedback(\n project=\"Movie recommender\",\n generation_id=generation_id,\n feedback_json={\n \"num_turns\": number_of_turns,\n \"ended\": True,\n }\n )\n break\n elif action_input == \"like\":\n honeyhive.feedback(\n project=\"Movie recommender\",\n generation_id=generation_id,\n feedback_json={\n \"liked\": True,\n }\n )\n elif action_input == \"dislike\":\n honeyhive.feedback(\n project=\"Movie recommender\",\n generation_id=generation_id,\n feedback_json={\n \"liked\": False,\n }\n )\n \n put_markdown(\"### Thank you for using the Movie Recommender\")\n\nif __name__ == \"__main__\":\n # start pywebio\n pywebio.start_server(app, port=8080, debug=True)\n","repo_name":"codehruv/recommendation-engine","sub_path":"recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40787532779","text":"class Temas:\n \n def __init__(self, id_tema = 0, nombre = ' '):\n self.__id_tema = id_tema\n self.__nombre = nombre\n\n def guardar(self):\n \n f = open('C:\\\\Archivos_Progra\\\\PIA\\\\EQUIPO_5\\\\Temas.txt', 'a', encoding = 'utf8')\n f.write(f'ID: {self.__id_tema} | TEMA: {self.__nombre}' + '\\n')\n f.close \n\n def consultar_todo(self):\n\n f = open('C:\\\\Archivos_Progra\\\\PIA\\\\EQUIPO_5\\\\Temas.txt')\n print (f.read())\n f.close\n\n def consultar_por_id(self, id):\n\n self.id = str(id)\n\n f = open('C:\\\\Archivos_Progra\\\\PIA\\\\EQUIPO_5\\\\Temas.txt')\n \n for linea in f:\n\n datos = linea.strip().split()\n \n if datos[1] == 
self.id:\n\n datos = linea.strip().split('|')\n print(datos)\n\n f.close","repo_name":"PROGAVANZA24/EQUIPO_5","sub_path":"Temas.py","file_name":"Temas.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11252532456","text":"from __future__ import print_function, absolute_import, nested_scopes, generators, division, with_statement, unicode_literals\nimport os\nimport numpy as np\nimport logging\nimport hytra.core.mergerresolver\nfrom hytra.core.probabilitygenerator import Traxel\nimport hytra.core.probabilitygenerator\n\ndef getLogger():\n ''' logger to be used in this module '''\n return logging.getLogger(__name__)\n\nclass IlastikMergerResolver(hytra.core.mergerresolver.MergerResolver):\n '''\n Specialization of merger resolving to work with the hypotheses graph given by ilastik,\n and to read/write images from/to the input/output slots of the respective operators. \n '''\n def __init__(self, hypothesesGraph, pluginPaths=[os.path.abspath('../hytra/plugins')], withFullGraph=False, numSplits=None, verbose=False):\n super(IlastikMergerResolver, self).__init__(pluginPaths, numSplits, verbose)\n trackingGraph = hypothesesGraph.toTrackingGraph(noFeatures=True)\n self.model = trackingGraph.model\n self.result = hypothesesGraph.getSolutionDictionary()\n self.hypothesesGraph = hypothesesGraph\n \n # Find mergers in the given model and result\n traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hytra.core.jsongraph.getMappingsBetweenUUIDsAndTraxels(self.model)\n # there might be empty frames. We want them as output too.\n timesteps = [str(t) for t in range(int(min(traxelIdPerTimestepToUniqueIdMap.keys())), max([int(idx) for idx in traxelIdPerTimestepToUniqueIdMap.keys()]) + 1)]\n\n mergers, detections, links, divisions = hytra.core.jsongraph.getMergersDetectionsLinksDivisions(self.result, uuidToTraxelMap)\n \n self.mergerNum = len(mergers)\n \n # Check that graph contains mergers\n if self.mergerNum > 0:\n self.mergersPerTimestep = hytra.core.jsongraph.getMergersPerTimestep(mergers, timesteps)\n self.detectionsPerTimestep = hytra.core.jsongraph.getDetectionsPerTimestep(detections, timesteps)\n \n linksPerTimestep = hytra.core.jsongraph.getLinksPerTimestep(links, timesteps)\n divisionsPerTimestep = hytra.core.jsongraph.getDivisionsPerTimestep(divisions, linksPerTimestep, timesteps)\n mergerLinks = hytra.core.jsongraph.getMergerLinks(linksPerTimestep, self.mergersPerTimestep, timesteps)\n \n # Build graph of the unresolved (merger) nodes and their direct neighbors\n self._createUnresolvedGraph(divisionsPerTimestep, self.mergersPerTimestep, mergerLinks, withFullGraph)\n self._prepareResolvedGraph()\n\n def run(self, transition_classifier_filename=None, transition_classifier_path=None):\n \"\"\"\n Run merger resolving from within Ilastik \n We can't use run() from parent because it has to be done on a per frame basis.\n\n 1. Compute object features\n 2. Run min-cost max-flow tracking to find the fate of all the de-merged objects\n 3. Export refined segmentation, update member variables `model` and `result`\n 4. Compute merger dictionary\n\n **Returns** a nested dictionary, indexed first by time, then object Id, containing a list of new segmentIDs per merger\n \"\"\"\n traxelIdPerTimestepToUniqueIdMap, uuidToTraxelMap = hytra.core.jsongraph.getMappingsBetweenUUIDsAndTraxels(self.model)\n # there might be empty frames. 
We want them as output too.\n        timesteps = [str(t) for t in range(int(min(traxelIdPerTimestepToUniqueIdMap.keys())), max([int(idx) for idx in traxelIdPerTimestepToUniqueIdMap.keys()]) + 1)]\n        \n        # compute new object features\n        objectFeatures = self._computeObjectFeatures(timesteps)\n\n        # load transition classifier if any\n        if transition_classifier_filename is not None:\n            getLogger().info(\"\\tLoading transition classifier\")\n            transitionClassifier = hytra.core.probabilitygenerator.RandomForestClassifier(\n                transition_classifier_path, transition_classifier_filename)\n        else:\n            getLogger().info(\"\\tUsing distance based transition energies\")\n            transitionClassifier = None\n\n        # run min-cost max-flow to find merger assignments\n        getLogger().info(\"Running min-cost max-flow to find resolved merger assignments\")\n\n        nodeFlowMap, arcFlowMap = self._minCostMaxFlowMergerResolving(objectFeatures, transitionClassifier)\n\n        # fuse results into a new solution\n        # 1.) replace merger nodes in JSON graph by their replacements -> new JSON graph\n        # update UUID to traxel map.\n        # a) how do we deal with the smaller number of states?\n        # Does it matter as we're done with tracking anyway..?\n\n        # filter functions: drop every node or link that involved a merger from the refined model\n        def mergerNodeFilter(jsonNode):\n            uuid = int(jsonNode['id'])\n            traxels = uuidToTraxelMap[uuid]\n            return not any(t[1] in self.mergersPerTimestep[str(t[0])] for t in traxels)\n\n        def mergerLinkFilter(jsonLink):\n            srcUuid = int(jsonLink['src'])\n            destUuid = int(jsonLink['dest'])\n            srcTraxels = uuidToTraxelMap[srcUuid]\n            destTraxels = uuidToTraxelMap[destUuid]\n            # return True if there was no traxel in either source or target node that was a merger.\n            return not (any(t[1] in self.mergersPerTimestep[str(t[0])] for t in srcTraxels) or any(t[1] in self.mergersPerTimestep[str(t[0])] for t in destTraxels))\n\n        self.model = self._refineModel(uuidToTraxelMap,\n                                       traxelIdPerTimestepToUniqueIdMap,\n                                       mergerNodeFilter,\n                                       mergerLinkFilter)\n\n        # 2.) 
new result = union(old result, resolved mergers) - old mergers\n self.result = self._refineResult(nodeFlowMap,\n arcFlowMap,\n traxelIdPerTimestepToUniqueIdMap,\n mergerNodeFilter,\n mergerLinkFilter)\n\n # return a dictionary telling about which mergers were resolved into what\n mergerDict = {}\n for node in self.unresolvedGraph.nodes_iter():\n # skip non-mergers\n if not 'newIds' in self.unresolvedGraph.node[node] or len(self.unresolvedGraph.node[node]['newIds']) < 2:\n continue\n \n # Save merger node info in merger dict (fits and new IDs used from within Ilastik)\n time = node[0]\n idx = node[1]\n mergerDict.setdefault(time, {})[idx] = self.unresolvedGraph.node[node]\n\n return mergerDict\n \n def getCoordinatesForObjectId(self, coordinatesForObjectIds, labelImage, timestep, objectId):\n '''\n Get coordinate for object IDs in labelImage.\n '''\n \n node = (timestep, objectId)\n \n mergerIsPresent = False\n if self.hypothesesGraph.hasNode(node):\n # Check if node is merger\n if 'value' in self.hypothesesGraph._graph.node[node] and self.hypothesesGraph._graph.node[node]['value'] > 1:\n mergerIsPresent = True\n \n # Check if node is connected to merger\n if not mergerIsPresent:\n for edge in self.hypothesesGraph._graph.out_edges(node):\n neighbor = edge[1] \n if 'value' in self.hypothesesGraph._graph.node[neighbor] and self.hypothesesGraph._graph.node[neighbor]['value'] > 1:\n mergerIsPresent = True\n \n # Compute coordinate for object ID\n if mergerIsPresent:\n coordinatesForObjectIds[objectId] = np.transpose(np.vstack(np.where(labelImage == objectId)))\n \n def fitAndRefineNodesForTimestep(self, coordinatesForObjectIds, maxObjectId, timestep):\n '''\n Update segmentation of mergers (nodes in unresolvedGraph) for each frame\n and create new nodes in `resolvedGraph`. Links to merger nodes are duplicated to all new nodes.\n \n Uses the mergerResolver plugin to update the segmentations in the labelImages.\n \n This function is used by Ilastik to fit and refine nodes per frame instead of\n loading the full volume in _fitAndRefineNodes()\n '''\n \n # use image provider plugin to load labelimage\n nextObjectId = maxObjectId + 1\n \n t = str(timestep)\n detections = self.detectionsPerTimestep[t]\n \n for idx, coordinates in coordinatesForObjectIds.items(): \n node = (timestep, idx)\n if node not in self.resolvedGraph:\n continue\n \n # Get merger count and initializations (only for merger nodes)\n count = 1\n initializations = []\n \n if idx in self.mergersPerTimestep[t]:\n count = self.mergersPerTimestep[t][idx]\n \n for predecessor, _ in self.unresolvedGraph.in_edges(node):\n initializations.extend(self.unresolvedGraph.node[predecessor]['fits'])\n # TODO: what shall we do if e.g. 
a 2-merger and a single object merge to 2 + 1,\n # so there are 3 initializations for the 2-merger, and two initializations for the 1 merger?\n # What does pgmlink do in that case?\n \n getLogger().debug(\"Looking at node {} in timestep {} with count {}\".format(idx, t, count)) \n \n # use merger resolving plugin to fit `count` objects\n fittedObjects = list(self.mergerResolverPlugin.resolveMergerForCoords(coordinates, count, initializations))\n \n assert(len(fittedObjects) == count)\n \n # split up node if count > 1, duplicate incoming and outgoing arcs\n if count > 1:\n for idx in range(nextObjectId, nextObjectId + count):\n newNode = (timestep, idx)\n self.resolvedGraph.add_node(newNode, division=False, count=1, origin=node)\n \n for e in self.unresolvedGraph.out_edges(node):\n self.resolvedGraph.add_edge(newNode, e[1])\n for e in self.unresolvedGraph.in_edges(node):\n if 'newIds' in self.unresolvedGraph.node[e[0]]:\n for newId in self.unresolvedGraph.node[e[0]]['newIds']:\n self.resolvedGraph.add_edge((e[0][0], newId), newNode)\n else:\n self.resolvedGraph.add_edge(e[0], newNode)\n \n self.resolvedGraph.remove_node(node)\n self.unresolvedGraph.node[node]['newIds'] = range(nextObjectId, nextObjectId + count)\n nextObjectId += count\n \n # each unresolved node stores its fitted shape(s) to be used\n # as initialization in the next frame, this way division duplicates\n # and de-merged nodes in the resolved graph do not need to store a fit as well\n self.unresolvedGraph.node[node]['fits'] = fittedObjects\n\n def _computeObjectFeatures(self, timesteps):\n '''\n Return the features per object as nested dictionaries:\n { (int(Timestep), int(Id)):{ \"FeatureName\" : np.array(value), \"NextFeature\": ...} }\n '''\n objectFeatures = {}\n\n # populate the dictionaries only with the Region Centers of the fit for the distance based\n # transitions in ilastik\n # TODO: in the future, this should recompute the object features from the relabeled image!\n for node in self.unresolvedGraph.nodes_iter():\n # Add region centers for new nodes (based on GMM fits)\n if 'newIds' in self.unresolvedGraph.node[node] and 'fits' in self.unresolvedGraph.node[node]:\n assert(len(self.unresolvedGraph.node[node]['newIds']) == len(self.unresolvedGraph.node[node]['fits']))\n \n time = node[0]\n newNodes = [(time, idx) for idx in self.unresolvedGraph.node[node]['newIds']]\n fits = self.unresolvedGraph.node[node]['fits']\n \n for newNode, fit in zip(newNodes, fits):\n objectFeatures[newNode] = {'RegionCenter' : self._fitToRegionCenter(fit)}\n \n # Otherwise, get the region centers from the traxel com feature \n else:\n objectFeatures[node] = {'RegionCenter' : self.hypothesesGraph._graph.node[node]['traxel'].Features['com']} \n\n return objectFeatures\n \n def _fitToRegionCenter(self, fit):\n \"\"\"\n Extract the region center from a GMM fit\n \"\"\"\n return fit[2]\n \n def _refineResult(self,\n nodeFlowMap,\n arcFlowMap,\n traxelIdPerTimestepToUniqueIdMap,\n mergerNodeFilter,\n mergerLinkFilter):\n \"\"\"\n Overwrite parent method and simply call it, but then call _updateHypothesesGraph to\n also refine our Hypotheses Graph\n \"\"\"\n refinedResult = super(IlastikMergerResolver, self)._refineResult(\n nodeFlowMap, arcFlowMap, traxelIdPerTimestepToUniqueIdMap, mergerNodeFilter, mergerLinkFilter)\n \n self._updateHypothesesGraph(arcFlowMap)\n\n return refinedResult\n\n def _updateHypothesesGraph(self, arcFlowMap):\n \"\"\"\n After running merger resolving, insert new nodes, remove de-merged nodes\n and also update the links 
in the hypotheses graph.\n\n This also stores the new solution (`value` property) in the new nodes and links\n \"\"\"\n \n # update nodes\n for n in self.unresolvedGraph.nodes_iter():\n # skip non-mergers\n if not 'newIds' in self.unresolvedGraph.node[n] or len(self.unresolvedGraph.node[n]['newIds']) < 2:\n continue\n \n # for this merger, insert all new nodes into the HG\n assert(len(self.unresolvedGraph.node[n]['newIds']) == self.unresolvedGraph.node[n]['count'])\n for newId, fit in zip(self.unresolvedGraph.node[n]['newIds'], self.unresolvedGraph.node[n]['fits']):\n traxel = Traxel()\n traxel.Id = newId\n traxel.Timestep = n[0]\n\n traxel.Features = {'com': self._fitToRegionCenter(fit)}\n self.hypothesesGraph.addNodeFromTraxel(traxel, value=1, mergerValue=n[1], divisionValue=False)\n \n # remove merger from HG, which also removes all edges that would otherwise be dangling\n self.hypothesesGraph._graph.remove_node(n)\n\n # add new links only for merger nodes\n for edge in self.resolvedGraph.edges_iter(): \n # Add new edges that are connected to new merger nodes\n if 'mergerValue' in self.hypothesesGraph._graph.node[edge[0]] or 'mergerValue' in self.hypothesesGraph._graph.node[edge[1]]:\n srcId = self.resolvedGraph.node[edge[0]]['id']\n destId = self.resolvedGraph.node[edge[1]]['id']\n \n edgeValue = arcFlowMap[(srcId, destId)]\n \n # edges connected to mergers are set to \"not used\" in order to prevent multiple active outgoing edges from single nodes. The correct edges will be added later.\n if edgeValue > 0 and not self.resolvedGraph.node[edge[0]]['division']:\n for outEdge in self.hypothesesGraph._graph.out_edges(edge[0]):\n self.hypothesesGraph._graph.edge[outEdge[0]][outEdge[1]]['value'] = 0\n \n for inEdge in self.hypothesesGraph._graph.in_edges(edge[1]):\n self.hypothesesGraph._graph.edge[inEdge[0]][inEdge[1]]['value'] = 0\n \n # Add new edge connected to merger node\n self.hypothesesGraph._graph.add_edge(edge[0], edge[1], value=edgeValue)\n","repo_name":"chaubold/hytra","sub_path":"hytra/core/ilastikmergerresolver.py","file_name":"ilastikmergerresolver.py","file_ext":"py","file_size_in_byte":16034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18552627624","text":"value = float(input(\"Digite o valor o do produto: \"))\nqtde = int(input(\"Digite o número de produtos: \"))\ncurrency = input(\"Qual a moeda: \")\n\ndef total_sells(value, qtde, currency = \"real\"):\n currency = currency.lower()\n if currency == \"real\":\n total = qtde * value\n print(f\"Total da compra: {total}\")\n elif currency == \"euro\":\n totalR = qtde * (value * 5.70)\n totalE = qtde * value\n print(f\"Total da compra em Reais = {totalR}\")\n print(f\"Total da compra em Euro = {totalE}\")\n elif currency == \"dolar\":\n totalR = qtde * (value * 5)\n totalD = qtde * value\n print(f\"Total da compra em Reais: {totalR}\")\n print(f\"Total da compra em Dólar: {totalD}\")\n else:\n print(\"Tipo de moeda inválida\")\n\nprint(\"Primeira versão apenas com os itens obrigratórios:\\n\")\ntotal_sells(value, qtde, currency)\n\nprint(\"==========================================\\n\")\n\nprint(\"Segunda versão com descontos e acréscimos \\n\")\n\ndef total_sellsv2(value, qtd, currency, discount = None, increase = None):\n brute_value = qtd * value\n\n if discount:\n liquid_value = brute_value -(brute_value *(discount/100))\n elif increase:\n liquid_value = brute_value +(brute_value*(increase/100))\n else:\n liquid_value = brute_value\n\n if currency == 
\"real\":\n return liquid_value\n elif currency == \"dolar\":\n return liquid_value * 5\n elif currency == \"euro\":\n return liquid_value * 5.7\n else:\n print(\"Moeda inválida\")\n return 0\n \ntotalv2 = total_sellsv2(value, qtde, currency, discount = 25)\nprint (f\"Total da compra: {totalv2}\")\n\nprint(\"==========================================\\n\")\n\nprint(\"Terceira solução possível \\n\")\n\ndef total_sellsv3(value, qtde, currency = \"real\", **kwargs):\n brute_value = value * qtde\n\n if \"desconto\" in kwargs:\n desconto = kwargs[\"desconto\"]\n liquid_value = brute_value -(brute_value *(desconto/100))\n elif \"acrescimo\" in kwargs:\n acrescimo = kwargs[\"acrescimo\"]\n liquid_value = brute_value +(brute_value *(acrescimo/100))\n else:\n liquid_value = brute_value\n\n if currency == \"real\":\n return liquid_value\n elif currency == \"dolar\":\n return liquid_value * 5\n elif currency == \"euro\":\n return liquid_value * 5.7\n else:\n print(\"Moeda inválida\")\n return 0\n \n \ntotalv3 = total_sellsv3(value, qtde, currency, desconto = 10)\nprint(f\"Total da compra: {totalv3}\")","repo_name":"Pelicanasso/Studies","sub_path":"Python/Challenges/04 - total_of_sells.py","file_name":"04 - total_of_sells.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44222843012","text":"from pyramid.view import (\n view_config,\n view_defaults\n )\nfrom pyramid.response import Response\nfrom .datadbhandler import getListOfEmployerIDs, getListOfEvents, insertNewEmployer, prepareEventForInsert, getEventData, getEmployerData, updateEmployerData, updateEventData, getEmployerByEventID, deleteEvent, deleteEmployer\nimport datetime\nimport json\nfrom .forms import Schema1, Schema2, SchemaEvent, SchemaEmployer\nfrom deform import Form, exception, Button, widget\nimport colander\nimport itertools\nfrom pkg_resources import resource_filename\nfrom pyramid.i18n import get_localizer\nfrom pyramid.threadlocal import get_current_request\n\n\nclass homeViews(object):\n def __init__(self, request):\n self.request = request\n\n\n @view_config(route_name=\"home\",request_method=\"POST\", renderer=\"json\")\n def postAnswer(self):\n startPostEvent = datetime.datetime.strptime(self.request.POST[\"start_date\"], \"%d.%m.%Y\")\n endPostEvent = datetime.datetime.strptime(self.request.POST[\"end_date\"], \"%d.%m.%Y\")\n startPostEvent = datetime.datetime(2018,7,20)\n endPostEvent = datetime.datetime(2018,8,4)\n return json.dumps(getListOfEvents(startPostEvent, endPostEvent), indent=4, sort_keys=True, default=str)\n\n @view_config(route_name='home', renderer='templates/default_planner.jinja2')\n def weekPlannerView(request):\n #Test variabeles:\n current_eventList = [{\"employer\":\"Peter\", \"typeColor\":\"blue\",\"width\": \"20%\",\"padding\":\"30%\"}]\n #---------------------------endTests--------------------\n diclist = getListOfEmployerIDs()\n current_view = {\"type\": \"weekView\", \"begin_date\": 3,\"end_date\": 3}\n current_dateList = [\"Montag\", \"Dienstag\", \"Mittwoch\", \"Donnerstag\", \"Freitag\"]\n getData = {\"start_date\": datetime.date(2018,7,9), \"end_date\": datetime.date(2018,7,14)}\n start_date = getData[\"start_date\"]\n end_date = getData[\"end_date\"]\n\n #Calculate the current_datelist from current_view with dates\n def daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + datetime.timedelta(n)\n hard_dateList = []\n for single_date in 
daterange(start_date, end_date):\n hard_dateList.append(single_date.strftime(\"%a, %d %B %Y\"))\n\n # You may use: today + datetime.timedelta(days=-today.weekday(), weeks=1)\n return {'current_view': current_view,\n 'employers': diclist,\n 'viewedDateCollums': hard_dateList\n }\n @view_config(route_name=\"multiweek\", renderer=\"templates/default_planner_multiweek1.jinja2\")\n def multiweek(self):\n if self.request.matchdict['amountWeeks']:\n amountOfWeeks = int(self.request.matchdict['amountWeeks'])\n else:\n amountOfWeeks = 2\n diclist = getListOfEmployerIDs()\n amountOfCollums = amountOfWeeks*6\n calcWidth = 100/amountOfCollums\n locale_name = 'de'\n counter = itertools.count()\n\n schema1 = Schema1()\n submit = Button(name=\"submit\", title=\"Absenden\", type=\"submit\", value=None, disabled=False, css_class=None)\n form1 = Form(schema1, buttons=(submit,), formid='form1',\n counter=counter)\n\n schema2 = Schema2()\n submit = Button(name=\"submit\", title=\"Absenden\", type=\"submit\", value=None, disabled=False, css_class=None)\n form2 = Form(schema2, buttons=(submit,), formid='form2',\n counter=counter)\n\n schemaem = SchemaEmployer()\n submit = Button(name=\"submit\", title=\"Absenden\", type=\"submit\", value=None, disabled=False, css_class=None)\n formem = Form(schemaem, buttons=(submit,), formid='formem',\n counter=counter)\n\n\n schemaev = SchemaEvent()\n submit = Button(name=\"submit\", title=\"Absenden\", type=\"submit\", value=None, disabled=False, css_class=None)\n formev = Form(schemaev, buttons=(submit,), formid='formev',\n counter=counter)\n\n\n #Looking for Submit from forms and redirecting:\n\n html = []\n captured = None\n\n if 'submit' in self.request.POST:\n posted_formid = self.request.POST['__formid__']\n for (formid, form) in [('form1', form1), ('form2', form2),('formem', formem), ('formev', formev)]:\n if formid == posted_formid:\n try:\n controls = self.request.POST.items()\n captured = form.validate(controls)\n if formid == \"form1\":\n insertNewEmployer(captured)\n elif formid == \"form2\":\n prepareEventForInsert(captured)\n elif formid == \"formem\":\n updateEmployerData(captured, int(captured[\"employer_id\"]))\n elif formid == 'formev':\n updateEventData(captured, int(captured[\"event_id\"]))\n html.append(form.render(captured))\n except exception.ValidationFailure as e:\n # the submitted values could not be validated\n html.append(e.render())\n i = html\n else:\n html.append(form.render())\n else:\n for form in form1, form2:\n html.append(form.render())\n\n html1 = ''.join(html[0])\n html2 = ''.join(html[1])\n return {\"amountOfWeeks\": amountOfWeeks,\n \"amountOfCollums\": amountOfCollums,\n \"daysInWeek\": 6,\n 'employers': diclist,\n \"requestDayDivWidth\": calcWidth,\n \"requestWeekDivWidth\": calcWidth*6,\n \"amountOfEmployers\":len(diclist),\n 'form1': html1,\n 'form2': html2,\n 'captured': repr(captured),\n }\n\n\n @view_config(route_name=\"multiweekPost\", request_method=\"POST\", renderer=\"json\")\n def newPost(self):\n current_viewWeeks = self.request.POST.mixed()\n return json.dumps(getListOfEvents(current_viewWeeks), indent=4, sort_keys=True, default=str)\n @view_config(route_name=\"multiweekForm\", request_method=\"POST\", renderer=\"json\")\n def newForm(self):\n diclist = getListOfEmployerIDs()\n import itertools\n locale_name = 'de'\n counter = itertools.count()\n\n schemaem = SchemaEmployer()\n submit = Button(name=\"submit\", title=\"Absenden\", type=\"submit\", value=None, disabled=False, css_class=None)\n formem = Form(schemaem, 
buttons=(submit,), formid='formem',\n                    counter=counter)\n\n        schemaev = SchemaEvent()\n        submit = Button(name=\"submit\", title=\"Absenden\", type=\"submit\", value=None, disabled=False, css_class=None)\n        formev = Form(schemaev, buttons=(submit,), formid='formev',\n                    counter=counter)\n\n        html = []\n        captured = None\n\n        readOnly = True\n        if self.request.POST[\"readonly\"]==\"false\":\n            readOnly = False\n\n        if 'submit' in self.request.POST:\n            posted_formid = self.request.POST['__formid__']\n            for (formid, form) in [('formem', formem), ('formev', formev)]:\n                if formid == posted_formid:\n                    try:\n                        controls = self.request.POST.items()\n                        captured = form.validate(controls)\n                        if formid == \"formem\":\n                            updateEmployerData(captured, int(captured[\"employer_id\"]))\n                        elif formid == \"formev\":\n                            updateEventData(captured, int(captured[\"event_id\"]))\n                        html.append(form.render(captured))\n                    except exception.ValidationFailure as e:\n                        # the submitted values could not be validated\n                        html.append(e.render())\n                else:\n                    html.append(form.render())\n        else:\n            for formDic in [{\"id\":'formem', \"form\": formem}, {\"id\":'formev', \"form\": formev}]:\n                readData = colander.null\n                if self.request.POST[\"requestType\"]==\"employer\":\n                    if formDic[\"id\"] == \"formem\":\n                        readData = getEmployerData(self.request.POST[\"identifier\"])\n                    html = (formDic[\"form\"].render(readData, readonly=readOnly))\n                elif self.request.POST['requestType']==\"event\":\n                    if formDic[\"id\"] == \"formev\":\n                        readData = getEventData(int(self.request.POST[\"identifier\"]))\n                        html = (formDic[\"form\"].render(readData, readonly=readOnly))\n\n\n        html1 = ''.join(html)\n        return {\n            \"form\": html1,\n        }\n\n    @view_config(route_name=\"multiweekDelete\", request_method=\"POST\", renderer=\"json\")\n    def delete(self):\n        id = self.request.POST[\"identifier\"]\n        if self.request.POST[\"object\"] == \"employer\":\n            deleteEmployer(id)\n        elif self.request.POST['object'] == \"event\":\n            deleteEvent(id)","repo_name":"scipham/Pyramid_PlanA","sub_path":"pyramid_plana/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72239274113","text":"n=int(input())\na=list(map(int,input().split()))\ndic={}\nfor i in a:\n    if i not in dic:\n        dic[i]=1\n    else:\n        dic[i]+=1\nc=0\nd=0\nfor k,v in dic.items():\n    if k==v:\n        c+=1\n        if k>d:\n            d=k\nif c!=0:\n    print(min(a),d)\nelse:\n    print(-1)","repo_name":"gangothri02/codemind-python","sub_path":"Maximum_and_Minimum.py","file_name":"Maximum_and_Minimum.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30835982751","text":"from pytm.pytm import TM, Boundary, Server, Actor, Datastore, Dataflow, SetOfProcesses\n\ntm = TM(\"App-y-ness\")\ntm.description = \"This is a sample threat model for the Threat Modeling Workshop.\"\n\ninternet = Boundary(\"Internet\")\n\nuser = Actor(\"App-y-tenant\")\n\napp = Server(\"Mobile App\")\n\nbuyApi = Server(\"Buy\\nAPI-y\")\nbuyApi.inBoundary = internet\n\nrentApi = Server(\"Rent\\nAPI-y\")\nrentApi.inBoundary = internet\n\nmarket = SetOfProcesses(\"Market-y\")\nmarket.inBoundary = internet\n\nalertApi = Server(\"Alert\\nAPI-y\")\nalertApi.inBoundary = internet\n\nauthApi = Server(\"Auth\\nAPI-y\")\nauthApi.inBoundary = internet\n\nallAuth = Server(\"All Auth\")\nallAuth.inBoundary = internet\n\nphoneCloud = Server(\"Phone\\nProvider\\nCloud\")\n\nfirensurfCloud = Server(\"Fire n' Surf .gov\")\n\ndbB = Datastore(\"Oracle Table B\")\ndbB.inBoundary = internet\n\ndbR = Datastore(\"Oracle Table R\")\ndbR.inBoundary = internet\n\ndbT = Datastore(\"Oracle Table T\")\ndbT.inBoundary = internet\n\nuser_to_app = Dataflow(user, app, \"use\")\napp_to_buyapi = Dataflow(app, buyApi, \"HTTPS\\nJSON\")\napp_to_phonecloud = Dataflow(app, phoneCloud, \" \")\napp_to_rentapi = Dataflow(app, rentApi, \"HTTPS\\nJSON\")\napp_to_authapi = Dataflow(app, authApi, \"HTTPS\\nJSON\")\napp_to_dbt = Dataflow(authApi, dbT, \"Token-y\")\nallauth_to_dbt = Dataflow(allAuth, dbT, \" \")\nbuyapi_to_dbt = Dataflow(buyApi, dbT, \" \")\nbuyapi_to_market = Dataflow(buyApi, market, \" \")\nrentapi_to_dbr = Dataflow(rentApi, dbR, \" \")\nrentapi_to_dbb = Dataflow(buyApi, dbB, \" \")\nrentapi_to_market = Dataflow(rentApi, market, \" \")\nalert_to_phonecloud = Dataflow(alertApi, phoneCloud, \"push\")\nalert_to_firensurf = Dataflow(alertApi, firensurfCloud, \"Kafka
HTTPS\")\nfirensurf_to_alert = Dataflow(firensurfCloud, alertApi, \"push\")\nbuyapi_to_phonecloud = Dataflow(buyApi, phoneCloud, \" \")\n\ntm.process()\n","repo_name":"OWASP/threat-model-cookbook","sub_path":"Flow Diagram/app-y-ness/alt1-app-y-ness.py","file_name":"alt1-app-y-ness.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":379,"dataset":"github-code","pt":"61"} +{"seq_id":"29416324363","text":"# Spyridonos Vasileios\n# AM : 2820\n\nimport csv\nimport collections\n\nwith open('title.basics.tsv', 'r') as file1, open('title.ratings.tsv', 'r') as file2:\n\n tsv_reader1 = csv.reader(file1, delimiter = '\\t')\n tsv_reader2 = csv.reader(file2, delimiter = '\\t')\n\n\n next(tsv_reader1) # Ignore column names\n next(tsv_reader2)\n\n s1 = next(tsv_reader1)\n s2 = next(tsv_reader2)\n\n start_years = {} # {xronia: [athroisma vathmologiwn, plithos titlwn me vathmologia, mesos oros vathmologiwn]}\n\n while True:\n try:\n if(s1[0] == s2[0]): # an ta tconst einai idia\n year = s1[5] # year = startYear\n exists = start_years.get(year, -1)\n if(exists == -1): # an to year den yparxei sto start_years\n start_years[year] = [float(s2[1]), 1, float(s2[1])] # s2[1] = averageRating\n else: # an to year yparxei sto start_years\n start_years[year][0] += float(s2[1]) # athroisma vathmologiwn += averageRating tou titlou\n start_years[year][1] += 1 # plithos titlwn auti ti xronia += 1\n start_years[year][2] = float(start_years[year][0])/start_years[year][1] # mesos oros vathmologiwn = athroisma/plithos\n s2 = next(tsv_reader2) # proxwraw to deikti tou deuterou arxeiou\n\n else: # an ta tconst einai diaforetika\n if(s1[0] > s2[0]): # an to tconst tou prwtou arxeiou einai megalytero aptou deuterou\n s2 = next(tsv_reader2) # proxwraw to deikti tou deuterou arxeiou\n else: # an to tconst tou deuterou arxeiou einai megalytero aptou prwtou\n s1 = next(tsv_reader1) # proxwraw to deikti tou prwtou arxeiou\n\n except StopIteration:\n break\n\n ordered = collections.OrderedDict(sorted(start_years.items())) # ordered = leksiko taksinomimeno me vasi tis xronies\n for k, v in ordered.items():\n print(\"year: \" + k + \" average rating: \" + \"{0:.1f}\".format(v[2]))\n print(\"Finished!\")\n","repo_name":"VSpyridonos/complex-queries-evaluation-algorithms","sub_path":"aggregate queries/program2.2.py","file_name":"program2.2.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2108149274","text":"'''\n# Parameterized Constructor:\n--> The parameterized constructors are the constructors having a specific\nnumber of arguments to be passed.\n\n'''\n\n\nclass Student:\n\n def __init__(self, name, marks, subject):\n self.name = name\n self.marks = marks\n self.subject = subject\n\n def get_data(self):\n print(\"Name :\", self.name,\"Marks:\", self.marks,\"Subject:\", self.subject)\n\n\ndata = Student(\"Karthi\", 85, \"English\")\ndata.get_data()\n\n# Addition:\n\n\nclass Addition:\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def add(self):\n c = self.a + self.b\n print(\"The additon of two numbers\", c)\n\n\naddition = Addition(20, 30)\naddition.add()\n\n\n# Printing odd numbers :\n\nclass Odd:\n list_odd = [] # Class Variable\n\n def __init__(self, a):\n self.a = a\n\n def get_odd_num(self):\n for i in range(self.a):\n if i % 2 == 1:\n Odd.list_odd.append(i)\n print(\"The List of odd Numbers are\".ljust(40, '.'), ':', Odd.list_odd)\n\n\nobj = 
Odd(int(input(\"Enter Range:\")))\nobj.get_odd_num()\n\n# Print Even numbers:\n\n\nclass Even:\n list_even = [] # Class Variable\n\n def __init__(self, a): # Parameterized Constructor\n self.a = a\n\n def get_even_num(self):\n for i in range(self.a):\n if i % 2 == 0:\n Even.list_even.append(i)\n print(\"The List of Even Numbers are \".ljust(40, '.'), ':',\n Even.list_even)\n\n\nobj = Even(int(input(\"Enter Range:\")))\nobj.get_even_num()\n\n# Employee:\n\n\nclass Employee:\n def __init__(self, ename, eid, ephono):\n self.ename = ename\n self.eid = eid\n self.ephono = ephono\n\n def get_data(self):\n print(\"The Updated address\".ljust(40, '.'), self.ename, self.eid,\n self.ephono)\n\n def update_address(self, eaddress):\n self.eaddress = eaddress\n print(\"The Updated address\".ljust(40, '.'), ':', self.eaddress)\n\n\nemp = Employee(\"Karthi\", \"MCS-0037\", \"9786910190\")\nemp.get_data()\nemp.update_address(\"Banglore\")\n","repo_name":"Karthi2245/MCS_0038_Core_Python.","sub_path":"_12_OOPS/My_Notes/_04_Constructor/_02_parameterized_constructor.py","file_name":"_02_parameterized_constructor.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73159042114","text":"# -*- coding: utf-8 -*-\n\n##--------------------------------------#\n## Kvasir\n##\n## (c) 2010-2014 Cisco Systems, Inc.\n## (c) 2015 Kurt Grutzmacher\n##\n## SNMP controller\n##\n## Author: Kurt Grutzmacher \n##--------------------------------------#\n\nfrom skaldship.hosts import get_host_record, host_title_maker, host_a_maker\nimport logging\nlogger = logging.getLogger(\"web2py.app.kvasir\")\ncrud.settings.formstyle = formstyle_bootstrap_kvasir\n\n\n@auth.requires_login()\ndef index():\n return redirect(URL('list'))\n\n##-------------------------------------------------------------------------\n## snmp\n##-------------------------------------------------------------------------\n\n@auth.requires_login()\ndef add():\n if request.args(0) is not None:\n record = get_host_record(request.args(0))\n db.t_snmp.f_hosts_id.default = record.id\n\n response.title = \"%s :: Create SNMP Entry\" % (settings.title)\n form=crud.create(db.t_snmp,next='edit/[id]')\n db.t_snmp.f_hosts_id.default = None\n return dict(form=form)\n\n@auth.requires_login()\ndef read():\n record = db.t_snmp(request.args(0)) or redirect(URL('default', 'error', vars={'msg': T('SNMP record not found')}))\n response.title = \"%s :: SNMP :: %s\" % (settings.title, host_title_maker(db.t_hosts[record.f_hosts_id]))\n form=crud.read(db.t_snmp,record)\n return dict(form=form)\n\n@auth.requires_login()\ndef edit():\n record = db.t_snmp(request.args(0)) or redirect(URL('default', 'error', vars={'msg': T('SNMP record not found')}))\n response.title = \"%s :: SNMP Update :: %s\" % (settings.title, host_title_maker(db.t_hosts[record.f_hosts_id]))\n form=crud.update(db.t_snmp,record,next='read/[id]',\n ondelete=lambda form: redirect(URL('list')))\n return dict(form=form)\n\n@auth.requires_signature()\n@auth.requires_login()\ndef delete():\n count = 0\n if 'ids' in request.vars:\n for z in request.vars.ids.split('|'):\n if z is not '':\n db(db.t_host_snmp.id == z).delete()\n db.commit()\n count += 1\n msg = \"%s SNMP record(s) deleted\" % (count)\n response.flash = msg\n response.headers['web2py-component-command'] = 'snmptable.fnReloadAjax();'\n return dict(msg=msg)\n\n@auth.requires_login()\ndef list():\n aaData = []\n response.title = \"%s :: SNMP\" % (settings.title)\n\n if 
request.extension == \"json\":\n rows = db(db.t_snmp.id > 0).select()\n\n for r in rows:\n aTxt = {}\n aaData.append({\n '0': A('edit', _target=\"snmp_%s\" % (r.id), _href=URL('edit', args=r.id)).xml(),\n '1': host_a_maker(r.f_hosts_id).xml(),\n '2': r.f_community,\n '3': r.f_version,\n '4': r.f_access,\n 'DT_RowId': r.id\n })\n\n result = { 'sEcho': request.vars.sEcho,\n 'iTotalRecords': len(aaData),\n 'aaData': aaData,\n }\n\n return result\n\n add = AddModal(\n db.t_snmp, 'Add', 'Add SNMP', 'Add SNMP',\n #fields=add_fields,\n cmd='snmptable.fnReloadAjax();',\n flash=\"SNMP entry added\"\n )\n db.t_snmp.id.comment = add.create()\n\n table = TABLE(THEAD(TR(TH(T('ID'), _width=\"5%\"),\n TH(T('Host')),\n TH(T('Community')),\n TH(T('Version')),\n TH(T('Access Level')),\n ) ),\n _class=\"datatable\",\n _id=\"snmptable\",\n _style=\"width:100%\")\n\n return dict(table=table, add=add)\n\n@auth.requires_signature()\n@auth.requires_login()\ndef delete():\n count = 0\n for r in request.vars.ids.split('|'):\n if r is not None:\n db(db.t_snmp.id == r).delete()\n count += 1\n db.commit()\n response.flash = \"%s SNMP Record(s) deleted\" % (count)\n response.headers['web2py-component-command'] = \"snmptable.fnReloadAjax(); jQuery('.datatable tr.DTTT_selected').removeClass('DTTT_selected');\"\n return\n\n@auth.requires_login()\ndef by_host():\n \"\"\"\n Returns a list of OS records based upon an host identifier\n (id, ipv4, ipv6)\n \"\"\"\n if request.args(0) is None: redirect(URL('default', 'error', vars={'msg': T('Host record not found')}))\n\n record = get_host_record(request.args(0))\n\n if record is None:\n redirect(URL('default', 'error', vars={'msg': T('Host record not found')}))\n\n response.title = \"%s :: SNMP Records for %s\" % (settings.title, host_title_maker(record))\n snmplist = db(db.t_snmp.f_hosts_id==record.id).select()\n\n aaData = []\n if request.extension == \"json\":\n for snmp in snmplist:\n # datatables json requires aaData to be specificly formatted\n aaData.append({\n '0': A(\"edit\", _target=\"snmp_update_%s\" % (snmp.id), _href=URL('edit',extension='html',args=snmp.id)).xml(),\n '1': snmp.f_community,\n '2': snmp.f_version,\n '3': snmp.f_access,\n 'DT_RowId': snmp.id,\n })\n\n result = { 'sEcho': request.vars.sEcho,\n 'iTotalRecords': len(aaData),\n 'aaData': aaData,\n }\n\n return result\n\n form = TABLE(THEAD(TR(TH(T('ID'), _width=\"5%\"),\n TH(T('Community')),\n TH(T('Version')),\n TH(T('Access')),\n ) ),\n _class=\"datatable\",\n _id=\"snmptable\",\n _style=\"width:100%\")\n\n add = AddModal(\n db.t_snmp, 'Add', 'Add', 'Add SNMP String',\n fields=[ 'f_community', 'f_version', 'f_access'],\n cmd='snmptable.fnReloadAjax();'\n )\n db.t_snmp.f_hosts_id.default = record.id\n db.t_snmp.id.comment = add.create()\n\n return dict(form=form, host=record, add=add)\n","repo_name":"KvasirSecurity/Kvasir","sub_path":"controllers/snmp.py","file_name":"snmp.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":424,"dataset":"github-code","pt":"61"} +{"seq_id":"34154423118","text":"import pickle\nfrom collections import defaultdict\n\nimport pandas as pd\nimport numpy as np\n\nfrom scipy.signal import savgol_filter\nfrom scipy import interpolate\nfrom scipy.optimize import least_squares\nfrom scipy import integrate\n\nflat_list = lambda x: [g for f in x for g in f]\ndouble_list = lambda x: [[g] for g in x]\nsplit_n = lambda x, n: [x[i : i + n] for i in range(0, len(x), n)]\n\n\nclass Analyser:\n def __init__(self, name, method=\"moda\", 
npoints=100):\n self.name = name\n if type(name) == str:\n self.data = self.load_data(name)\n else:\n self.data = name\n if method == \"moda\":\n self.df, _ = self.moda_df()\n elif method == \"interpolation\":\n self.df = self.interpolated_df(npoints)\n\n def load_data(self, name):\n \"\"\"\n load data from file\n \"\"\"\n with open(name, \"rb+\") as f:\n data = pickle.load(f)\n return data\n\n def moda_df(self, verbose=False):\n \"\"\"\n Resume multiple runs in one dataframe\n\n Remove outsamples using median split\n Average results and calculate standard deviation\n \"\"\"\n # find the moda shape\n count_shapes = defaultdict(lambda: 0)\n for r in self.data[\"data\"]:\n count_shapes[np.shape(r)] += 1\n moda_shape = max(count_shapes, key=count_shapes.get)\n data_moda = [d for d in self.data[\"data\"] if np.shape(d) == moda_shape]\n\n if verbose:\n print(\n \"Moda shape counts {:.2f}%\".format(\n count_shapes[moda_shape] / len(count_shapes.values()) * 100\n )\n )\n print(count_shapes[moda_shape], sum(count_shapes.values()))\n\n el = int(count_shapes[moda_shape] * 0.3) // 2\n data_moda = np.asarray(data_moda)\n med_avg = np.sort(data_moda, axis=0)\n\n if el != 0:\n med_avg = med_avg[el:-el]\n\n def diff(x):\n x = (\n np.concatenate((x[:, 0:1, :], x[:, 1:, :] - x[:, :-1, :]), axis=1)\n / self.data[\"sample_period\"]\n )\n return x\n\n med_avg = diff(med_avg)\n std_avg = med_avg.std(axis=0)\n med_avg = med_avg.mean(axis=0)\n\n # create the dataframe\n med_avg = pd.DataFrame(med_avg, columns=flat_list(self.data[\"to_monitor\"]))\n std_avg = pd.DataFrame(std_avg, columns=flat_list(self.data[\"to_monitor\"]))\n\n # quality of the samples (experimental)\n if verbose:\n q = std_avg.values / med_avg.values\n print(\"AVG 68% samples error\", np.nanmean(q) * 100)\n print(\"AVG 99% samples error\", np.nanmean(3 * q) * 100)\n\n print(\"MAX 68% samples error\", np.nanmax(q) * 100)\n print(\"MAX 99% samples error\", np.nanmax(3 * q) * 100)\n\n return med_avg, std_avg\n\n def interpolated_df(self, npoints=100):\n new_data = []\n for r in self.data[\"data\"]:\n x = np.asarray(r)\n new_c = []\n for c in range(x.shape[1]):\n fserie = np.trim_zeros(x[:, c])\n if len(fserie) < 4:\n if len(fserie) >= 1:\n fserie = np.hstack((fserie, [fserie[-1]] * (4 - len(fserie))))\n else:\n fserie = np.hstack((fserie, [0] * (4 - len(fserie))))\n x0, y0 = np.linspace(0, 1, len(fserie)), fserie\n tck = interpolate.splrep(x0, y0, s=0)\n x1 = np.linspace(0, 1, npoints)\n y1 = interpolate.splev(x1, tck, der=0)\n new_c.append(list(y1))\n new_data.append(new_c)\n\n new_data = np.asarray(new_data)\n # med_avg= []\n # el= int(len(self.data['data'])*0.2)\n # for c in range(new_data.shape[1]):\n # vals= new_data[:,c,:]\n # vals= np.sort(vals, axis=0)[el:-el,:]\n # med_avg.append(vals.mean(axis=0))\n\n # med_avg= new_data.mean(axis=0)\n # med_avg= pd.DataFrame(np.asarray(med_avg).T, columns=flat_list(self.data['to_monitor']))\n\n # NEED TO TEST THIS\n el = int(len(self.data[\"data\"]) * 0.3) // 2\n med_avg = np.sort(new_data, axis=0)\n std_avg = np.sort(new_data, axis=0)\n if el != 0:\n med_avg = med_avg[el:-el].mean(axis=0)\n std_avg = std_avg[el:-el].std(axis=0)\n else:\n med_avg = med_avg.mean(axis=0)\n std_avg = std_avg.mean(axis=0)\n # NEED TO TEST THIS\n\n med_avg = pd.DataFrame(med_avg, columns=flat_list(self.data[\"to_monitor\"]))\n\n count_shapes = defaultdict(lambda: 0)\n for r in self.data[\"data\"]:\n count_shapes[np.shape(r)] += 1\n moda_shape = max(count_shapes, key=count_shapes.get)\n\n def diff(df):\n x = df.values\n x = 
np.row_stack((x[0, :], x[1:, :] - x[:-1, :])) / (\n self.data[\"sample_period\"] * moda_shape[0] / npoints\n )\n return pd.DataFrame(x, columns=df.columns)\n\n med_avg = diff(med_avg)\n for c in med_avg.columns:\n med_avg[c] = savgol_filter(med_avg[c].values, 11, 3)\n\n return med_avg\n\n def interpolate(self, feature, npoints=100, filter_signal=True, proportional=False):\n f_series = np.trim_zeros(self.df[feature].values)\n if f_series.shape[0] < 4:\n raise Exception(\"Cant interpolate\")\n\n x0, y0 = np.linspace(0, 1, len(f_series)), f_series\n tck = interpolate.splrep(x0, y0, s=0)\n x1 = np.linspace(0, 1, npoints)\n y1 = interpolate.splev(x1, tck, der=0)\n\n if filter_signal:\n y1 = savgol_filter(y1, 11, 3)\n\n if proportional:\n I = integrate.simps(y0) # ,dx=self.data['sample_period'])\n Ia = integrate.simps(y1) # ,dx=1.0/npoints)\n y1 *= I / Ia\n # Ic= integrate.simps(y1,dx=1.0/npoints)\n # print(I, Ia, Ic)\n\n return x1, y1\n\n @staticmethod\n def homography_tranform(x0, y0, x1, y1):\n \"\"\"\n find homography matrix and tranform y0 to y1\n \"\"\"\n A = []\n for i in range(0, len(x0)):\n x, y = x0[i], y0[i]\n u, v = x1[i], y1[i]\n A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])\n A.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])\n A = np.asarray(A)\n U, S, Vh = np.linalg.svd(A)\n L = Vh[-1, :] / Vh[-1, -1]\n R = L.reshape(3, 3)\n\n h = np.ones(y0.shape)\n P1 = np.hstack([x0, y0, h]).reshape((-1, 3), order=\"F\")\n P2 = np.hstack([x1, y1, h]).reshape((-1, 3), order=\"F\")\n\n t_p = R.dot(P1.T).T\n return t_p, np.mean(np.abs(t_p - P2))\n\n @staticmethod\n def scale_translation_matrix(x0, y0, x1, y1):\n \"\"\"\n find the scale and translation matrix from (x0,y0) to (x1,y1)\n \"\"\"\n\n def scf(c, x, y):\n return c[0] * x + c[1] - y\n\n Rx = least_squares(scf, np.ones(2), args=(x0, x1), loss=\"soft_l1\").x\n Ry = least_squares(scf, np.ones(2), args=(y0, y1), loss=\"soft_l1\").x\n\n # from scipy.linalg import inv\n # A= np.vstack((x0,np.ones(x0.shape))).T\n # Rx= np.dot(inv(np.dot(A.T, A)), np.dot(A.T,x1))\n # A= np.vstack((y0,np.ones(y0.shape))).T\n # Ry= np.dot(inv(np.dot(A.T, A)), np.dot(A.T,y1))\n xt = x0 * Rx[0] + Rx[1]\n yt = y0 * Ry[0] + Ry[1]\n t_p = np.hstack([xt, yt]).reshape((-1, 2), order=\"F\")\n P2 = np.hstack([x1, y1]).reshape((-1, 2), order=\"F\")\n\n return t_p, np.sum(np.abs(t_p - P2))\n\n @staticmethod\n def compare(a1, a2, feature=\"PERF_COUNT_HW_INSTRUCTIONS\", npoints_=100):\n x0, y0 = a1.interpolate(feature=feature, npoints=npoints_)\n x1, y1 = a2.interpolate(feature=feature, npoints=npoints_)\n yt, err = Analyser.scale_translation_matrix(x0, y0, x1, y1)\n return yt, err\n","repo_name":"VitorRamos/performance_features","sub_path":"performance_features/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"15784738731","text":"__author__ = 'yann'\n\nfrom weixin.caller import apistore, tuling\nfrom weixin.services import message\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# return xml\ndef process_reply(xml_data):\n msg_type = xml_data.find(\"MsgType\").text\n\n if msg_type == 'text':\n return process_text(xml_data)\n\n\ndef process_text(xml_data):\n content = xml_data.find(\"Content\").text\n from_user = xml_data.find(\"FromUserName\").text\n data = None\n\n if '天气' in content:\n\n index = str(content).index(\"天气\")\n if index == 0:\n weather = apistore.get_weather()\n else:\n weather = 
apistore.get_weather(content[:index])\n data = '今天:[%s]\\n天气:[日:%s 夜:%s]\\n温度:[最高:%s 最低:%s]' \\\n % (weather['date'], weather['cond']['txt_d'], weather['cond']['txt_n'], weather['tmp']['max'],\n weather['tmp']['min'])\n\n elif '美女' in content or '妹子' in content or 'mm' in content.lower():\n beauty = apistore.get_beauty()\n data = '送你一个妹子大礼包, 想看汉子?抱歉还没实现 \\n 图片:%s \\n 地址:%s' % (beauty['picUrl'], beauty['url'])\n\n else:\n data = tuling.chat(content, from_user)\n\n return message.reply_text_message(from_user, data)\n","repo_name":"yanickxia/weixin-coffee","sub_path":"weixin/services/reply.py","file_name":"reply.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43169635446","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 20 14:13:29 2021\r\n\r\n@author: hp\r\n\"\"\"\r\n\r\n\r\nimport cv2\r\nfrom scipy.spatial import KDTree\r\nfrom webcolors import CSS3_HEX_TO_NAMES,hex_to_rgb\r\n\r\n\r\nimg_path=r'C:\\Users\\hp\\Pictures\\Screenshots\\test.png'\r\n\r\ndef convert_rgb_to_names(rgb_tuple):\r\n css3_db=CSS3_HEX_TO_NAMES\r\n names=[]\r\n rgb_values=[]\r\n for color_hex,color_name in css3_db.items():\r\n names.append(color_name)\r\n \r\n rgb_values.append(hex_to_rgb(color_hex))\r\n kdt_db= KDTree(rgb_values)\r\n distance, index= kdt_db.query(rgb_tuple)\r\n return(names[index])\r\nprint(convert_rgb_to_names((0,900,0))) \r\n","repo_name":"VoidZeroe/portfolio","sub_path":"colordetection.py","file_name":"colordetection.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27875938910","text":"from datetime import datetime, timedelta\nfrom data_manager import DataManager\nfrom flight_search import FlightSearch\nfrom notification_manager import NotificationManager\n\ndata_manager = DataManager()\ndata = data_manager.get_destinations_data()\nflight_search = FlightSearch()\nnotification_manager = NotificationManager\n\nORIGIN_CITY_IATA = \"MEL\"\n\nfor row in data:\n row[\"iataCode\"] = flight_search.get_codes(row['city'])\ndata_manager.destination_data = data\ndata_manager.update_sheet()\n\ntomorrow = datetime.now() + timedelta(days=1)\nsix_months = datetime.now() + timedelta(days=(6 *30))\n\nfor destination in data:\n flight = flight_search.check_flights(\n ORIGIN_CITY_IATA,\n destination[\"iataCode\"],\n from_time=tomorrow,\n to_time=six_months\n )\n if flight.price < destination[\"lowestPrice\"]:\n notification_manager.send_sms(\n message=f\"Low price alert! Only £{flight.price} to fly from {flight.origin_city}-{flight.origin_airport} to \"\n f\"{flight.destination_city}-{flight.destination_airport}, from {flight.out_date} \"\n f\"to {flight.return_date}.\"\n )\n\n\n","repo_name":"seanremenyi/flight_deals","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32367649794","text":"import asyncio\nimport numpy as np\nimport cv2\nimport random\nfrom . 
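The colordetection record above rebuilds its KDTree on every lookup and queries with an out-of-gamut tuple (0, 900, 0). A sketch that builds the tree once at import time and clamps the input, assuming the pre-1.13 webcolors API that exposes CSS3_HEX_TO_NAMES (newer releases reorganized these helpers):

    from scipy.spatial import KDTree
    from webcolors import CSS3_HEX_TO_NAMES, hex_to_rgb  # older webcolors API

    _names = list(CSS3_HEX_TO_NAMES.values())
    _tree = KDTree([hex_to_rgb(h) for h in CSS3_HEX_TO_NAMES])

    def nearest_css3_name(rgb):
        # Clamp each channel to the valid 0..255 range before querying.
        rgb = tuple(min(max(int(c), 0), 255) for c in rgb)
        _, index = _tree.query(rgb)
        return _names[index]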
import util\n\nclass EyeAnimation(util.Component):\n \"\"\"Eye animation on screen\"\"\"\n def __init__(self, robot):\n util.Component.__init__(self, robot)\n self._size = 80\n self._radius = self._size // 4\n self._eye = np.zeros((self._size, self._size, 3), np.uint8)\n self._exp_before = None\n self._gap = 20\n self._ev = self._exp_q = None\n self._expression = None\n\n @util.mode(property_type='setter')\n async def color(self, color=None):\n \"\"\"Eyes' color in BGR mode\"\"\"\n if color:\n color = util.bgr(color)\n if color != self._color:\n self._color = color\n self._create_eye()\n await self._exp_q.put(self._expression)\n else:\n return self._color\n\n def _create_eye(self):\n color = self._color\n sr = self._size - self._radius -1\n cv2.circle(self._eye, (sr, sr), self._radius, color, -1)\n cv2.circle(self._eye, (sr, self._radius), self._radius, color, -1)\n cv2.circle(self._eye, (self._radius, sr), self._radius, color, -1)\n cv2.circle(self._eye, (self._radius, self._radius), self._radius, color, -1)\n cv2.rectangle(self._eye, (self._radius, 0), (sr, self._size-1), color, -1)\n cv2.rectangle(self._eye, (0, self._radius), (self._size-1, sr), color, -1)\n\n _exp_list = ['auto', 'happy', 'sad', 'surprised', 'angry', 'neutral', 'focused', 'sleepy']\n @classmethod\n def get_expression_list(cl):\n \"\"\"Supported expression list, `['auto', 'happy', 'sad', 'surprised', 'angry', 'neutral', 'focused', 'sleepy']`, read only \"\"\"\n return list(cl._exp_list)\n\n @util.mode(property_type='setter')\n async def expression(self, exp=None):\n \"\"\"the default is `'auto'`, which means to randomly switch between supported expressions\"\"\"\n if exp:\n exp, color = exp if type(exp)==tuple else (exp, None)\n if exp not in self._exp_list:\n raise TypeError(f'Unknown expression not in {self._exp_list}')\n color = color and util.bgr(color)\n if exp!=self._expression or color and color!=self._color:\n if color and color != self._color:\n self._color = color\n self._create_eye()\n await self._exp_q.put(exp)\n else:\n return self._expression.split('.')[0]\n\n @util.mode()\n async def hide(self):\n \"\"\"hide the eyes\"\"\"\n if self._expression != 'hidden':\n if self._expression != 'stopped':\n self._exp_before = self._expression\n await self._set_exp('hidden', True)\n\n @util.mode()\n async def stop(self):\n \"\"\"Pause dynamic effects\"\"\"\n if self._expression not in ['stopped', 'hidden']:\n self._exp_before = self._expression\n await self._set_exp('stopped', True)\n\n @util.mode()\n async def resume(self):\n \"\"\"Continue dynamic effects\"\"\"\n if self._expression == 'stopped':\n await self._set_exp(self._exp_before or 'auto')\n self._exp_before = None\n\n async def _set_exp(self, exp, wait=False):\n if self._expression == exp:\n return\n if self._exp_q:\n self._exp_q.full() and self._exp_q.get_nowait()\n self._exp_q.put_nowait(exp)\n else:\n self._expression = exp\n wait and self._ev and (await self._ev.wait()) and self._ev.clear()\n\n @util.mode()\n async def show(self, exp=None):\n \"\"\"show the eyes\"\"\"\n await self._set_exp(exp or self._exp_before or 'auto')\n self._exp_before = None\n\n # very urgly coded eye animation\n async def animate(self, robot, start_exp=None):\n self._color = util.bgr(self._robot.env.vars.get('eye_color', 'cyan'))\n self._canvas = np.zeros((134, 240, 3), np.uint8)\n self._ev = asyncio.Event()\n self._exp_q = asyncio.Queue(1)\n self._exp_q.put_nowait(start_exp or self._expression or 'auto.neutral')\n self._create_eye()\n H, W = self._canvas.shape[:2]\n LX, RX, Y = 
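The _create_eye method above paints a filled rounded square by overlapping four corner circles with two rectangles. The same construction as a free function (hypothetical helper; the default color is cyan in BGR order):

    import numpy as np
    import cv2

    def rounded_square(size=80, radius=20, color=(255, 255, 0)):
        img = np.zeros((size, size, 3), np.uint8)
        far = size - radius - 1
        # Four filled corner circles give the rounded corners...
        for cx, cy in ((radius, radius), (radius, far), (far, radius), (far, far)):
            cv2.circle(img, (cx, cy), radius, color, -1)
        # ...and two overlapping rectangles fill the body.
        cv2.rectangle(img, (radius, 0), (far, size - 1), color, -1)
        cv2.rectangle(img, (0, radius), (size - 1, far), color, -1)
        return img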
(W-self._gap-self._size)//2, (W+self._size+self._gap)//2, H//2\n olpos, orpos = (LX, Y), (RX, Y)\n ox0, oy0, ox1, oy1 = LX-self._size//2, Y-self._size//2, RX+self._size//2-1, Y+self._size//2-1\n\n while True:\n duration = random.random() *4 + 1\n blink = False\n lresize = rresize = (self._size, self._size)\n rlbrow = llbrow = 0\n ltbrow = rtbrow = (0, 0)\n\n try:\n self._expression = await asyncio.wait_for(self._exp_q.get(), timeout=duration)\n except asyncio.TimeoutError:\n pass\n if self._expression == 'hidden':\n await robot.screen.fill((0,0,0), stop_eyes=False)\n self._ev.set()\n while True:\n self._expression = await self._exp_q.get()\n if self._expression != 'hidden':\n ox0, oy0, ox1, oy1 = 0, 0, *robot.screen.resolution\n break\n elif self._expression == 'stopped':\n self._ev.set()\n while True:\n self._expression = await self._exp_q.get()\n if self._expression != 'stopped':\n ox0, oy0, ox1, oy1 = 0, 0, *robot.screen.resolution\n break\n\n if self._expression == 'auto':\n if random.random() >.7:\n self._expression = 'auto.neutral'\n else:\n self._expression = f'auto.{random.choice(self._exp_list)}'\n if self._expression == 'auto.auto':\n self._expression = 'auto'\n continue\n\n if 'neutral' in self._expression:\n x, y = random.randint(-3, 3)*10, random.randint(-3, 1)*9\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n\n # looking up -> may blink\n blink = random.random() >.2 and (oy1-oy0) >=40\n\n # looking aside -> different size\n resize = round(random.random(), 1)\n if resize > .65 and y < 10:\n resize = round(self._size* resize)\n if x > 0:\n lresize = self._size, resize\n elif x< 0:\n rresize = self._size, resize\n\n if 'auto.' in self._expression and random.random()> .75:\n self._expression = 'auto'\n\n # looking down -> eyelids half closed\n # elif y > 10 or y > 0 and random.random()>.5:\n # ltbrow = rtbrow = self._size//4*(y//9), 0\n\n elif 'sleepy' in self._expression:\n x, y = random.randint(-1, 1)*10, random.randint(2, 3)*9\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n ltbrow = rtbrow = (self._size//4*(y//9), 0)\n duration = random.random() * 3 + 2\n if 'auto.' in self._expression and random.random() >.8:\n self._expression = 'auto'\n\n elif 'happy' in self._expression:\n x, y = random.randint(-3, 3)*5, random.randint(-3, -2)*9\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n llbrow = rlbrow = (self._size//4)*random.randint(2, 3)\n if 'auto.' in self._expression and random.random() >.6:\n self._expression = 'auto'\n\n elif 'sad' in self._expression:\n blink = random.random()>.5 and (oy1-oy0) >=40\n x, y = random.randint(-3, 3)*10, random.randint(0, 2)*9\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n ltbrow = rtbrow = (self._size//4*(y//9), -self._size//4)\n if 'auto.' in self._expression and random.random() >.5:\n self._expression = 'auto'\n\n elif 'angry' in self._expression:\n blink = random.random()>.5 and (oy1-oy0) >=40\n x, y = random.randint(-3, 3)*10, random.randint(-2, 0)*9\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n ltbrow = rtbrow = (self._size//4*(y//-9), self._size//4)\n if 'auto.' in self._expression and random.random() >.75:\n self._expression = 'auto'\n\n elif 'focused' in self._expression:\n x, y = random.randint(-3, 3)*10, random.randint(-2, 2)*9\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n lresize, rresize = (self._size, round(round(random.random()*.1+.4, 1)*self._size)), (self._size, round(round(random.random()*.1+.4, 1)*self._size))\n if 'auto.' 
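The animate loop above blocks on asyncio.wait_for(queue.get(), timeout=duration), so an expression change can arrive at any moment, and a TimeoutError simply means "keep the current expression and redraw". That pattern in isolation (names are mine):

    import asyncio

    async def next_expression(queue, current, duration):
        # Wait up to `duration` seconds for a new expression; fall back
        # to the old one if nothing arrives in time.
        try:
            return await asyncio.wait_for(queue.get(), timeout=duration)
        except asyncio.TimeoutError:
            return current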
in self._expression and random.random() >.65:\n self._expression = 'auto'\n\n elif 'surprised' in self._expression:\n blink = random.random()>.5 and (oy1-oy0) >=40\n x, y = random.randint(-2, 2)*10, random.randint(-2, 1)*8\n lpos, rpos = (LX+x, Y+y), (RX+x, Y+y)\n enlarged = round(self._size * 1.22)\n lresize = rresize = (enlarged, enlarged)\n if 'auto.' in self._expression and random.random() >.3:\n self._expression = 'auto'\n\n if blink:\n yt = oy1-(oy1-oy0)//3\n self._canvas[oy0: yt, ox0: ox1] = (0, 0, 0)\n await robot.screen.block_display(self._canvas[oy0: yt, ox0: ox1], ox0, oy0)\n oy0 = yt\n\n self._canvas[oy0: oy1, ox0: ox1] = (0, 0, 0)\n\n ly0, ly1, lx0, lx1 = lpos[1]-lresize[1]//2+ltbrow[0], lpos[1]+lresize[1]//2-llbrow, lpos[0]-lresize[0]//2, lpos[0]+lresize[0]//2\n e = self._eye if lresize==(self._size, self._size) else cv2.resize(self._eye, lresize)\n self._canvas[ly0: ly1, lx0: lx1] = e[ltbrow[0]: lresize[1]-llbrow]\n if ltbrow[1]:\n cv2.fillConvexPoly(self._canvas[ly0: ly1, lx0: lx1], np.array([(0,0), (lresize[0], 0), (lresize[0], ltbrow[1]) if ltbrow[1]>0 else (0, -ltbrow[1])]), (0, 0, 0))\n\n ry0, ry1, rx0, rx1 = rpos[1]-rresize[1]//2+rtbrow[0], rpos[1]+rresize[1]//2-rlbrow, rpos[0]-rresize[0]//2, rpos[0]+rresize[0]//2\n e = self._eye if rresize==(self._size, self._size) else cv2.resize(self._eye, rresize)\n self._canvas[ry0: ry1, rx0: rx1] = e[rtbrow[0]: rresize[1]-rlbrow]\n if ltbrow[1]:\n cv2.fillConvexPoly(self._canvas[ry0: ry1, rx0: rx1], np.array([(0,0), (rresize[0], 0), (0, rtbrow[1]) if ltbrow[1]>0 else (rresize[0], -ltbrow[1])]), (0, 0, 0))\n\n x0, y0, w, h = cv2.boundingRect(np.array([(lx0, ly0), (lx1, ly1), (rx0, ry0), (rx1, ry1)]))\n x1, y1 = x0+w, y0+h\n bx, by, bw, bh = cv2.boundingRect(np.array([(x0, y0), (x1-1, y1-1), (ox0, oy0), (ox1, oy1)]))\n await robot.screen.block_display(self._canvas[by:by+bh, bx:bx+bw], bx, by)\n\n ox0, oy0, ox1, oy1 = x0, y0, x1, y1\n olpos, orpos = lpos, rpos\n\n\n\n","repo_name":"r-cute/rcute-cozmars","sub_path":"rcute_cozmars/eye_animation.py","file_name":"eye_animation.py","file_ext":"py","file_size_in_byte":11133,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"3633868854","text":"from challenge import db\nfrom typing import List\nfrom .interface import ProjectInterface\nfrom challenge.models import Project\nfrom challenge.utils import upload_file\n\nclass ProjectService:\n \n @staticmethod\n def get(id : int):\n project = Project.query.get(id)\n return project\n \n @staticmethod\n def create(new_attr: ProjectInterface) -> Project:\n \n new_project = Project(\n name = new_attr[\"name\"],\n description = new_attr[\"description\"],\n completed = new_attr[\"completed\"]\n )\n db.session.add(new_project)\n db.session.commit()\n return new_project\n \n @staticmethod\n def update(id: int , new_attr: ProjectInterface) -> Project:\n project = Project.query.get(id)\n project.name = new_attr[\"name\"]\n project.description = new_attr[\"description\"]\n project.completed = new_attr[\"completed\"]\n \n db.session.commit()\n return project\n \n @staticmethod\n def patch(id: int , new_attr: ProjectInterface) -> Project:\n project = Project.query.get(id)\n project.completed = new_attr[\"completed\"]\n db.session.commit()\n return project\n \n \n @staticmethod\n def delete(id : int) -> Project:\n try:\n project = Project.query.get(id)\n db.session.delete(project)\n db.session.commit()\n return project, 200\n except Exception as err:\n return {\"message\" :\"Data do not exits !\"}, 
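At the end of the animation loop above, cv2.boundingRect computes the union box of the old and new eye regions so only that "dirty" rectangle is blitted to the screen. A small sketch of the dirty-rectangle computation (helper name is mine):

    import numpy as np
    import cv2

    def dirty_rect(points):
        # Smallest axis-aligned rectangle covering all given corner points,
        # returned as (x0, y0, x1, y1) for slicing a canvas.
        x, y, w, h = cv2.boundingRect(np.array(points, dtype=np.int32))
        return x, y, x + w, y + h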
400\n \n @staticmethod\n def get_all() -> List[Project]:\n projects = Project.query.all()\n return projects\n \n \n @staticmethod\n def upload_image(id : int, data: ProjectInterface):\n project = Project.query.get(id)\n file_url = upload_file(data[\"user_stories\"])\n project.user_stories = file_url\n db.session.commit()\n \n return project","repo_name":"bjboss007/VGG-challenge","sub_path":"challenge/project/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29733061232","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport unittest\n\nfrom tensorforce.tests.base_agent_test import BaseAgentTest\nfrom tensorforce.agents import DQNAgent\n\n\nclass TestDQNAgent(BaseAgentTest, unittest.TestCase):\n\n agent = DQNAgent\n config = dict(\n update_mode=dict(\n unit='timesteps',\n batch_size=8,\n frequency=8\n ),\n memory=dict(\n type='replay',\n include_next_states=True,\n capacity=100\n ),\n # memory=dict(\n # type='prioritized_replay',\n # include_next_states=True,\n # buffer_size=50,\n # capacity=100\n # ),\n optimizer=dict(\n type='adam',\n learning_rate=1e-2\n ),\n states_preprocessing=[\n dict(type='running_standardize'),\n dict(type='sequence')\n ],\n target_sync_frequency=10,\n # Comment in to test exploration types\n # actions_exploration_spec=dict(\n # type=\"epsilon_decay\",\n # initial_epsilon=1.0,\n # final_epsilon=0.1,\n # timesteps=10\n # ),\n # actions_exploration_spec=dict(\n # type=\"epsilon_anneal\",\n # initial_epsilon=1.0,\n # final_epsilon=0.1,\n # timesteps=10\n # )\n )\n\n exclude_float = True\n exclude_bounded = True\n","repo_name":"nekrald/quadrotor_reinforcement_learning","sub_path":"libraries/tensorforce/tensorforce/tests/test_dqn_agent.py","file_name":"test_dqn_agent.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"34892779763","text":"#If the bill was $150.00, split between 5 people, with 12% tip. \n#Each person should pay (150.00 / 5) * 1.12 = 33.6\n#Format the result to 2 decimal places = 33.60\n#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪\n#Hfloat 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal\n#Hfloat 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python\ntotal = int(input(\"what is the total price : \"))\npeople = int(input(\"how many people you wannna split the bill with : \"))\ntip = int(input(\"what is the tip percentage you'll like to give 10 , 12 , 15? 
: \"))\ntip_percentage = float(tip / 100)\ntip = float(total + float(total * tip/100))\ntip_per_person = tip /people\nprint(f\"each person has to give\" , tip_per_person)","repo_name":"shivam131284/tip-calculator-start","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12988631617","text":"\ndef load_data(file_name):\n \"\"\"Read all the lines from the data file and return as a list\"\"\"\n items = []\n f = open(file_name, 'r')\n lines = f.readlines()\n f.close()\n for line in lines:\n items.append(line)\n return items\n\n\ndef move_right(column, width, steps=3):\n new_col = column + steps\n if new_col > width - 1:\n new_col = new_col - width\n return new_col\n\n\ndef move_down(row, steps=1):\n new_row = row + steps\n return new_row\n\n\ndef count_trees(data, cur_row, cur_col, right_steps, down_steps):\n width = len(data[0]) - 1\n total_rows = len(data)\n counter = 0\n while(True):\n # cur_row does not change when moving right\n cur_col = move_right(column=cur_col, width=width, steps=right_steps)\n # move down\n cur_row = move_down(row=cur_row, steps=down_steps)\n\n if cur_row > total_rows - 1:\n # print(f'row: {cur_row} col: {cur_col}')\n break\n\n char = data[cur_row][cur_col]\n # print(f'row: {cur_row} col: {cur_col} char: {char}')\n if char == \"#\":\n counter += 1\n return counter\n\nmap = load_data(file_name='./day3/data.txt')\n\nproduct = 1\nmethods = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]\nfor method in methods:\n right, down = method\n cur_row = 0\n cur_col = 0\n trees = count_trees(map, cur_row, cur_col, right, down)\n print(f'right: {right} down: {down} trees: {trees}')\n product = product * trees\n\nprint(f'result: {product}')","repo_name":"OliverZ2020/advent-of-code","sub_path":"day3/puzzle_b.py","file_name":"puzzle_b.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40035960292","text":"from openpyxl import load_workbook\nfrom openpyxl.utils import get_column_letter, column_index_from_string\nfrom openpyxl import Workbook\nfrom openpyxl import styles\nfrom openpyxl.styles import Font,colors,Border,Side,PatternFill,Alignment\nfrom dtctest.modules.readExcel import *\n# from dtctest.config.config import *\nfrom dtctest.config import global_var\n\n#获取TestReportTableHeader\ndef get_TestReportTableHeader(sheet, num:int) -> list:\n\tTestReportTableHeaderList = []\n\tfor i in range(num):\n\t\tTestReportTableHeaderList.append(read_line(sheet, i, 'L'))\n\treturn TestReportTableHeaderList\n\n# #获取测试报告表头列表\n# def build_TableHeaderList(sheet) -> list:\n\n# \tTableHeaderList = get_TestReportTableHeader(sheet, 2)\n# \tTableHeaderList[0].append(\"TestResult\")\n# \tTableHeaderList[0].append(\"DelayTime\")\n\n# \treturn TableHeaderList\n\n#获取测试报告表头列表\ndef build_TableHeaderList(srcList) -> list:\n\n\tTableHeaderList = srcList\n\tTableHeaderList[0].append(\"TestResult\")\n\t# TableHeaderList[1].append(\"None\")\n\t#TableHeaderList[0].append(\"DelayTime\")\n\n\treturn TableHeaderList\n\n#获取测试报告表头列表\ndef build_TableHeaderList_pandas(srcList) -> list:\n\n\tTableHeaderList = srcList\n\tTableHeaderList[0].append(\"TestResult\")\n\tTableHeaderList[0].append(\"DelayTime\")\n\n\treturn TableHeaderList\n\ndef build_TableHeaderList_Direct() -> list:\n\tTableHeaderList = [['源报文ID', 'ECU', '周期', 'dlc', 'DTC码', '源网段', '目标网段', 'None', 'None', 'None', 'None', 
'None', 'TestResult', 'DelayTime'], \\\n\t\t\t\t\t\t['None', 'None', 'None', 'None', 'None', 'None', 'HybridCAN', 'PTCAN', 'EPCAN', 'BDCAN', 'CHCAN', 'DCAN']]\n\treturn TableHeaderList\n\n\n#读取测试MSGlog\ndef readtestlog(pathname: str) -> list:\n\tDataList = []\n\twith open(pathname) as Rfile:\n\t\tfor tmp in Rfile:\n\t\t\tDataList.append(tmp)\n\n\treturn DataList\n\n#创建MSG测试列表\ndef createmsgtestreportlist(TableHeader: list, DataList: list) -> list:\n\tmsgTestReportList = []\n\totherDataLen = (len(global_var.DTC_Datas) - len(DataList)//11)\n\tmsgTestReportList.extend(TableHeader)\n\tfor i in range(len(global_var.DTC_Datas)):\n\t\ttmplist = []\n\t\tfor tmp in global_var.DTC_Datas[i]:\n\t\t\tif tmp != \"None\":\n\t\t\t\ttmplist.append(tmp)\n\t\t\telse:\n\t\t\t\ttmplist.append(\"NA\")\n\t\tif global_var.DTC_Datas[i][0] != \"None\":\n\t\t\tfor j in range(9):\n\t\t\t\tif DataList[(i - otherDataLen)*11 + j].find(\"OK\") > 0:\n\t\t\t\t\tif j == 8:\n\t\t\t\t\t\ttmplist.append(\"Success\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\telif DataList[(i - otherDataLen)*11 + j].find(\"Fail\") > 0:\n\t\t\t\t\ttmplist.append(\"Fail\")\n\t\t\t\t\tbreak\n\n\t\tmsgTestReportList.append(tmplist)\n\n\treturn msgTestReportList\n\n\n#按行写入excel\ndef write2excel(SheetTitle: str, MSGTestReportList: list, PathName: str) -> None:\n\twb = Workbook()\n\n\tsheet=wb.worksheets[0]\n\n\tsheet.title = SheetTitle\n\tfor tmp in MSGTestReportList:\n\t\tsheet.append(tmp)\n\tfor i in range(sheet.max_row):\n\t\tfor j in range(column_index_from_string('M')):\n\t\t\tif sheet[get_column_letter(j+1) + str(i + 1)].value == \"None\":\n\t\t\t\tsheet[get_column_letter(j+1) + str(i + 1)].value = ''\n\n\t#font = Font(color = colors.RED)\n\t#sheet[\"A1\"].font = font\n\twb.save(PathName)\n\n\n\n#设置单元格格式\ndef setstyle(PathName:str, SheetTitle:str) -> None:\n\t# 默认可读写,若有需要可以指定write_only和read_only为True\n\twb = load_workbook(PathName)\n\tsheet = wb[SheetTitle]\n\tfor i in range(sheet.max_row):\n\t\tfor j in range(column_index_from_string('N')):\n\t\t\tsheet[get_column_letter(j+1) + str(i + 1)].alignment = Alignment(horizontal = 'center', vertical = 'center')\n\t\t\tsheet[get_column_letter(j+1) + str(i + 1)].font = Font(name = 'Times New Roman')\n\t\t\tif i < 2:\n\t\t\t\tsheet[get_column_letter(j+1) + str(i + 1)].font = Font(name = '楷体', size = 12, bold = True)\n\t\tif sheet['K' + str(i + 1)].value == \"Success\":\n\t\t\tsheet['K' + str(i + 1)].fill = styles.fills.GradientFill(stop = [\"00FF00\", \"00FF00\"])\n\t\t\tsheet[get_column_letter(j) + str(i + 1)].font = Font(size = 12, bold = True)\n\t\telif sheet['K' + str(i + 1)].value == \"Fail\":\n\t\t\tsheet['K' + str(i + 1)].fill = styles.fills.GradientFill(stop = [\"FF0000\", \"FF0000\"])\n\t\t\tsheet[get_column_letter(j+1) + str(i + 1)].font = Font(size = 12, bold = True)\n\n\t#设置行高\n\tfor i in range(sheet.max_row):\n\t\tsheet.row_dimensions[i + 1].height = 18\n\n\n\t#设置列宽\n\tsheet.column_dimensions['J'].width = 50\n\tfor i in range(column_index_from_string('A'),column_index_from_string('I')):\n\t\tsheet.column_dimensions[get_column_letter(i+1)].width = 10\n\tsheet.column_dimensions['K'].width = 15\n\n\t#合并单元格\n\tfor i in range(column_index_from_string('K')):\n\t\tsheet.merge_cells(get_column_letter(i+1)+'1:'+get_column_letter(i+1)+'2')\n\t# sheet.merge_cells(\"G1:L1\")\n\t# sheet.merge_cells(\"M1:M2\")\n\t# 
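The write2excel/setstyle pair above appends rows cell by cell and then restyles the sheet in a second pass. A compact sketch of the same openpyxl pattern — append rows, bold and center the header, widen columns — using only standard openpyxl calls (function name and widths are mine):

    from openpyxl import Workbook
    from openpyxl.styles import Alignment, Font
    from openpyxl.utils import get_column_letter

    def write_report(rows, path, title="Report"):
        wb = Workbook()
        ws = wb.active
        ws.title = title
        for row in rows:
            ws.append(row)
        for col in range(1, ws.max_column + 1):
            letter = get_column_letter(col)
            ws.column_dimensions[letter].width = 15
            cell = ws[f"{letter}1"]
            cell.font = Font(bold=True)
            cell.alignment = Alignment(horizontal="center", vertical="center")
        wb.save(path)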
sheet.merge_cells(\"N1:N2\")\n\n\twb.save(PathName)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"wangcq714/SpyTestTool","sub_path":"dtctest/report/testreport.py","file_name":"testreport.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2063581352","text":"import sys, os\nimport torch\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nimport matplotlib.pyplot as plt\n\nfrom torch.autograd import Variable\nfrom torch.utils import data\nfrom tqdm import tqdm\n\nimport config.config as config\nfrom models import get_model\nfrom loader import cityscapesLoader\nfrom tools.metrics import runningScore\nfrom tools.augmentations import *\nfrom tools.utils import *\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef train():\n # Initial\n # mkdir snapshotPath\n if not os.path.exists(config.snapshot_path):\n os.mkdir(config.snapshot_path)\n\n # Setup Dataloader\n t_loader = cityscapesLoader(config.trainList, split = 'train', batchSize = config.train_batch_size, imgSize = config.imgSize, is_augmentation = False, randomResize = False)\n v_loader = cityscapesLoader(config.valList, split = 'val', imgSize = config.imgSize)\n\n n_classes = t_loader.n_classes\n imgSize = t_loader.imgSize\n trainloader = data.DataLoader(t_loader, batch_size=config.train_batch_size, num_workers=8)#not shuffle here, it will break because diffient shape\n valloader = data.DataLoader(v_loader, batch_size=config.test_batch_size, num_workers=8)\n\n # Setup Metrics for Iou calculate\n running_metrics = runningScore(n_classes)\n\n # Setup Model\n model = get_model(config.arch, n_classes, imgSize)\n finetune_params = model.finetune_params\n #model = yolov3SPP(version='cityscapes', n_classes=19)\n model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))\n model.cuda()\n \n # Check if model has custom optimizer / loss\n if hasattr(model.module, 'optimizer'):\n optimizer = model.module.optimizer\n else:\n # freeze the param\n #optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=config.base_lr, momentum=config.momentum, weight_decay=config.weight_decay)\n # finetune the param\n train_params = []\n for idx , param in enumerate(model.parameters()):\n if idx > len(finetune_params):\n train_params.append(param)\n optimizer = torch.optim.SGD([{'params': finetune_params}, {'params': train_params, 'lr': config.base_lr * 1}],\\\n lr=config.base_lr, momentum=config.momentum, weight_decay=config.weight_decay)\n #for param_group in optimizer.param_groups:\n # print(\"{} : {}\".format(param_group['params'], param_group['lr']))\n # nomal optimizer\n #optimizer = torch.optim.SGD(model.parameters(), lr=config.base_lr, momentum=config.momentum, weight_decay=config.weight_decay)\n\n # learning method\n #scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=config.lr_decay_epochs, gamma=config.lr_decay)\n\n if config.resume is not None: \n if os.path.isfile(config.resume):\n print(\"Loading model and optimizer from checkpoint '{}'\".format(config.resume))\n checkpoint = torch.load(config.resume)\n model.load_state_dict(checkpoint['model_state'])\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n start_epoch = checkpoint['epoch']\n print(\"Loaded checkpoint '{}' (epoch {})\" \n .format(config.resume, checkpoint['epoch']))\n else:\n print(\"No checkpoint found at 
'{}'\".format(config.resume)) \n else:\n #load_pretrained_model\n print(\"Loading pretrained Model: {}\".format(config.pretrainedModel))\n start_epoch = 0 \n if config.pretrainedModel.split(\".\")[-1] == \"pth\":\n model.load_state_dict(torch.load(config.pretrainedModel))\n else:\n model.load_state_dict(torch.load(config.pretrainedModel)['model_state'])\n\n # initial visdom\n if config.visdomTrain:\n fig=plt.figure()\n # train visdom\n ax1 = fig.add_subplot(2,1,1)\n ax1.axis([start_epoch * len(trainloader), (start_epoch+1) * len(trainloader), 0, 1])\n ax1.plot(-1, -1, 'bo', label = 'LossSeg')\n ax1.plot(-1, -1, 'r^', label = 'lossDet')\n ax1.legend(loc='upper left')\n plt.title('LossSeg vs lossDet')\n # val visdom\n ax2 = fig.add_subplot(2,1,2)\n ax2.axis([start_epoch * len(trainloader), (start_epoch+1) * len(trainloader), 0, 1])\n ax2.plot(-1, -1, 'cs', label = 'LossSegVal')\n ax2.plot(-1, -1, 'y*', label = 'lossDetVal')\n ax2.legend(loc='upper left')\n\n bestIou = -100.0 \n bestmAP = -100.0\n lossSegDict = {}\n lossDetDict = {}\n for epoch in range(start_epoch, config.max_epoch):\n # update axis for visdom\n if config.visdomTrain:\n ax1.axis([start_epoch * len(trainloader), (epoch+1) * len(trainloader), 0, 1])\n ax2.axis([start_epoch * len(trainloader), (epoch+1) * len(trainloader), 0, 1])\n\n # model train pocess\n model.train()\n for i, (images, labels, segMaps) in enumerate(trainloader): \n currentIter = epoch * len(trainloader) + i \n poly_lr_scheduler(optimizer, config.base_lr, currentIter, max_iter = config.max_epoch * len(trainloader))\n\n images = Variable(images.cuda())\n labels = Variable(labels.cuda())\n segMaps = Variable(segMaps.cuda())\n\n optimizer.zero_grad()\n loss_seg, loss_det = model(images, labels, segMaps)# \n\n # fuse loss\n # loss = loss_seg + loss_det\n\n loss_seg.backward()\n optimizer.step()\n\n if (i+1) % 20 == 0:\n if config.visdomTrain:\n lossSegDict[currentIter] = loss_seg.data[0]\n lossDetDict[currentIter] = loss_det.data[0]\n for perEpoch, lossSeg in lossSegDict.items():\n\n ax1.plot(perEpoch, lossSeg, 'bo', label = 'LossSeg')\n ax1.plot(perEpoch, lossDetDict[perEpoch], 'r^', label = 'lossDet')\n plt.pause(0.033)\n\n print(\"[Epoch %d/%d, Batch %d/%d] Learning_rate: %.7f Loss_seg: %.4f Loss_det: %.4f\" % \\\n (epoch+1, config.max_epoch, i, len(trainloader), optimizer.param_groups[0]['lr'], loss_seg.data[0], loss_det.data[0]))#\n\n # model eval pocess\n lossSegVal = []\n lossDetVal = []\n model.eval()\n APs = []\n for i_val, (images_val, labels_val, segMap_val) in tqdm(enumerate(valloader)):\n images_val = Variable(images_val.cuda(), volatile=True)\n labels_val = Variable(labels_val.cuda(), volatile=True)\n segMap_val = Variable(segMap_val.cuda(), volatile=True)\n\n outputSeg, outputDet = model(images_val) \n #loss_segVal, loss_detVal = model(images_val, labels_val, segMap_val) \n #lossSegVal.append(loss_segVal.data[0]) \n #lossDetVal.append(loss_detVal.data[0])\n\n pred = outputSeg.data.max(1)[1].cpu().numpy()\n gt = segMap_val.data.cpu().numpy()\n running_metrics.update(gt, pred)\n\n AP = evalDet(outputDet, labels_val, config.numClasses, config.imgSize, config.confThresh, config.iouThresh)\n APs.append(AP) \n \n # output valid loss\n #print(\"[Epoch %d/%d] Loss_segVal: %.4f Loss_detVal: %.4f\\n\" % \\\n # (epoch+1, config.max_epoch, np.mean(lossSegVal), np.mean(lossDetVal))) \n\n score, class_iou = running_metrics.get_scores()\n for k, v in score.items():\n print(k, v)\n running_metrics.reset()\n\n print(\"Mean Average Precision: %.4f\" % 
np.mean(APs))\n\n if config.visdomVal:\n ax2.plot((epoch+1) * len(trainloader), np.mean(lossSegVal), 'cs', label = 'LossSegVal')\n ax2.plot((epoch+1) * len(trainloader), np.mean(lossDetVal), 'y*', label = 'lossDetVal')\n plt.pause(0.033) \n\n # write result to log\n with open('MTSD.log', 'a') as f:\n f.write(\"++++++++++MTSD Result+++++++++++++\\nepoch: {} \\nDetection result: \\nMean Iou: {} \\nSegmentation result: \\nmAP: {}\\n\".\\\n format(epoch+1, score['Mean IoU : \\t'], np.mean(APs)))\n\n if score['Mean IoU : \\t'] >= bestIou:# or np.mean(APs) > bestmAp:\n bestIou = score['Mean IoU : \\t']\n bestmAp = np.mean(APs)\n state = {'epoch': epoch+1,\n 'model_state': model.state_dict(),\n 'optimizer_state' : optimizer.state_dict(),}\n torch.save(state, \"{}/{}_best_model.pkl\".format(config.snapshot_path, config.arch))\n\nif __name__ == '__main__':\n train()\n\n","repo_name":"syc10-09/MTSD","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"420733728","text":"# Create your views here.\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse \n\nimport random\nimport string\nimport uuid \n\nfrom datetime import datetime\n\nfrom news.views import render\nfrom users.models import Person\nfrom users.forms import InviteUserForm, EditBioForm\nfrom registration.models import RegistrationProfile\nfrom registration.backends import get_backend\nfrom registration.forms import RegistrationForm\n\n\n\ndef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for x in range(size))\n\n\n\ndef profile(request, slug):\n person = get_object_or_404(Person, user__username=slug)\n return render(request, \"users/profile.html\", locals())\n\n@login_required\ndef user_profile(request):\n person = request.user.get_profile()\n return render(request, \"users/profile.html\", locals())\n \n \ndef all_posts(request, slug):\n person = get_object_or_404(Person, user__username=slug)\n from news.models import NewsItem\n posts = NewsItem.objects.filter(owner=person).order_by('-date')\n return render(request, \"users/all_posts.html\", locals())\n\n\n@login_required\ndef edit_bio(request):\n person = request.user.get_profile()\n if request.method == 'POST':\n form = EditBioForm(request.POST)\n if form.is_valid():\n person.bio = form.cleaned_data['bio']\n person.save()\n \n url = reverse('user_profile')\n return HttpResponseRedirect(url)\n\n else:\n form = EditBioForm(initial={'bio':person.bio})\n \n \n return render(request, 'users/forms/edit_bio.html', locals())\n\n@login_required\ndef invite_user(request):\n if request.method == 'POST':\n form = InviteUserForm(request.POST)\n if form.is_valid():\n # create a registration profile with the email address\n \n # create a user/person with a dummy account\n username = id_generator()\n password = uuid.uuid1().hex\n email = form.cleaned_data['email']\n \n new_user = User.objects.create_user(username, email, password)\n \n # give them a registration profile too, just so that we can track their signup\n 
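The validation loop above accumulates predictions into a runningScore metric imported from tools.metrics, which is not shown. Mean IoU over a class confusion matrix is conventionally computed as below (a sketch of the standard definition, not the record's class):

    import numpy as np

    def mean_iou(hist):
        # hist: (n_classes, n_classes) confusion matrix,
        # rows = ground truth, cols = prediction.
        inter = np.diag(hist)
        union = hist.sum(axis=1) + hist.sum(axis=0) - inter
        with np.errstate(divide='ignore', invalid='ignore'):
            iou = inter / union  # absent classes become NaN
        return np.nanmean(iou), iou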
reg_profile = RegistrationProfile.objects.create_profile(new_user) \n \n # send an invitation email\n message = render_to_string('registration/invitation_email.txt', {\n 'activation_key': reg_profile.activation_key, \n 'inviter': request.user.get_profile(), \t\n })\n \n subject_line = \"%s has invited you to %s\" % (request.user.get_profile(), settings.SITE_NAME)\n receiver = email\n sender = settings.DEFAULT_FROM_EMAIL\n send_mail(\n subject_line,\n message,\n sender,\n [receiver],\n fail_silently=False,\n )\n \n \n return render(request, 'users/forms/invite_user.html', locals())\n else:\n form = InviteUserForm()\n \n return render(request, 'users/forms/invite_user.html', locals())\n\n\ndef activate_invited_user(request, key):\n account = get_object_or_404(RegistrationProfile, activation_key=key)\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n # first, update the user with real data, not the dummy data\n account.user.username = form.cleaned_data['username']\n account.user.password = form.cleaned_data['password1']\n account.user.is_active = True\n account.user.save()\n \n \n \n # finally, create a user profile based on the user\n person = Person.objects.create(\n user=account.user,\n email=account.user.email,\n date_joined=datetime.now(),\n karma=0,\n )\n \n # now we'll log the user in\n from django.contrib.auth import load_backend, login\n for backend in settings.AUTHENTICATION_BACKENDS:\n if person.user == load_backend(backend).get_user(person.user.pk):\n person.user.backend = backend\n if hasattr(person.user, 'backend'):\n login(request, person.user)\n \n # and finally, we'll activate the registration profile\n activate = RegistrationProfile.objects.activate_user(key)\n \n url = reverse('profile', args=[account.user.username])\n return HttpResponseRedirect(url)\n else:\n form = RegistrationForm()\n return render(request, 'users/activate_invited_user.html', locals()) \n\n\ndef seen_voting_reminder(request):\n if request.GET.get('xhr'):\n person = request.user.get_profile()\n person.seen_voting_reminder = True\n person.save()\n return True\n else:\n return False","repo_name":"minrivertea/chinanews","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4405750782","text":"class Solution(object):\n def addToArrayForm(self, num, k):\n res=[None]*(len(num)+1)\n count=\"\"\n total=0\n j=0\n for item in num: \n count+=str(item)\n total=int(count)+k\n\n for i in str(total):\n res[j]=int(i)\n j+=1\n if j>len(num):\n return res[0:len(num)+1]\n else:\n return res[0:len(num)]\ne1=Solution()\narr=[2,1,5]\nprint(e1.addToArrayForm(arr,806))\n\n \n","repo_name":"Aquib56/DSA","sub_path":"Leetcode/Array/Add to Array-Form of Integer989_E.py","file_name":"Add to Array-Form of Integer989_E.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14844309352","text":"from aiogram.dispatcher.filters.state import StatesGroup, State\nfrom config import bot, dp, db, SUPPORT_CHAT\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove\nfrom hendlers.admin.add import submit_markup, all_right_message, cancel_message\nfrom aiogram.types import Message\nfrom filters import IsUser\nfrom hendlers.user.reserved import send_phone\nfrom aiogram.types import ContentType\nfrom 
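The addToArrayForm record above builds the sum with manual string accumulation into a preallocated result list and inconsistent slicing. The same LeetCode problem (989) reduces to a digit-list/int round trip; a cleaner equivalent:

    class Solution:
        def addToArrayForm(self, num, k):
            # Digits -> integer, add k, integer -> digits.
            total = int(''.join(map(str, num))) + k
            return [int(d) for d in str(total)]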
hendlers.user.catalog import btnnaz\nfrom app import sos, btnMenu, btnbar, btnTime, btnBrn, btndlv\n\nmessage_to = \"✉ Написать сообщение\"\nphone = \"📞 Позвонить\"\nback_message = '👈 Назад'\n\n\nclass SosState(StatesGroup):\n question = State()\n phone_number = State()\n submit = State()\n\n\n@dp.message_handler(IsUser(), text=sos)\nasync def command_start(message: Message):\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.add(message_to, phone)\n markup.add(btnnaz)\n await message.answer(\"Выберите способ\", reply_markup=markup)\n\n\n@dp.message_handler(IsUser(), text=phone)\nasync def command_phone(message: Message):\n await message.answer(\"Пожалуйста свяжитесь с нами по номеру телефона\\n\"\n \"+7 (843) 266‒11‒11\")\n\n\n@dp.message_handler(IsUser(), text=message_to)\nasync def command_message(message: Message):\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.add(back_message)\n await SosState.question.set()\n await message.answer('В чем суть проблемы? Опишите как можно детальнее и администратор обязательно вам ответит.',\n reply_markup=markup)\n\n\n@dp.message_handler(IsUser(), state=SosState.question)\nasync def procces_phone(message: Message, state: FSMContext):\n async with state.proxy() as data:\n if message.text != back_message:\n data['question'] = message.text\n await message.answer(\"Введите номер телефона пожалуйста.\", reply_markup=send_phone)\n await SosState.next()\n else:\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.add(btnMenu, btnbar, btnTime)\n markup.add(btnBrn, btndlv, sos)\n await message.answer('Отменено!', reply_markup=markup)\n await state.finish()\n\n\n@dp.message_handler(IsUser(), state=SosState.phone_number)\nasync def process_question(message: Message, state: FSMContext):\n async with state.proxy() as data:\n data['phone_number'] = message.text\n\n await message.answer('Убедитесь, что все верно.', reply_markup=submit_markup())\n await SosState.next()\n\n\n@dp.message_handler(content_types=ContentType.CONTACT, state=SosState.phone_number)\nasync def process_question(message: Message, state: FSMContext):\n async with state.proxy() as data:\n data['phone_number'] = message.contact['phone_number']\n\n await message.answer('Убедитесь, что все верно.', reply_markup=submit_markup())\n await SosState.next()\n\n\n@dp.message_handler(lambda message: message.text not in [cancel_message, all_right_message], state=SosState.submit)\nasync def process_price_invalid(message: Message):\n await message.answer('Такого варианта не было.')\n\n\n@dp.message_handler(text=cancel_message, state=SosState.submit)\nasync def process_cancel(message: Message, state: FSMContext):\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.add(btnMenu, btnbar, btnTime)\n markup.add(btnBrn, btndlv, sos)\n await message.answer('Отменено!', reply_markup=markup)\n await state.finish()\n\n\n@dp.message_handler(text=all_right_message, state=SosState.submit)\nasync def process_submit(message: Message, state: FSMContext):\n cid = message.chat.id\n async with state.proxy() as data:\n db.query('INSERT INTO questions VALUES (?, ?)',\n (cid, data['question']))\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.add(btnMenu, btnbar, btnTime)\n markup.add(btnBrn, btndlv, sos)\n await message.answer('Отправлено!', reply_markup=markup)\n await bot.send_message(SUPPORT_CHAT, \"SOS\\n\"\n \"\\n\"\n f\"Вопрос: {data['question']}\\n\"\n f\"Номер телефона отправителя: {data['phone_number']}\")\n\n await 
state.finish()\n","repo_name":"Mekan777-alt/outpack","sub_path":"hendlers/user/sos.py","file_name":"sos.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5421623958","text":"import socket\nimport argparse\nfrom threading import Thread\n\nsocketList = []\n# Command format '#-H xxx.xxx.xxx.xxx -p xxxx -c '\n# Send command\ndef sendCmd(cmd):\n print(\"Send command......\")\n for sock in socketList:\n sock.send(cmd.encode('UTF-8'))\n\n# Wait connect\ndef waitConnect(s):\n while True:\n sock, addr = s.accept()\n if sock not in socketList:\n socketList.append(sock)\n\ndef main():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', 58868))\n s.listen(1024)\n t = Thread(target = waitConnect, args = (s, ))\n t.start()\n \n print('Wait at least a client connection!')\n while not len(socketList):\n pass\n print('It has been a client connection!')\n \n while True:\n print('=' * 50)\n print('The command format:\"#-H xxx.xxx.xxx.xxx -p xxx -c \"')\n \n # Wait for input command\n cmd_str = input(\"Please input cmd:\")\n if len(cmd_str):\n if cmd_str[0] == '#':\n sendCmd(cmd_str)\n\nif __name__ == '__main__':\n main()","repo_name":"satan1a/DDoS_Attacket_v0.1","sub_path":"refer/ddos_server_from_author.py","file_name":"ddos_server_from_author.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"22351601640","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTesting NiFi\n\"\"\"\nfrom nipyapi import canvas\nfrom ...labs import exception_context, retry_test\nfrom ...labs.utils import nifi\n\nQUEUED_MSG_THRESHOLD = 1\n\nnifi.set_environment()\n\n\ndef test_data_flowing():\n for pg in canvas.list_all_process_groups():\n if pg.status.name == 'NiFi Flow':\n continue\n assert pg.status.aggregate_snapshot.bytes_in > 0\n\n\ndef test_nifi_bulletins():\n bulletins = [b for b in canvas.get_bulletin_board().bulletin_board.bulletins if b.bulletin]\n with exception_context(bulletins):\n # Ignore bulletins from Default Atlas Reporting Task to avoid some transient authentication errors\n # Also ignore INFO bulletins.\n assert [] == \\\n ['Bulletin: Time: {}, Level: {}, Source: {}, Node: {}, Message: [{}]'.format(\n b.timestamp, b.bulletin.level if b.bulletin else 'UNKNOWN',\n b.bulletin.source_name if b.bulletin else b.source_id,\n b.node_address, b.bulletin.message if b.bulletin else 'UNKNOWN')\n for b in sorted(bulletins, key=lambda x: x.id)\n if not b.bulletin or (b.bulletin.source_name != 'Default Atlas Reporting Task'\n and b.bulletin.level != 'INFO')]\n\n\n@retry_test(max_retries=10, wait_time_secs=5)\ndef test_nifi_queues():\n assert [] == \\\n ['Found queue not empty: {} -> {}, Queued: {}'.format(\n conn.component.source.name, conn.component.destination.name,\n conn.status.aggregate_snapshot.queued)\n for conn in [x for x in canvas.list_all_connections()\n if int(x.status.aggregate_snapshot.queued_count.replace(',', '')) > QUEUED_MSG_THRESHOLD]]\n","repo_name":"asdaraujo/edge2ai-workshop","sub_path":"setup/terraform/resources/tests/base/test_nifi.py","file_name":"test_nifi.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"61"} +{"seq_id":"14859376047","text":"import tkinter as tk\r\nfrom chatbot_controller import ChatbotController\r\n\r\n# Crear la ventana de la interfaz de 
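The socket-server record above keeps connected clients in a shared list filled by a dedicated accept thread while the main thread reads commands. The same pattern, sketched with two additions of mine: daemon=True so the accept thread does not keep the interpreter alive, and SO_REUSEADDR for quick restarts.

    import socket
    from threading import Thread

    def serve(host='0.0.0.0', port=58868):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(5)
        clients = []

        def accept_loop():
            while True:
                sock, _addr = s.accept()
                clients.append(sock)

        Thread(target=accept_loop, daemon=True).start()
        return clients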
chatbot\r\nwindow = tk.Tk()\r\nwindow.title(\"Chatbot\")\r\nwindow.geometry(\"400x500\")\r\nwindow.configure(background=\"light pink\") # Establecer color de fondo rosa claro\r\n\r\n# Crear el cuadro de texto de salida del chatbot\r\nchatbot_output = tk.Text(window, height=20, bg=\"#FFFFE0\") # Establecer color de fondo crema\r\nchatbot_output.pack(pady=10)\r\n\r\n# Crear el cuadro de texto de entrada del usuario\r\nuser_input = tk.Text(window, height=2)\r\nuser_input.pack(pady=10)\r\n\r\n# Obtener la API key (debes reemplazar 'TU_API_KEY' con tu propia API key)\r\napi_key = ''\r\n\r\n# Crear una instancia del controlador del chatbot\r\nchatbot_controller = ChatbotController(api_key)\r\n\r\n# Configurar la fuente y el color del texto del bot\r\nchatbot_output.tag_configure(\"bot\", font=(\"Arial\", 12), foreground=\"purple\")\r\n\r\n# Función para obtener la respuesta del chatbot\r\ndef get_chatbot_response(event=None):\r\n # Obtener la pregunta del usuario\r\n question = user_input.get(\"1.0\", tk.END).strip()\r\n\r\n # Obtener la respuesta del chatbot utilizando el controlador\r\n response = chatbot_controller.generate_response(question)\r\n\r\n # Mostrar la respuesta en el cuadro de texto de salida\r\n chatbot_output.insert(tk.END, \"Bot: \" + response + \"\\n\", \"bot\")\r\n\r\n # Desplazarse automáticamente al último mensaje\r\n chatbot_output.see(tk.END)\r\n\r\n # Limpiar el cuadro de entrada\r\n user_input.delete(\"1.0\", tk.END)\r\n\r\n# Asociar la pulsación de la tecla \"Enter\" a la función get_chatbot_response()\r\nuser_input.bind(\"\", get_chatbot_response)\r\n\r\n# Crear el botón de enviar\r\nsend_button = tk.Button(window, text=\"Enviar\", command=get_chatbot_response)\r\nsend_button.pack(pady=10)\r\n\r\n# Ejecutar la ventana de la interfaz de chatbot\r\nwindow.mainloop()\r\n","repo_name":"LeoOrVyus/Chatbot-NAO-reto-5","sub_path":"vol2.0/interfaz.py","file_name":"interfaz.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1653816220","text":"from collections import defaultdict, namedtuple, deque\nfrom pathlib import Path \nimport random\nimport math\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.distributions.categorical import Categorical\n\nfrom plot import plot_queue, moving_average\n\n\ndef plot(ax, data):\n ax.set_title(\"Q-learning score history\")\n x = np.array(data) / 60 # Seconds\n ax.set_ylabel('Time [s]')\n ax.set_xlabel('Attempt number')\n ax.plot(x)\n ax.plot(moving_average(x, k=10))\n\ndef state_to_struct(state: list) -> dict:\n # This function must be defined based on the C++ GameState_DQN struct.\n walls = np.array(state[:12]).reshape(6, 2)\n n_slots = state[13:16].index(1) + 4\n cur_slot = next(i for i, x in enumerate(state[16:16+6]) if x > 0)\n return {\n \"walls\": walls,\n \"n_slots\": n_slots,\n \"cur_slot\": cur_slot,\n \"player_pos\": state[22] * n_slots,\n \"world_rotation\": state[23],\n }\n\ndef get_cur_wall_dist(state_struct: dict) -> tuple:\n dist, width = state_struct[\"walls\"][state_struct[\"cur_slot\"]]\n return dist, width\n\ndef get_cur_center_offset(state_struct: dict) -> float:\n # -1: left edge, 0: center, 1: right edge\n pos = state_struct[\"player_pos\"] * state_struct[\"n_slots\"] \n return (pos % 1.0) * 2.0 - 1.0\n\ndef latest_checkpoint(path):\n checkpoints = list(path.glob(\"checkpoint_*.pth\"))\n if len(checkpoints) > 
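The qlearning module above imports moving_average from its plot module and uses it with a window of 10 to smooth the score history; that helper is not shown. A typical convolution-based definition it likely resembles:

    import numpy as np

    def moving_average(x, k=10):
        # Trailing-window mean; the output is k-1 samples shorter.
        x = np.asarray(x, dtype=float)
        return np.convolve(x, np.ones(k) / k, mode='valid')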
0:\n n = max(map(int, (p.stem.rsplit('_')[1] for p in checkpoints)))\n return path / f\"checkpoint_{n}.pth\"\n if Path(path, \"checkpoint.pth\").exists():\n return Path(path, \"checkpoint.pth\")\n return None \n\n\nINPUT_SIZE = 6*2 + 1 + 3 + 6 + 1 + 1\nOUT_SIZE = 3\n\nclass SupaNet(nn.Module):\n def __init__(self, sizes=[32, 32]):\n super().__init__()\n\n sizes = [INPUT_SIZE] + sizes + [OUT_SIZE]\n self.net = nn.Sequential()\n for idx, (in_size, out_size) in enumerate(zip(sizes, sizes[1:])):\n self.net.add_module(str(2*idx), nn.Linear(in_size, out_size))\n if idx < len(sizes) - 2:\n self.net.add_module(str(2*idx + 1), nn.ReLU())\n\n def forward(self, state):\n return self.net(state)\n\n\nTransition = namedtuple('Transition',\n ('state', 'action', 'reward', 'next_state'))\n\nclass ReplayMemory:\n def __init__(self, capacity, sample_strategy=\"uniform\", filter_strategy=\"none\"):\n self.sample_strategy = sample_strategy\n self.filter_strategy = filter_strategy\n self.memory = deque([], maxlen=capacity)\n self.weights = [1] * capacity\n self.replay_counts = defaultdict(int)\n\n def push(self, *args):\n def update_count(item):\n self.replay_counts[item] += 1\n if len(self.memory) == self.memory.maxlen:\n to_be_popped = self.memory[0]\n self.replay_counts[to_be_popped] -= 1\n if self.replay_counts[to_be_popped] == 0:\n del self.replay_counts[to_be_popped]\n\n item = Transition(*args)\n if self.filter_strategy == \"none\":\n self.memory.append(item)\n elif self.filter_strategy == \"keepone\":\n if self.replay_counts[item] < 1:\n update_count(item)\n self.memory.append(item)\n else:\n raise ValueError(f\"Unknown filter strategy: {self.filter_strategy}\")\n return item\n\n def sample(self, batch_size):\n if self.sample_strategy == \"weighted_negative\":\n prev = self.memory[0]\n for i, item in enumerate(self.memory):\n if i == 0 or prev.reward < 0:\n self.weights[i] = 1\n else:\n self.weights[i] = self.weights[i - 1] + 1\n prev = item\n if len(self.memory) < self.memory.maxlen:\n weights = self.weights[:len(self.memory)]\n else:\n weights = self.weights\n return random.choices(self.memory, weights=weights, k=batch_size)\n elif self.sample_strategy == \"uniform\":\n return random.sample(self.memory, batch_size)\n raise ValueError(f\"Unknown sample strategy: {self.sample_strategy}\")\n\n def __len__(self):\n return len(self.memory)\n\n def __repr__(self) -> str:\n return repr(self.memory)\n\nclass SupaDQN:\n def __init__(self, experiment_name=\"exp30\"):\n self.experiment_name = experiment_name\n\n ### Hyperparams\n self.params = {\n \"nn_sizes\": [64, 64], # Number of neurons in hidden layers\n \"eps_start\": 0.95, # Exploration rate\n \"eps_end\": 0.001,\n \"eps_decay\": 100000,\n \"eps_restart\": False, # Restart exploration after this many steps\n \"eps_restart_steps\": 500_000,\n \"target_update\": 200, # Update target_net to policy_net every this many steps.\n \"gamma\": 0.99, # Reward decay rate !!!!!!!!!!!\n \"deterministic_actions\": True,\n \"memory_size\": 100000, # Approx. 
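SupaNet above assembles a Linear/ReLU stack from a list of layer sizes, leaving the final layer linear so it can emit raw Q-values. The same construction as a free function (a sketch mirroring that logic):

    import torch.nn as nn

    def mlp(sizes):
        # e.g. mlp([24, 64, 64, 3]) -> two hidden ReLU layers, linear head.
        layers = []
        for i, (n_in, n_out) in enumerate(zip(sizes, sizes[1:])):
            layers.append(nn.Linear(n_in, n_out))
            if i < len(sizes) - 2:
                layers.append(nn.ReLU())
        return nn.Sequential(*layers)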
10 minutes of gameplay\n \"memory_sample_strategy\": \"uniform\", # Memory sampling strategy\n \"memory_filter_strategy\": \"none\",\n \"batch_size\": 128, # Take this many samples from memory for each optimization batch\n \"batch_iterations\": 2, # How many training optimization iterations to make for each step\n \"reward_slot_center\": True, # Receive reward for being close to the center of a slot\n \"reward_slot_center_amount\": 0.1,\n \"reward_far_wall\": True, # Receive reward for being on a slot where the wall is far away\n \"reward_far_wall_amount\": 0.5,\n \"reward_interval\": 60, # Receive interval_reward reward after this many successful steps\n \"interval_reward\": 0,\n \"default_reward\": 0.1, # Receive reward each step\n \"loss_reward\": -1, # Reward when game over\n }\n for param, value in self.params.items():\n setattr(self, param, value)\n ###\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # Double Q-learning uses two networks.\n # target_net is a periodic copy of policy_net.\n self.policy_net = SupaNet(self.nn_sizes).to(self.device)\n self.target_net = SupaNet(self.nn_sizes).to(self.device)\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.policy_net.train()\n self.target_net.eval()\n\n self.optimizer = torch.optim.Adam(self.policy_net.parameters())\n self.criterion = torch.nn.SmoothL1Loss()\n\n self.state = None\n self.action = 0\n self.reward = 0\n self.memory = ReplayMemory(self.memory_size, \n sample_strategy=self.memory_sample_strategy, filter_strategy=self.memory_filter_strategy)\n self.steps_taken = 0\n self.frame_number = 0 # Frame number in episode\n self.reward_in_episode = []\n\n self.is_learning = False \n self.score_history = []\n\n # The model works with indices [0, 3), but the server expects [-1,0,1].\n self.actions_tr = [-1, 0, 1] # Map action index to action\n self.actions_tr_inv = { v: i for i, v in enumerate(self.actions_tr) }\n\n self.tb_writer = SummaryWriter(f\"runs/{self.experiment_name}\") # For tensorboard\n self.checkpoint_enabled = True \n self.checkpoint_update_interval = 20_000 # Steps\n self.checkpoint_path = Path(f'./checkpoints/{self.experiment_name}')\n if self.checkpoint_enabled:\n self.load_checkpoint()\n self.tb_writer.add_text(\"params\", str(self.params), self.steps_taken)\n\n def reset(self):\n self.state = None\n self.action = 0\n self.reward = 0\n self.frame_number = 0\n self.reward_in_episode = []\n\n def exploration_rate(self, t):\n if self.eps_restart:\n t = t % self.eps_restart_steps\n return self.eps_end + (self.eps_start - self.eps_end) * math.exp(-t / self.eps_decay)\n\n def pick_action(self, state):\n state = torch.tensor(state).to(self.device)\n if self.is_learning:\n if torch.rand(1) < self.exploration_rate(self.steps_taken):\n return random.randrange(0, 3)\n with torch.no_grad():\n logits = self.policy_net(state)\n if self.is_learning and not self.deterministic_actions:\n m = Categorical(logits=logits)\n return m.sample().item()\n else:\n return logits.argmax().item()\n\n def optimize(self):\n if len(self.memory) < self.batch_size:\n return \n\n for it in range(self.batch_iterations):\n batch = self.memory.sample(self.batch_size)\n states = torch.tensor([b.state for b in batch]).to(self.device)\n actions = torch.tensor([b.action for b in batch]).view(-1, 1).to(self.device)\n rewards = torch.tensor([b.reward for b in batch]).view(-1, 1).to(self.device)\n non_final_mask = torch.tensor([b.next_state is not None for b in batch]).to(self.device)\n non_final_next_states = 
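exploration_rate above anneals epsilon exponentially from eps_start to eps_end, and pick_action explores with that probability. A pure-Python sketch of the schedule and the epsilon-greedy choice, using the defaults from the hyperparameter dict above:

    import math
    import random

    def epsilon(t, start=0.95, end=0.001, decay=100_000):
        # Exponential decay from `start` toward `end` over ~`decay` steps.
        return end + (start - end) * math.exp(-t / decay)

    def epsilon_greedy(q_values, t):
        if random.random() < epsilon(t):
            return random.randrange(len(q_values))
        return max(range(len(q_values)), key=q_values.__getitem__)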
torch.tensor([b.next_state for b in batch if b.next_state is not None]).to(self.device)\n\n # Pick the Q values of the selected actions for each state\n model_Qs = self.policy_net(states).gather(1, actions)\n # V(s) = 0 if s is final; get best V(s)s\n next_state_Vs = torch.zeros(self.batch_size, 1).to(self.device)\n with torch.no_grad():\n # detach because this happens on the 'target' net, not the online net\n next_state_Vs[non_final_mask] = self.target_net(non_final_next_states).detach().max(1)[0].view(-1, 1)\n target_Qs = (next_state_Vs * self.gamma) + rewards \n\n loss = self.criterion(model_Qs, target_Qs)\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 10)\n self.optimizer.step()\n\n # self.tb_writer.add_scalar(\"Loss\", loss, self.steps_taken)\n\n def step(self, next_state, reward, done=False):\n self.steps_taken += 1\n if self.steps_taken % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if self.state is None: # First frame\n self.state = next_state\n self.action = self.actions_tr_inv[0]\n return self.action\n\n # Given the reward for the previous action and the new state.\n # When done=True, next_state is None\n # Store (s,a,r,s') into replay memory.\n # N.B. all values are Python types (not tensors).\n self.memory.push(tuple(self.state), self.action, reward, tuple(next_state) if next_state is not None else next_state)\n\n self.optimize()\n\n self.state = next_state \n if not done:\n self.action = self.pick_action(next_state)\n else:\n self.action = self.actions_tr_inv[0]\n return self.action\n\n def on_episode_end(self, score=None):\n action = self.actions_tr_inv[0]\n if self.is_learning:\n self.score_history.append(score)\n reward = self.loss_reward\n action = self.step(None, reward, done=True)\n plot_queue.put((plot, self.score_history))\n\n self.tb_writer.add_scalar(\"Reward/Mean reward per episode\", np.mean(self.reward_in_episode), len(self.score_history))\n self.tb_writer.add_scalar(\"Reward/Total reward per episode\", np.sum(self.reward_in_episode), len(self.score_history))\n self.tb_writer.add_scalar(\"Exploration rate\", self.exploration_rate(self.steps_taken), self.steps_taken)\n self.tb_writer.add_scalar(\"Memory size\", len(self.memory.memory), self.steps_taken)\n self.tb_writer.add_scalar(\"Score/Score vs step\", score, self.steps_taken)\n self.tb_writer.add_scalar(\"Score/Score vs episode\", score, len(self.score_history))\n self.tb_writer.flush()\n \n self.reset()\n return self.actions_tr[action]\n\n def get_action(self, state):\n # plot_queue.put((plot_state, state))\n self.frame_number += 1\n\n if self.is_learning:\n reward = self.default_reward\n # Reward shaping\n if self.frame_number % self.reward_interval == 0:\n reward += self.interval_reward\n state_struct = state_to_struct(state)\n if self.reward_slot_center:\n center_offset = get_cur_center_offset(state_struct)\n reward += self.reward_slot_center_amount * pow(1 - abs(center_offset), 4)\n if self.reward_far_wall:\n wall_dist, wall_width = get_cur_wall_dist(state_struct)\n reward += self.reward_far_wall_amount * wall_dist\n\n action = self.step(state, float(reward), done=False)\n\n self.reward_in_episode.append(reward)\n\n if self.checkpoint_enabled and self.steps_taken % self.checkpoint_update_interval == 0:\n self.save_checkpoint()\n else:\n action = self.pick_action(state)\n return self.actions_tr[action]\n\n def set_is_learning(self, is_learning):\n self.is_learning = is_learning\n self.reset()\n\n 
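The optimize step above forms the standard target-network TD target: y = r + gamma * max_a' Q_target(s', a'), with y = r when s' is terminal (next_state is None). The target computation in isolation (names and the shape convention are mine):

    import torch

    def td_targets(rewards, next_q_max, non_final, gamma=0.99):
        # rewards: (B, 1); non_final: (B,) bool mask; next_q_max: (K, 1)
        # values of max_a' Q_target(s', a') for the K non-terminal rows.
        v = torch.zeros_like(rewards)
        v[non_final] = next_q_max
        return rewards + gamma * v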
def save_checkpoint(self):\n        # print([x[1] for x in sorted(self.memory.replay_counts.items(), key=lambda x: x[1], reverse=True)[:100]])\n\n        self.checkpoint_path.mkdir(parents=True, exist_ok=True)\n        path = self.checkpoint_path / f\"checkpoint_{self.steps_taken}.pth\"\n        state = {\n            'memory': self.memory,\n            'score_history': self.score_history,\n            'steps_taken': self.steps_taken,\n            'policy_net': self.policy_net.state_dict(),\n            'optimizer': self.optimizer.state_dict(),\n            'params': self.params,\n            # scheduler\n        }\n        torch.save(state, path)\n        print(path)\n\n        fig, ax = plt.subplots()\n        plot(ax, self.score_history)\n        fig.savefig(path.with_suffix(\".png\"))\n        plt.close(fig)\n\n    def load_checkpoint(self, path=None):\n        if path is None:\n            # Resume latest checkpoint\n            path = latest_checkpoint(self.checkpoint_path)\n            if path is None:\n                return False\n\n        print(path)\n        state = torch.load(path)\n\n        self.policy_net.load_state_dict(state['policy_net'])\n        self.target_net.load_state_dict(self.policy_net.state_dict())\n        self.optimizer.load_state_dict(state['optimizer'])\n\n        self.memory = state['memory']\n        self.score_history = state['score_history']\n        self.steps_taken = state['steps_taken']\n\n        self.params = state['params']\n        for param, value in self.params.items():\n            setattr(self, param, value)\n\n        plot_queue.put((plot, self.score_history))\n        return True\n","repo_name":"mare5x/SuperHaxagon","sub_path":"py/qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":15008,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"16046316899","text":"from ml import *\r\nfrom itp2itc import *\r\nimport subprocess\r\n\r\nITPCNV_PATH = os.path.join(os.path.dirname(__file__), r'tools\\\itpcnv.exe')\r\nPNGQUANT_PATH = os.path.join(os.path.dirname(__file__), r'tools\\\pngquant.exe')\r\nNCONVERT_PATH = os.path.join(os.path.dirname(__file__), r'tools\\\nconvert.exe')\r\n\r\nPNG_TO_ITP = 0\r\nPNG_TO_ITC = 1\r\n\r\ndef detectInputType(name):\r\n    parts = name.rsplit('_', 1)\r\n    if len(parts) == 1:\r\n        return PNG_TO_ITP, None\r\n\r\n    prefix, seq = parts\r\n    seq = os.path.splitext(seq)[0]\r\n    if not seq.isdigit():\r\n        return PNG_TO_ITP, None\r\n\r\n    return PNG_TO_ITC, (prefix, seq)\r\n\r\ndef replaceExt(input, ext):\r\n    return os.path.splitext(input)[0] + ext\r\n\r\ndef call(*args):\r\n    p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\r\n    p.wait()\r\n    return p\r\n\r\ndef png2itp(input, output):\r\n    png8 = input + '.8bit.png'\r\n    tmppng8 = png8 + '.tmp'\r\n\r\n    for f in [png8, tmppng8]:\r\n        try:\r\n            os.remove(f)\r\n        except FileNotFoundError:\r\n            pass\r\n\r\n    p = call(PNGQUANT_PATH, '--force', '256', input, '--output', tmppng8)\r\n    if p.returncode != 0:\r\n        print(p.stderr.read().decode('mbcs').strip())\r\n        raise Exception('convert %s to png8 failed' % input)\r\n\r\n    p = call(NCONVERT_PATH, '-quiet', '-o', png8, tmppng8)\r\n    os.remove(tmppng8)\r\n    if p.returncode != 0:\r\n        print(p.stderr.read())\r\n        raise Exception('nconvert %s failed' % input)\r\n\r\n    for mode in ['-v6', '-v4', '-v2', '-v0']:\r\n        p = call(ITPCNV_PATH, mode, png8, output)\r\n        if p.returncode == 0 and os.path.exists(output):\r\n            break\r\n    else:\r\n        # No itpcnv mode succeeded: surface its output and mark the failure\r\n        print(p.stdout.read().decode('mbcs'))\r\n        p.returncode = -1\r\n\r\n    os.remove(png8)\r\n    if p.returncode != 0:\r\n        raise Exception('convert %s to itp failed with ret code = %X' % (input, p.returncode))\r\n\r\ndef png2itc(prefix, namelen, output):\r\n    format = '%0{len}d'.format(len = namelen)\r\n    print('%s_%s.itp 
=> %s ...' % (prefix, format, output))\r\n\r\n index = 0\r\n itc = fileio.FileStream(output, 'wb')\r\n itc.Position = SIZE_OF_MAGIC + ITC_ENTRY_COUNT * ITC_ENTRY_SIZE + SIZE_OF_SCALE_INFO\r\n\r\n entries = []\r\n offset = 0\r\n\r\n while True:\r\n name = (format % index) + '.png'\r\n name = prefix + '_' + name\r\n if not os.path.exists(name):\r\n break\r\n\r\n print('\\r%s' % name, end = '')\r\n\r\n tmpitp = name + '.tmp.itp'\r\n index += 1\r\n\r\n png2itp(name, tmpitp)\r\n itp = fileio.FileStream(tmpitp, 'rb')\r\n\r\n entry = ItcEntry(itc.Position, itp.Length)\r\n entries.append(entry)\r\n\r\n itc.Write(itp.Read())\r\n itp.Close()\r\n\r\n os.remove(tmpitp)\r\n\r\n itc.Position = 0\r\n itc.Write(b'V101')\r\n for entry in entries:\r\n itc.WriteULong(entry.offset)\r\n itc.WriteULong(entry.size)\r\n\r\n itc.Position = SIZE_OF_MAGIC + ITC_ENTRY_COUNT * ITC_ENTRY_SIZE\r\n\r\n for info in ScaleInfo:\r\n itc.WriteULong(info)\r\n\r\n print()\r\n\r\ndef handleFile(name):\r\n type, extra = detectInputType(name)\r\n\r\n if type == PNG_TO_ITP:\r\n output = replaceExt(name, '.itp')\r\n print('%s -> %s ...' % (name, output))\r\n png2itp(name, output)\r\n\r\n elif type == PNG_TO_ITC:\r\n prefix, seq = extra\r\n png2itc(prefix, len(seq), prefix + '.itc')\r\n\r\ndef main():\r\n for name in sys.argv[1:]:\r\n handleFile(os.path.abspath(name))\r\n\r\n console.pause('done')\r\n\r\nif __name__ == '__main__':\r\n Try(main)\r\n","repo_name":"Ouroboros/Falcom","sub_path":"ED7/Decompiler/png32toitx.py","file_name":"png32toitx.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"23498803311","text":"\nsmall_data_set = 10\nlarge_data_set = 100\n\nswap_count = 0\n\n\ndef main():\n t = int(input())\n if not 1 <= t or not t <= 100:\n print('1 <= T <= 100')\n return\n\n for i in range(1, t + 1):\n global swap_count\n swap_count = 0\n pancakes = list(input())\n\n for index in range(len(pancakes)):\n pancake = pancakes[index]\n\n # case 1: -+\n if pancake == '-':\n try:\n next_pancake = pancakes[index+1]\n if next_pancake == '+':\n pancakes = recursive_swap(pancakes, index)\n except:\n pancakes = recursive_swap(pancakes, index)\n\n # case 2: +-\n elif pancake == '+':\n try:\n next_pancake = pancakes[index+1]\n if next_pancake == '-':\n pancakes = recursive_swap(pancakes, index)\n except:\n pass\n\n # check pancakes\n if '-' not in pancakes:\n print('Case #{}: {}'.format(i, swap_count))\n break\n\n\ndef recursive_swap(pancakes, end_index):\n global swap_count\n for index in range(end_index+1):\n pancake = pancakes[index]\n pancakes[index] = '+' if pancake == '-' else '-'\n swap_count += 1\n return pancakes\n\n\nmain()\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/3684.py","file_name":"3684.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27510061243","text":"\"\"\" \nWrite a function, prereqs_possible, that takes in a number of courses (n) \nand prerequisites as arguments. Courses have ids ranging from 0 through \nn - 1. A single prerequisite of (A, B) means that course A must be taken \nbefore course B. The function should return a boolean indicating whether \nor not it is possible to complete all courses.\n\nIf you visualize this problem given below, you'll find that there is a cycle \ndetected. 
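(Editor-added note: equivalently, all the courses can be completed iff the\nprerequisite graph is a DAG, i.e. it admits a topological order; the DFS below is\nthe classic three-color cycle check, with the colors tracked as two sets.) 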
When the prerequisite graph contains a cycle, it is impossible to complete \nall the courses. \n\nd --> b --> a --> c \n      ^           |\n      |           v\n      e <---------+ \n\nThere is a cycle through the nodes e, b, a, and c, so the answer is False \n\"\"\"\n\nnumCourses = 6\nprereqs = [\n    (0, 1),\n    (2, 3),\n    (0, 2),\n    (1, 3),\n    (4, 5),\n] \n\ndef prereqs_possible(numCourses, prereqs):\n\n    # Helper function to build an adjacency list for the information provided\n    graph = build_graph(numCourses, prereqs)\n    # Variables to mark nodes as gray and black\n    # In this code, nodes are not literally marked white/grey/black. But we keep \n    # track of them in visiting (in the process of being explored) or visited (done exploring).\n    visiting = set()\n    visited = set()\n    # Iterating over every node in the adjacency list\n    for node in graph:\n        # If the DFS returns true, then we have detected a cycle and we must return False \n        if detect_cycle(graph, node, visiting, visited) == True:\n            return False \n    # Base case, return True if no cycle is detected\n    return True \n\n# DFS implementation\ndef detect_cycle(graph, node, visiting, visited):\n    # if node is in visited set, then it has been previously explored. \n    # No need to explore any further\n    if node in visited:\n        return False \n    # Base case, node exists in visiting, cycle has been detected\n    if node in visiting:\n        return True \n    # If not, add it to the visiting set\n    visiting.add(node)\n\n    # Exploring all the neighbors of the node\n    for neighbor in graph[node]:\n        # If the recursive call returns true, we have detected a cycle\n        if detect_cycle(graph, neighbor, visiting, visited) == True:\n            return True \n    # Otherwise remove the node from visiting and add it to visited \n    visiting.remove(node)\n    visited.add(node)\n\n    # return False by default if the above conditions don't apply. 
\n    return False \n\n# Code to generate an adjacency list\nimport collections \ndef build_graph(numCourses, prereqs):\n\n    graph = collections.defaultdict(list) \n\n    for i in range(numCourses):\n        graph[i] = []\n\n    for a,b in prereqs:\n        graph[a].append(b)\n\n    return graph\n# Driver code\nprint(prereqs_possible(numCourses, prereqs))\n","repo_name":"monika0603/glowing-spork","sub_path":"graphs/prereqs_possible.py","file_name":"prereqs_possible.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23406770341","text":"#!/usr/bin/python\n\nimport sys\nimport functools\nimport operator\nimport bisect\n\ndef isC(c):\n    return c not in \"aeiou\"\n\ndef findNext(s, i, n):\n    a = 0\n    l = len(s)\n    start = -1\n    current = i\n    while (a < n and current < l):\n        if isC(s[current]):\n            if start == -1:\n                start = current\n            a += 1\n        else:\n            start = -1\n            a = 0\n\n        current += 1\n\n    if a < n:\n        return -1\n    else:\n        return start\n\ndef solve(s, n):\n    num = 0\n    a = -1\n    i = findNext(s,0,n)\n\n    while (i != -1):\n        r = i - (a+1)\n        q = len(s) - (i + n)\n        num += 1 + r + q + r*q\n\n        a = i\n        i = findNext(s, a+1, n)\n\n    return num\n\n\ndef main():\n    N = int(sys.stdin.readline()) # number of testcases\n    for i in range(N):\n        [s, n] = sys.stdin.readline().rstrip().split()\n        n = int(n)\n        result = solve(s, n)\n        print (\"Case #%s: %s\" % (i+1, result))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_126/496.py","file_name":"496.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"4160605758","text":"from matplotlib.pyplot import legend, title\nimport plotly.graph_objects as go\nfrom plotly import offline\n\nmen = [37.9,46.5,37.6,25.6]\nwomen = [35.4,43.3,37.8,22.7]\ntotal=[36.6,44.9,37.7,24.1]\nage_range=[\"Over 20\",\"20-39\",\"40-59\",\"Over 60\"]\n\n\ncolors=['purple', 'green', 'black', 'yellow']\n\nfig = go.Figure()\nfig.add_trace(go.Bar(\n    x =age_range,\n    y=men,\n    name = \"Men\",\n    marker_color=\"yellow\"\n))\nfig.add_trace(go.Bar(\n    x=age_range,\n    y=women,\n    name=\"Women\",\n    marker_color = \"purple\"\n))\n\nlayout=fig.update_layout(\n    title=\"Percentage of people who eat fast food every day\",\n    title_font_color=\"darkblue\",\n    title_font_size=40,\n    xaxis_tickfont_size=14,\n    yaxis= dict(\n        title='Percentage',\n        titlefont_size=16,\n        tickfont_size=14,\n    ),\n    legend =dict(\n        x=0,\n        y=1.0,\n        bgcolor='black',\n        bordercolor='gray'\n    ),\n    barmode='group',\n    bargap=0.25,\n    bargroupgap=0.1\n)\n\n#fig.show()\noffline.plot({'data': fig, 'layout':layout}, filename='bar.html')\n# Pie Chart Example:\n\n# labels = 'Freshwater Fish', 'Cats', 'Dogs', 'Birds'\n# numPets = [142, 88.3, 74.8, 16]\n# colors=['lightgreen','lightblue','lavender','lemonchiffon']\n# fig = go.Figure(data=[go.Pie(labels=labels, values=numPets)])\n\n","repo_name":"RiggityRussell/CIT228","sub_path":"Chapter15/plotly_chart.py","file_name":"plotly_chart.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23468290641","text":"inputFile = open('A-large.in', 'r')\ninString = inputFile.read()\ninputLines = inString.split('\\n')\ninputLines.pop(0)\noutput = ''\ncase = 1\ni = 0\nfor line in inputLines:\n\tif i == 1:\n\t\ti -= 1\n\t\tmushroomIncrements = line.split(' 
')\n\t\tmushroomIncrements = map(int, mushroomIncrements)\n\t\tmethodOneMin = 0\n\t\tmethodTwoMin = 0\n\t\tmethodTwoMaxEat = 0\n\t\tdifferences = []\n\t\tfor j in range(len(mushroomIncrements) - 1):\n\t\t\tdifference = mushroomIncrements[j] - mushroomIncrements[j + 1]\n\t\t\tdifferences.append(difference)\n\t\t\tif mushroomIncrements[j] > mushroomIncrements[j + 1]:\n\t\t\t\tmethodOneMin += difference\n\t\t\tif difference > methodTwoMaxEat:\n\t\t\t\tmethodTwoMaxEat = difference\n\t\tif methodTwoMaxEat == -1:\n\t\t\tmethodTwoMaxEat = 0\n\t\tj = 0 \n\t\tfor difference in differences:\n\t\t\tmethodTwoMin += min(mushroomIncrements[j], methodTwoMaxEat)\n\t\t\tj += 1\n\t\toutput += ('Case #' + str(case) + ': ' + str(methodOneMin) + ' ' + str(methodTwoMin) + '\\n')\t\n\t\tcase += 1\n\telse:\n\t\ti += 1\n\t\noutputFile = open('out.out', 'w')\noutputFile.write(output)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_159/632.py","file_name":"632.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36503218139","text":"bl_info = {\n \"name\" : \"Prepare Unity Binary\",\n \"description\" : \"Prepares file for unity binary-fbx\",\n \"author\" : \"Steffenvy\",\n \"blender\" : (2, 80, 0),\n \"category\" : \"Custom\"\n }\n\nimport bpy\n\n\ndef rename(name):\n return name.replace(\" \", \"_\").replace(\".\", \"_\")\n \n \nclass PrepareUnityBinary(bpy.types.Operator):\n \"\"\"Tooltip\"\"\"\n bl_idname = \"custom.prepare_unity_binary\"\n bl_label = \"Prepare Unity Binary\"\n\n def execute(self, context):\n \n #Target Converter\n try:\n bpy.ops.target_converter.upgrade()\n except:\n pass\n \n for object in context.blend_data.objects:\n \n #Object\n object.name = rename(object.name)\n \n #Bones\n if object.type == \"ARMATURE\":\n bones = object.data.bones\n for b in bones:\n b.name = rename(b.name)\n\n #Curve Converter\n if \"names\" in object.keys():\n object[\"names\"] = rename(object[\"names\"])\n \n return {'FINISHED'}\n\n\ndef register() :\n bpy.utils.register_class(PrepareUnityBinary)\n \ndef unregister():\n bpy.utils.unregister_class(PrepareUnityBinary)\n \n \nif __name__ == \"__main__\":\n register()\n ","repo_name":"Steffenvy/Misc-Blender-Addons","sub_path":"blender_unity_fixer.py","file_name":"blender_unity_fixer.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35368374385","text":"from __future__ import annotations\n\nfrom contextlib import nullcontext\n\nimport pytest\nfrom packaging.version import Version\nfrom pygls.workspace import Workspace\n\nfrom ruff_lsp.server import (\n VERSION_REQUIREMENT_FORMATTER,\n Document,\n _fixed_source_to_edits,\n _run_format_on_document,\n)\nfrom tests.client import utils\n\noriginal = \"\"\"\nx = 1\n\"\"\"\n\nexpected = \"\"\"x = 1\n\"\"\"\n\n\n@pytest.mark.asyncio\nasync def test_format(tmp_path, ruff_version: Version):\n test_file = tmp_path.joinpath(\"main.py\")\n test_file.write_text(original)\n uri = utils.as_uri(str(test_file))\n\n workspace = Workspace(str(tmp_path))\n document = Document.from_text_document(workspace.get_text_document(uri))\n\n handle_unsupported = (\n pytest.raises(RuntimeError, match=f\"Ruff .* required, but found {ruff_version}\")\n if not VERSION_REQUIREMENT_FORMATTER.contains(ruff_version)\n else nullcontext()\n )\n\n with handle_unsupported:\n result = await _run_format_on_document(document)\n 
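# Editor's note (added): when the installed Ruff satisfies VERSION_REQUIREMENT_FORMATTER,\n        # handle_unsupported is a nullcontext() no-op and execution reaches the assertions\n        # below; otherwise the RuntimeError raised above is swallowed by pytest.raises and\n        # the rest of this with-block is skipped.\n        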
assert result is not None\n        assert result.exit_code == 0\n        [edit] = _fixed_source_to_edits(\n            original_source=document.source, fixed_source=result.stdout.decode(\"utf-8\")\n        )\n        assert edit.new_text == expected\n\n\n@pytest.mark.asyncio\nasync def test_format_code_with_syntax_error(tmp_path, ruff_version: Version):\n    source = \"\"\"\nfoo =\n\"\"\"\n\n    test_file = tmp_path.joinpath(\"main.py\")\n    test_file.write_text(source)\n    uri = utils.as_uri(str(test_file))\n\n    workspace = Workspace(str(tmp_path))\n    document = Document.from_text_document(workspace.get_text_document(uri))\n\n    handle_unsupported = (\n        pytest.raises(RuntimeError, match=f\"Ruff .* required, but found {ruff_version}\")\n        if not VERSION_REQUIREMENT_FORMATTER.contains(ruff_version)\n        else nullcontext()\n    )\n\n    with handle_unsupported:\n        result = await _run_format_on_document(document)\n        assert result is not None\n        assert result.exit_code == 2\n","repo_name":"charliermarsh/ruff-lsp","sub_path":"tests/test_format.py","file_name":"test_format.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":421,"dataset":"github-code","pt":"61"}
{"seq_id":"3688120778","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport ezodf\nfrom ezodf.text import Paragraph, Heading, Span, ListItem, List, CN\n\nfrom svd_util import optz\noptz.text( 'template')\noptz,args = optz.get()\n\ninp = args[0]\n\nd = ezodf.opendoc( inp)\n\nodt = ezodf.newdoc( doctype='odt',\n    filename= inp.replace('.odt','.flat.odt'),\n    template= optz.template or inp)\nbody = odt.body\n#pic = body.find( CN(\"draw:frame\"))\n\nbody.clear()\n#if pic: body.append( pic)\n\nif 0:\n    img = CN('draw:image')\n    fra = CN('draw:frame')\n    tbx = CN(\"draw:text-box\")\n\n    once = 0\n    def delimg( a):\n        global once\n        for i in a:\n            if i.xmlnode.tag == img:\n                if once:\n                    print( 66666, i.xmlnode)\n                a.remove( i)\n                once +=1\n            else:\n                delimg( i)\n\ndef walk( a, lvl =0):\n    #print(11111111111111, a, a.xmlnode, a.plaintext())\n    if lvl<2 and a.__class__ is ezodf.base.GenericWrapper:\n        for i in a:\n            walk( i, lvl= lvl+1)\n    else:\n        body.append( a)\n\nfor a in d.body:\n    #delimg(a)\n    walk(a)\nodt.save()\n\n# vim:ts=4:sw=4:expandtab\n","repo_name":"svilendobrev/svd_bin","sub_path":"convert/openoffice/odt2flat.py","file_name":"odt2flat.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
{"seq_id":"432951649","text":"import math\nfrom engine import Vector\nimport cairo\nimport time\n\n\nclass Projectile(object):\n    def __init__(self, heading, sourcebot):\n        self.heading = heading % 360\n        self.sourcebot = sourcebot\n\n        self.position = [self.sourcebot.position[0], self.sourcebot.position[1]]\n        self.detonated = False\n\n        self.radius = 2\n        self.speed = 10\n        self.velocity = self.calc_velocity()\n        self.damage = 10\n\n        self.init_position()\n\n    def init_position(self):\n        radians = math.radians(self.heading)\n        dx = math.cos(radians) * (self.sourcebot.body.radius + self.radius + 1)\n        dy = math.sin(radians) * (self.sourcebot.body.radius + self.radius + 1)\n        self.position[0] += dx\n        self.position[1] += dy\n\n\n    def calc_velocity(self):\n        radians = math.radians(self.heading)\n        dx = math.cos(radians) * self.speed\n        dy = math.sin(radians) * self.speed\n        return Vector(dx, dy)\n\n    def update(self):\n        self.position[0] += self.velocity[0]\n        self.position[1] += self.velocity[1]\n\n    def draw(self, ctx):\n        ctx.set_operator(cairo.OPERATOR_ADD)\n        ctx.set_line_width(1)\n        
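# Editor's note (added): cairo.OPERATOR_ADD above blends the projectile additively\n        # with the framebuffer, so overlapping shots brighten; the line width set here\n        # has no effect on the filled arc drawn below.\n        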
ctx.set_source_rgb(*self.sourcebot.color)\n ctx.arc(self.position[0], self.position[1], self.radius, 0, math.pi*2)\n ctx.fill()\n\n","repo_name":"fostrb/robotfight","sub_path":"projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30240812059","text":"from rest_framework import serializers\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\nfrom smartforests.models import User\n\n\nclass UserField(serializers.Field):\n def to_representation(self, value):\n return UserSerializer(value)\n\n def to_internal_value(self, data):\n if isinstance(data, User):\n return data\n elif isinstance(data, int):\n return User.objects.get(pk=data)\n elif isinstance(data, str):\n return User.objects.get(username=data)\n else:\n raise serializers.ValidationError(\"Invalid user\")\n\n\nclass UserSerializer(serializers.Serializer):\n class Meta:\n model = User\n fields = '__all__'\n\n def to_representation(self, instance):\n return {\n 'id': instance.id,\n 'username': instance.username,\n 'first_name': instance.first_name,\n 'last_name': instance.last_name,\n }\n\n\nclass PageCoordinatesSerializer(GeoFeatureModelSerializer):\n class Meta:\n model = 'logbooks.GeocodedMixin'\n geo_field = \"coordinates\"\n fields = '__all__'\n","repo_name":"planetarypraxis/smartforests","sub_path":"logbooks/models/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6875649727","text":"from django import forms\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext as _\nfrom nested_inline.admin import NestedStackedInline\n\nfrom ESSArch_Core.storage.models import (\n STORAGE_TARGET_STATUS_ENABLED,\n TAPE,\n Robot,\n StorageMedium,\n StorageMethod,\n StorageMethodTargetRelation,\n StorageObject,\n StorageTarget,\n TapeDrive,\n)\n\n\nclass StorageTargetInline(NestedStackedInline):\n \"\"\"\n StorageTarget configuration\n \"\"\"\n model = StorageMethodTargetRelation\n fk_name = 'storage_method'\n extra = 0\n fields = (\n 'name',\n 'status',\n 'storage_target',\n )\n verbose_name = 'target relation'\n verbose_name_plural = ''\n\n def get_formset(self, request, obj=None, **kwargs):\n formset = super().get_formset(request, obj, **kwargs)\n form = formset.form\n form.base_fields['storage_target'].widget.can_add_related = False\n form.base_fields['storage_target'].widget.can_change_related = False\n return formset\n\n\nclass StorageMethodTargetRelationInlineFormSet(forms.models.BaseInlineFormSet):\n def clean(self):\n enabled = False\n for form in self.forms:\n if form.cleaned_data.get('status') == STORAGE_TARGET_STATUS_ENABLED:\n if enabled:\n raise forms.ValidationError(\n _('Only 1 target can be enabled for a storage method at a time'),\n code='invalid',\n )\n\n enabled = True\n\n\nclass StorageMethodTargetRelationInline(admin.TabularInline):\n \"\"\"\n StorageMethodTargetRelation configuration\n \"\"\"\n model = StorageMethod.targets.through\n formset = StorageMethodTargetRelationInlineFormSet\n extra = 0\n\n\nclass StorageTargetsAdmin(admin.ModelAdmin):\n \"\"\"\n StorageTargets configuration\n \"\"\"\n list_display = ('name', 'target')\n sortable_field_name = \"name\"\n fieldsets = (\n (None, {\n 'fields': (\n 'name',\n 'status',\n 'type',\n 'default_block_size',\n 'default_format',\n 'min_chunk_size',\n 
'min_capacity_warning',\n                'max_capacity',\n                'remote_server',\n                'master_server',\n                'target',\n            )\n        }),\n    )\n\n\nclass StorageMediumAdmin(admin.ModelAdmin):\n    exclude = (\n        'create_date',\n        'last_changed_local',\n        'last_changed_external',\n        'num_of_mounts',\n        'used_capacity',\n    )\n\n\nclass StorageMethodAdminForm(forms.ModelForm):\n    def clean(self):\n        cleaned_data = super().clean()\n        storage_type = cleaned_data.get('type')\n        containers = cleaned_data.get('containers')\n\n        if storage_type == TAPE and not containers:\n            raise forms.ValidationError('Tapes can only be used for containers')\n\n\nclass StorageMethodAdmin(admin.ModelAdmin):\n    form = StorageMethodAdminForm\n    list_display = ('name', 'enabled', 'type', 'containers',)\n    exclude = ('storage_policies',)\n    inlines = [StorageMethodTargetRelationInline]\n\n\nclass StorageObjectAdmin(admin.ModelAdmin):\n    list_display = ('ip', 'storage_medium',)\n    exclude = ('last_changed_local', 'last_changed_external',)\n\n\nclass TapeDriveAdmin(admin.ModelAdmin):\n    exclude = ('last_change', 'num_of_mounts',)\n\n\nadmin.site.register(Robot)\nadmin.site.register(TapeDrive, TapeDriveAdmin)\n\nadmin.site.register(StorageMedium, StorageMediumAdmin)\nadmin.site.register(StorageMethod, StorageMethodAdmin)\nadmin.site.register(StorageObject, StorageObjectAdmin)\nadmin.site.register(StorageTarget, StorageTargetsAdmin)\n","repo_name":"OskarPersson/ESSArch","sub_path":"ESSArch_Core/storage/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"46928420740","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n#Color to check for\nwish = input(\"Which color would you like to detect?\")\n\nwhile True:\n    _, frame = cap.read()\n    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n    # Red color\n    low_red = np.array([161, 155, 84])\n    high_red = np.array([179, 255, 255])\n    red_mask = cv2.inRange(hsv_frame, low_red, high_red)\n    red = cv2.bitwise_and(frame, frame, mask=red_mask)\n\n    # Blue color\n    low_blue = np.array([94, 80, 2])\n    high_blue = np.array([126, 255, 255])\n    blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)\n    blue = cv2.bitwise_and(frame, frame, mask=blue_mask)\n\n    # Yellow color\n    low_yellow = np.array([25, 52, 72])\n    high_yellow = np.array([102, 255, 255])\n    yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow)\n    yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask)\n\n    # Every color except white\n    low = np.array([0, 42, 0])\n    high = np.array([179, 255, 255])\n    mask = cv2.inRange(hsv_frame, low, high)\n    no_white = cv2.bitwise_and(frame, frame, mask=mask)\n\n    #For Red\n    if wish=='red':\n        cv2.imshow(\"Red\", red)\n    \n    #For Blue\n    elif wish=='blue':\n        cv2.imshow('Blue', blue)\n\n    #For Yellow\n    elif wish=='yellow':\n        cv2.imshow(\"Yellow\", yellow)\n\n    #For all except white\n    elif wish=='no white':\n        cv2.imshow(\"No White\", no_white)\n\n    #For everything\n    elif wish=='everything':\n        cv2.imshow(\"Frame\", frame)\n        cv2.imshow(\"Red\", red)\n        cv2.imshow(\"Blue\", blue)\n        cv2.imshow(\"Yellow\", yellow)\n        cv2.imshow(\"Result\", no_white)\n\n    key = cv2.waitKey(1)\n    if key == 5:\n        break\n","repo_name":"YashKarthik/detect_color","sub_path":"mask.py","file_name":"mask.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71220701635","text":"# 6. The program generates a random integer from 0 to 100. 
The user has to guess it within no more\n# than 10 attempts. After every failed attempt the program must report whether the hidden\n# number is greater or smaller than the number the user entered. If the number is not guessed\n# within 10 attempts, print the hidden number.\n\n\nimport random\n\nn = random.randint(0, 100)\nprint('Try to guess the hidden number between 0 and 100. You have 10 attempts')\n\nfor i in range(10):\n    a = int(input(f'Attempt #{i + 1}. Enter a number: '))\n    if a < n and i < 9:\n        print('The hidden number is greater')\n    elif a > n and i < 9:\n        print('The hidden number is smaller')\n    elif a == n:\n        print('You guessed it!')\n        break\n    else:\n        print('You did not guess it')\n\nprint(f'The hidden number was {n}')\n","repo_name":"Shorh/GB_Python_Algorithms","sub_path":"lesson2/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"72443646274","text":"#!/usr/bin/env python3\n# coding: utf8\n\n\"\"\"\n@Author: yabin\n@Date: 2017.5.21\n\n“Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target.\nReturn the sum of the three integers. You may assume that each input would have exactly one solution.\n\nFor example, given array S = {-1 2 1 -4}, and target = 1.\nThe sum that is closest to the target is 2. (-1 + 2 + 1 = 2).”\n\nExcerpted from: yuanbin. “Data Structure and Algorithm notes”. iBooks.\n\"\"\"\n\n\ndef solution(s, k):\n    s.sort()\n    closet = float('inf')\n    for i in range(len(s)):\n        r = two_sum_coloset(s[i + 1:], k)\n        temp = r + s[i]\n        # print('temp ', r, s[i])\n        if abs(temp - k) < abs(closet - k):\n            closet = temp\n    return closet\n\n\ndef two_sum_coloset(s, k):\n    if len(s) < 2:\n        return 0\n\n    left, right = 0, len(s) - 1\n    closet = s[left] + s[right]\n\n    while(left < right):\n        current = s[left] + s[right]\n        if abs(current - k) < abs(closet - k):\n            # print(s[left], s[right])\n            closet = current\n        if current == k:\n            return closet\n        elif current > k:\n            right -= 1\n        else:\n            left += 1\n    return closet\n\n\nif __name__ == '__main__':\n    s = [-1, 2, 1, -4]\n    r = solution(s, 2)\n    # r = two_sum_coloset(s, 3)\n    print('result', r)","repo_name":"YabZhang/algo","sub_path":"tri_sum_closet.py","file_name":"tri_sum_closet.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"9296254233","text":"import random\nfrom enum import Enum, auto\n\nfrom pygame.event import Event\nfrom pytmx import load_pygame\n\nfrom enemy import Enemy\nfrom game_settings import *\nfrom helpers import check_wall_collision\nfrom player import Player\nfrom tile import Tile\n\n\nclass GameState(Enum):\n    DEFEAT = auto()\n    VICTORY = auto()\n    DRAW = auto()\n    GAME_ON = auto()\n\n\nclass Game:\n    def __init__(self):\n        pygame.init()\n        pygame.mixer.init()\n        self.running = True\n        self.screen = pygame.display.set_mode((WIDTH, HEIGHT))\n        self.clock = pygame.time.Clock()\n\n        self.player = None\n        self.enemies = []\n        self.enemies_left = NUMBER_OF_ENEMIES\n        self.tanks = []\n        self.map = None\n        self.tiles = dict()\n\n        self.game_state = GameState.GAME_ON\n\n    def get_starting_positions(self, number):\n        positions = []\n        tiles = list(self.tiles.values())\n        angles = [0, 90, 180, 270]\n        for _ in range(number + 1):\n            tile = random.choice(tiles)\n            tiles.remove(tile)\n            tile = Tile(tile.get_pos(), tile.get_indices())\n            tile.angle = random.choice(angles)\n            positions.append(tile)\n        return positions\n\n    def 
set_map(self):\n number = random.randint(1, NUMBER_OF_MAPS)\n self.map = load_pygame('maps/map{0}.tmx'.format(str(number)))\n\n def process(self, event: Event):\n if event.type == pygame.QUIT:\n self.running = False\n self.player.shoot_control(event)\n\n def update(self):\n if self.game_state != GameState.GAME_ON:\n return\n\n if not self.player.alive and all(not e.alive for e in self.enemies):\n self.game_state = GameState.DRAW\n return\n\n if not self.player.alive:\n self.game_state = GameState.DEFEAT\n return\n\n if all(not e.alive for e in self.enemies):\n self.game_state = GameState.VICTORY\n return\n\n self.player.move_control()\n\n shells = []\n for tank in self.tanks:\n tank.check_shells()\n shells += tank.shells\n\n for enemy in self.enemies:\n enemy.move()\n if enemy.in_sight(self.player):\n enemy.shoot()\n\n for shell in shells:\n shell.move(SHELL_SPEED)\n for point in shell.points.values():\n if check_wall_collision(point, self.map):\n for shooting_tank in self.tanks:\n shooting_tank.pop_shell(shell)\n\n if shell in self.player.shells:\n for enemy in self.enemies:\n if shell.collides(enemy):\n enemy.destroy()\n self.enemies_left -= 1\n self.player.pop_shell(shell)\n if any(shell in e.shells for e in self.enemies):\n if shell.collides(self.player):\n self.player.destroy()\n for shooting_tank in self.tanks:\n shooting_tank.pop_shell(shell)\n\n def draw(self):\n self.draw_map()\n for tank in self.tanks:\n tank.draw(self.screen)\n for shell in tank.shells:\n shell.draw(self.screen)\n self.draw_text('Enemies left: ' + str(self.enemies_left), WHITE, 30, (140, 40))\n\n if self.game_state == GameState.GAME_ON:\n reload_time = self.player.get_reload_time()\n if reload_time != 0:\n self.draw_text('Reloading: {:.1f}'.format(reload_time), WHITE, 30, (WIDTH - 140, 40))\n else:\n self.draw_text(self.game_state.name, WHITE, 30, (WIDTH - 90, 40))\n\n pygame.display.update()\n\n def draw_map(self):\n for layer in [layer for layer in self.map.layers if layer.Type == 'Tiles']:\n for x, y, tile in layer.tiles():\n self.screen.blit(tile, (x * TILE_SIZE, y * TILE_SIZE))\n\n def draw_text(self, text, color, size, location):\n font = pygame.font.Font('freesansbold.ttf', size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = location\n self.screen.blit(text_surface, text_rect)\n\n def run(self):\n self.set_map()\n self.tiles = dict()\n for row, col, _ in self.map.get_layer_by_name('Path').tiles():\n pos = (row * TILE_SIZE + TILE_SIZE / 2, col * TILE_SIZE + TILE_SIZE / 2)\n self.tiles[(row, col)] = Tile(pos, (row, col))\n\n positions = self.get_starting_positions(NUMBER_OF_ENEMIES + 1)\n self.player = Player(self.map, positions[0])\n self.enemies = [Enemy(self.map, self.tiles, positions[i]) for i in range(1, NUMBER_OF_ENEMIES + 1)]\n\n self.tanks = [self.player]\n self.tanks.extend(self.enemies)\n\n for tank in self.tanks:\n tank.reset_shells()\n\n while self.running:\n for event in pygame.event.get():\n self.process(event)\n self.update()\n self.draw()\n self.clock.tick(60)\n\n def cleanup(self):\n pygame.quit()\n\n\nif __name__ == '__main__':\n game = Game()\n game.run()\n game.cleanup()\n","repo_name":"relidaar/TanksGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17182341556","text":"#!/usr/bin/env python\nimport sys,os\nimport argparse\n\nparser = argparse.ArgumentParser( 
description=\"Manually pick items\")\nparser.add_argument(\"-i\", help=\"Input item list file\",type=str,required=True,)\nparser.add_argument(\"-c\", help=\"Command to run. \",type=str,required=True,)\nparser.add_argument(\"-o\", help=\"Output prefix, default is 'manually'.\",type=str,default = \"manually\")\n\n\nargs = parser.parse_args()\n\npositive_path = \"%s_positive.list\"%args.o\nnegative_path = \"%s_negative.list\"%args.o\nlistfile = args.i\nprog = args.c\n\nofpp = open(positive_path,'w')\nofpn = open(negative_path,'w')\n\nprint(\">>>>> Start selection ...\")\nfor line in open(listfile):\n    item = line.strip()\n    print(\" > Identifying %s\"%line)\n    os.system(\"%s %s\"%(args.c,item))\n    answer = input(\" Keep? (Y/N,default N):\")\n    if len(answer.strip().lower()) > 0 and answer.strip().lower()[0] == 'y' :\n        print(\" + Keep %s\"%item)\n        ofpp.write(\"%s\\n\"%item)\n    else:\n        print(\" + Ignore %s\"%item)\n        ofpn.write(\"%s\\n\"%item)\n\nofpp.close()\nofpn.close()\n\n\n","repo_name":"ffallrain/MyScripts","sub_path":"manually_select.py","file_name":"manually_select.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"42010942093","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom scripts.Utility_Functions import utility_functions\n\nclass LDM_Model:\n    \n    def __init__(self, ldm_data,\n                 input_shape = (54,),\n                 output_heads = 54,\n                 algorithm = 'xtm',\n                 epochs = 100,\n                 batch_size = 32,\n                 loss_function= 'binary_crossentropy',\n                 optimizer = keras.optimizers.Adam(learning_rate=1e-3),\n                 training= False):\n        self.ldm_data = ldm_data\n        self.input_shape = input_shape\n        self.output_heads = output_heads\n        self.algorithm = algorithm\n        self.epochs = epochs\n        self.batch_size = batch_size\n        self.loss_function = loss_function\n        self.optimizer = optimizer\n        self.training = training\n        self.model = self.ldm_model()\n        # The checkpoint path is needed both for saving during training and for loading\n        self.checkpoint_file_path = os.path.join(os.getcwd(), \"checkpoint\\\\\"+\"ldm_\"+self.algorithm+\".h5\")\n        if self.training:\n            self.compile_model()\n            self.callbacks = [keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)]\n        else:\n            print(\"loading ldm model trained with predictions from \"+self.algorithm+\" from the saved models...\")\n            self.model = keras.models.load_model(self.checkpoint_file_path)\n\n    \n    def ldm_model(self):\n        pred_inp = keras.Input(shape=self.input_shape)\n        fdi_inp = keras.Input(shape = self.input_shape)\n        concat = layers.concatenate([pred_inp,fdi_inp])\n        dense1 = layers.Dense(128, activation = 'relu')(concat)\n        dense2 = layers.Dense(128,activation = 'relu')(dense1)\n        dense3 = layers.Dense(128, activation = 'relu')(dense2)\n        outputs = [layers.Dense(1, activation = 'sigmoid')(dense3) for i in range(self.output_heads)]\n        \n        return keras.Model([pred_inp,fdi_inp], outputs)\n\n    def compile_model(self):\n        losses = [self.loss_function for i in range(self.input_shape[-1])]\n        self.model.compile(loss = losses, optimizer = self.optimizer)\n    \n    \n    def train(self, save_model = False):\n        training_history= self.model.fit([self.ldm_data.train_predictions,self.ldm_data.train_set],\n                                         [i for i in self.ldm_data.fdi_location_train.T],\n                                         validation_data = ([self.ldm_data.val_predictions,self.ldm_data.val_set],[k for k in self.ldm_data.fdi_location_val.T]),\n                                         epochs=self.epochs,\n                                         batch_size = self.batch_size,\n                                         
callbacks=self.callbacks\n )\n \n if save_model:\n self.model.save(self.checkpoint_file_path)\n history_df = pd.DataFrame(training_history.history)\n history_df.to_excel(os.path.join(os.getcwd(), 'checkpoint\\\\training_history\\\\'+self.algorithm+'_location_training_history.xlsx'))\n\n \n def get_model_summary(self):\n return self.model.summary()\n \n def plot_model(self):\n return keras.utils.plot_model(self.model)\n \n def predict(self, real_data, predicted_data, threshold = 0.5):\n raw_location_prediction = np.squeeze(self.model.predict([predicted_data,real_data]))\n raw_location_prediction = raw_location_prediction.T\n location_prediction = []\n for i in raw_location_prediction:\n temp = []\n for k in i:\n if k> threshold:\n temp.append(1)\n elif (threshold == 1.0) and (k == 1.0):\n temp.append(0)\n elif (threshold == 0) and (k == 0):\n temp.append(1)\n else:\n temp.append(0) \n location_prediction.append(temp)\n return np.array(location_prediction)\n \n def roc_curve(self, real_data , predicted_data, threshold, location_set):\n cms = []\n tprs0 = []\n fprs0 = []\n tprs1 = []\n fprs1 = []\n loc = self.predict(real_data, predicted_data, threshold)\n for i in range(54):\n cms.append(confusion_matrix(location_set.T[i],loc.T[i], labels = [0,1]).T)\n\n for i in cms:\n #class 0\n tp = i[0][0]\n fp = i[0][1]\n fn = i[1][0]\n tn = i[1][1]\n\n tpr0 = tp/(tp+fn)\n fpr0 = fp/(fp+tn)\n\n #class 1\n tp = i[1][1]\n fp = i[1][0]\n fn = i[0][1]\n tn = i[0][0]\n\n tpr1 = tp/(tp+fn)\n fpr1 = fp/(fp+tn)\n \n tprs0.append(tpr0)\n fprs0.append(fpr0)\n tprs1.append(tpr1)\n fprs1.append(fpr1)\n tprs0,fprs0,tprs1,fprs1 = np.array(tprs0),np.array(fprs0),np.array(tprs1),np.array(fprs1)\n return np.mean(tprs0), np.mean(fprs0), np.mean(tprs1), np.mean(fprs1)\n \n def get_roc_curve(self, real_data , predicted_data, location_set):\n thresholds = []\n tpr_class0 = []\n fpr_class0 = []\n tpr_class1 = []\n fpr_class1 = []\n\n ts = np.linspace(0,1.0, num = 11)\n\n for i in ts:\n temp = self.roc_curve(real_data = real_data, predicted_data = predicted_data ,threshold = i, location_set = location_set)\n thresholds.append(i)\n tpr_class0.append(temp[0])\n fpr_class0.append(temp[1])\n tpr_class1.append(temp[2])\n fpr_class1.append(temp[3])\n \n utility_functions.plot_roc_curve([tpr_class0,tpr_class1],[fpr_class0,fpr_class1], n_classes=2)\n \n def get_ldm_prf(self, real_location_data, predicted_location_data):\n prf_loc = []\n for i in range(54):\n cm = confusion_matrix(real_location_data.T[i],predicted_location_data.T[i], labels = [0,1])\n prf_loc.append(np.array(utility_functions.cm2prf(cm)))\n \n prf_loc = np.array(prf_loc)\n prf_loc = np.mean(prf_loc, axis = 0)\n print('precision = {}, recall = {}, f1 score = {}'.format(prf_loc[0],prf_loc[1],prf_loc[2]))\n return prf_loc","repo_name":"gcsarker/XTM","sub_path":"scripts/LDM_Model.py","file_name":"LDM_Model.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"16734784796","text":"TotalGasto = 0\n# acima de R$1000\nProdutoCaro = 0\n#Produto mais barato\nProdutoBarato = 0\n\nwhile True: \n NomeProduto = input('Nome do Produto: ').strip()\n ValorProduto = int(input('Valor: '))\n if TotalGasto == 0:\n ProdutoBarato = ValorProduto\n NomeProdutoBarato = NomeProduto\n\n elif ValorProduto < ProdutoBarato:\n NomeProdutoBarato = NomeProduto\n ProdutoBarato = ValorProduto\n\n if ValorProduto > 1000:\n ProdutoCaro += 1\n\n TotalGasto += ValorProduto\n\n Loop = input('Deseja continuar 
[S/N]: ').lower().strip()\n    if Loop == 'n':\n        break\n\nNomeProdutoBarato = NomeProdutoBarato.capitalize()\n\nprint(f'O total gasto foi de: {TotalGasto} \\nProdutos acima de R$1000: {ProdutoCaro} \\n O produto mais barato foi: {NomeProdutoBarato} e ele custou R${ProdutoBarato}')\n","repo_name":"Lobster-dev/exercicios-python","sub_path":"aula-15-new/ex-70/ex-70.py","file_name":"ex-70.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"30093461923","text":"while True:\r\n    print(\"Welcome to the Computer Science Quiz!\")\r\n    ready = input(\"Okay, Ready? (Yes/No): \")\r\n\r\n    if ready.lower() != \"yes\":\r\n        quit()\r\n\r\n    score = 0\r\n    answer = input(\"What does CPU stand for?: \")\r\n    if answer.lower() == \"central processing unit\":\r\n        print(\"Correct!\")\r\n        score += 1\r\n    else:\r\n        print(\"Incorrect!\")\r\n\r\n    answer = input(\"What does RAM stand for?: \")\r\n    if answer.lower() == \"random access memory\":\r\n        print(\"Correct!\")\r\n        score += 1\r\n    else:\r\n        print(\"Incorrect!\")\r\n\r\n    answer = input(\"What does CLI stand for?: \")\r\n    if answer.lower() == \"command line interface\":\r\n        print(\"Correct!\")\r\n        score += 1\r\n    else:\r\n        print(\"Incorrect!\")\r\n\r\n    answer = input(\"What does GUI stand for?: \")\r\n    if answer.lower() == \"graphical user interface\":\r\n        print(\"Correct!\")\r\n        score += 1\r\n    else:\r\n        print(\"Incorrect!\")\r\n\r\n    answer = input(\"What does ARP stand for?: \")\r\n    if answer.lower() == \"address resolution protocol\":\r\n        print(\"Correct!\")\r\n        score += 1\r\n    else:\r\n        print(\"Incorrect!\")\r\n\r\n    print(\"You got \" + str(score) + \" (\" + str((score/5 * 100)) + \"%) question(s) correct!\")\r\n\r\n    while True:\r\n        again = str(input('Want to play again? (Yes/No): '))\r\n        if again.lower() in ('yes', 'no'):\r\n            break\r\n        print(\"Invalid input.\")\r\n    if again.lower() == 'yes':\r\n        continue\r\n    else:\r\n        print(\"Goodbye!\")\r\n        quit()\r\n\r\n\r\n## Commentary\r\n\"\"\"\r\nLine 1:\r\n    Line 1 is a while loop that keeps the whole quiz running so it can be replayed. The loop\r\n    only comes back around when the user answers 'yes' in the replay block between lines 46\r\n    and 55; any other path quits the program. This is one way of restarting a program after\r\n    it has finished executing. Learnt it on StackOverflow.\r\n\r\nLine 2:\r\n    This is a basic print command that welcomes users to the game.\r\n\r\nLine 3:\r\n    This line of code accepts an input (yes/no) from the user. The input determines whether the\r\n    user wants to begin/continue play or quit the program.\r\n\r\nLine 5-6:\r\n    This block of code evaluates the response from the user from line 3 and performs the required\r\n    action. The .lower() method appended to the variable converts the input to lowercase\r\n    so that it matches the comparison string.\r\n\r\nLine 8:\r\n    A score global variable is created to hold the total score (correct answers) from the game.\r\n\r\nLine 9-14 et al.:\r\n    The variable answer is assigned the input from the question asked. If the input matches\r\n    the right answer in Line 10, the program prints 'Correct!' as indicated on line 11, and proceeds\r\n    to add one to the score (Line 12). If the answer (user input) to the question does not match,\r\n    the program prints 'Incorrect!' as indicated on line 14. \r\n\r\nLine 44:\r\n    Through the print statement and basic concatenation, the total score and percentage from the \r\n    game are printed out to the user. 
Note that all integer and float values are converted\r\n    to strings before being concatenated in the print statement.\r\n    \r\nLine 46-55:\r\n    This block of code collects user input (yes/no) to tell whether to close the game or run it\r\n    once more. In line 48, the code evaluates the user input to either yes or no, and prints out \r\n    'Invalid input.' (Line 50) for anything apart from the two. The program proceeds to run if the\r\n    user answers 'yes' but quits and prints out 'Goodbye!' if the user answers no. \r\n\"\"\"","repo_name":"iamnvna/cs-quiz-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23549033620","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next_node = None\n\nclass LinkedList:\n    def __init__(self):\n        self.list = None\n    # Append a node to the end\n    def append(self, data):\n        new_node = Node(data)\n        \n        if self.list != None:\n            curr = self.list\n            while curr.next_node != None:\n                curr = curr.next_node\n            curr.next_node = new_node\n        else:\n            self.list = new_node\n\n    # Insert a node at any position of the list. If the index is greater than the size of the list, then insert it at the end \n    def insert(self, data, index):\n        if index == 0:\n            new_node = Node(data)\n            curr = self.list\n            new_node.next_node = curr\n            self.list = new_node\n        else:\n            new_node = Node(data)\n            curr = self.list\n            while index > 1:\n                if curr.next_node == None:\n                    break\n                curr = curr.next_node\n                index -=1\n            new_node.next_node = curr.next_node\n            curr.next_node = new_node\n\n    # Prepend a node to the beginning of the list \n    def preappend(self, data):\n        new_node = Node(data)\n        new_node.next_node = self.list\n        self.list = new_node\n    \n    def search(self, key):\n        curr = self.list\n        while curr.next_node != None:\n            if curr.data == key:\n                return key\n            else:\n                curr = curr.next_node\n        if curr.data == key:\n            return key\n        return False\n    \n    # Remove a node by passing the value of that node \n    def remove(self, value):\n        curr = self.list.next_node\n        prevPointer = self.list\n        found = False\n\n        if prevPointer.data == value:\n            prevPointer.next_node = None\n            self.list = curr\n            return\n        \n        while curr.next_node != None and not found:\n            if curr.data == value:\n                found = True\n                currPointer = curr.next_node\n                prevPointer.next_node = None\n                curr.next_node = None\n                prevPointer.next_node = currPointer\n            else:\n                curr = curr.next_node\n                prevPointer = prevPointer.next_node\n        \n        if (curr.next_node == None and curr.data == value) and found == False:\n            prevPointer.next_node = None\n        \n        \nl = LinkedList()\n\n# l.append(0)\n# l.append(888)\n# l.append(777)\n# l.append(111)\n# l.append(555)\n# l.insert(10, 0)\n# l.insert(555, 3)\n# l.insert(33, 10)\n# l.append(666)\n# l.preappend(2222)\n# l.preappend(1111)\n# l.search(999)\n# l.remove(777)\n","repo_name":"khezam/Learning_process","sub_path":"linkedlist/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"38836462263","text":"## @ingroup Components-Energy-Distributors\n# Electronic_Speed_Controller.py\n#\n# Created: Jun 2014, E. Botero\n# Modified: Jan 2016, T. 
MacDonald\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# suave imports\nimport SUAVE\n\nfrom SUAVE.Components.Energy.Energy_Component import Energy_Component\n\n# ----------------------------------------------------------------------\n# Electronic Speed Controller Class\n# ----------------------------------------------------------------------\n\n## @ingroup Components-Energy-Distributors\nclass Electronic_Speed_Controller(Energy_Component):\n \n def __defaults__(self):\n \"\"\" This sets the default values.\n \n Assumptions:\n None\n \n Source:\n N/A\n \n Inputs:\n None\n \n Outputs:\n None\n \n Properties Used:\n None\n \"\"\" \n \n self.efficiency = 0.0\n \n def voltageout(self,conditions):\n \"\"\" The voltage out of the electronic speed controller\n \n Assumptions:\n The ESC's output voltage is linearly related to throttle setting\n \n Source:\n N/A\n \n Inputs:\n conditions.propulsion.throttle [0-1] \n self.inputs.voltage [volts]\n \n Outputs:\n voltsout [volts]\n self.outputs.voltageout [volts]\n \n Properties Used:\n None\n \n \"\"\"\n # Unpack, don't modify the throttle\n eta = (conditions.propulsion.throttle[:,0,None])*1.0\n \n # Negative throttle is bad\n eta[eta<=0.0] = 0.0\n \n # Cap the throttle\n eta[eta>=1.0] = 1.0\n \n voltsin = self.inputs.voltagein\n voltsout = eta*voltsin\n \n # Pack the output\n self.outputs.voltageout = voltsout\n \n return voltsout\n \n def currentin(self,conditions):\n \"\"\" The current going into the speed controller\n \n Assumptions:\n The ESC draws current.\n \n Inputs:\n self.inputs.currentout [amps]\n \n Outputs:\n outputs.currentin [amps]\n \n Properties Used:\n self.efficiency - [0-1] efficiency of the ESC\n \n \"\"\"\n \n # Unpack, don't modify the throttle\n eta = (conditions.propulsion.throttle[:,0,None])*1.0 \n eff = self.efficiency\n currentout = self.inputs.currentout\n currentin = currentout*eta/eff # The inclusion of eta satisfies a power balance: p_in = p_out/eff\n \n # Pack\n self.outputs.currentin = currentin\n self.outputs.power_in = self.inputs.voltagein*currentin\n \n return currentin","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Components/Energy/Distributors/Electronic_Speed_Controller.py","file_name":"Electronic_Speed_Controller.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"14503970559","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom functools import wraps\n\nfrom OpenSSL import crypto, SSL\nfrom OpenSSL.crypto import load_certificate, FILETYPE_PEM\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.internet.threads import deferToThread\n\nfrom globaleaks.orm import transact\nfrom globaleaks.settings import GLSettings\nfrom globaleaks.handlers.base import BaseHandler, HANDLER_EXEC_TIME_THRESHOLD\nfrom globaleaks.models.config import PrivateFactory, NodeFactory, load_tls_dict\nfrom globaleaks.rest import errors, requests\nfrom globaleaks.utils import tls\nfrom globaleaks.utils.utility import datetime_to_ISO8601, format_cert_expr_date, log\n\n\nclass FileResource(object):\n \"\"\"\n An interface for interacting with files stored on disk or in the db\n \"\"\"\n @classmethod\n @transact\n def create_file(store, content):\n raise errors.MethodNotImplemented()\n\n @staticmethod\n def perform_file_action():\n raise errors.MethodNotImplemented()\n\n 
@staticmethod\n @transact\n def get_file(store):\n \"\"\"\n :rtype: A `unicode` string\n \"\"\"\n raise errors.MethodNotImplemented()\n\n @staticmethod\n @transact\n def delete_file(store):\n raise errors.MethodNotImplemented()\n\n @classmethod\n @transact\n def serialize(cls, store):\n return cls.db_serialize(store)\n\n @staticmethod\n def db_serialize(store):\n \"\"\"\n :rtype: A `dict` to be converted into JSON for delivery to a client\n \"\"\"\n raise errors.MethodNotImplemented()\n\n @staticmethod\n @transact\n def should_gen_dh_params(store):\n return PrivateFactory(store).get_val('https_dh_params') == u''\n\n @staticmethod\n @transact\n def save_dh_params(store, dh_params):\n PrivateFactory(store).set_val('https_dh_params', dh_params)\n\n @classmethod\n @inlineCallbacks\n def generate_dh_params_if_missing(cls):\n gen_dh = yield FileResource.should_gen_dh_params()\n if gen_dh:\n log.info(\"Generating the HTTPS DH params with %d bits\" % GLSettings.key_bits)\n dh_params = yield deferToThread(tls.gen_dh_params, GLSettings.key_bits)\n\n log.info(\"Storing the HTTPS DH params\")\n yield cls.save_dh_params(dh_params)\n\n\nclass PrivKeyFileRes(FileResource):\n validator = tls.PrivKeyValidator\n\n @classmethod\n @transact\n def create_file(store, cls, raw_key):\n db_cfg = load_tls_dict(store)\n db_cfg['ssl_key'] = raw_key\n\n prv_fact = PrivateFactory(store)\n pkv = cls.validator()\n ok, err = pkv.validate(db_cfg)\n if ok:\n prv_fact.set_val('https_priv_key', raw_key)\n prv_fact.set_val('https_priv_gen', False)\n else:\n log.debug('Key validation failed')\n\n return ok\n\n @staticmethod\n @transact\n def save_tls_key(store, prv_key):\n prv_fact = PrivateFactory(store)\n prv_fact.set_val('https_priv_key', prv_key)\n prv_fact.set_val('https_priv_gen', True)\n\n @classmethod\n @inlineCallbacks\n def perform_file_action(cls):\n log.info(\"Generating the HTTPS key with %d bits\" % GLSettings.key_bits)\n key = yield deferToThread(tls.gen_rsa_key, GLSettings.key_bits)\n\n log.debug(\"Saving the HTTPS key\")\n yield cls.save_tls_key(key)\n\n @staticmethod\n @transact\n def delete_file(store):\n prv_fact = PrivateFactory(store)\n prv_fact.set_val('https_priv_key', u'')\n prv_fact.set_val('https_priv_gen', False)\n\n @staticmethod\n def db_serialize(store):\n prv_fact = PrivateFactory(store)\n\n return {\n 'set': prv_fact.get_val('https_priv_key') != u'',\n 'gen': prv_fact.get_val('https_priv_gen')\n }\n\n\nclass CertFileRes(FileResource):\n validator = tls.CertValidator\n\n @classmethod\n @transact\n def create_file(store, cls, raw_cert):\n prv_fact = PrivateFactory(store)\n\n db_cfg = load_tls_dict(store)\n db_cfg['ssl_cert'] = raw_cert\n\n cv = cls.validator()\n ok, err = cv.validate(db_cfg)\n if ok:\n prv_fact.set_val('https_cert', raw_cert)\n else:\n log.err(\"Cert validation failed\")\n return ok\n\n @staticmethod\n @transact\n def delete_file(store):\n prv_fact = PrivateFactory(store)\n prv_fact.set_val('https_cert', u'')\n\n @staticmethod\n @transact\n def get_file(store):\n prv_fact = PrivateFactory(store)\n return prv_fact.get_val('https_cert')\n\n @staticmethod\n def db_serialize(store):\n c = PrivateFactory(store).get_val('https_cert')\n if len(c) == 0:\n return {'name': 'cert', 'set': False}\n\n x509 = crypto.load_certificate(FILETYPE_PEM, c)\n expr_date = format_cert_expr_date(x509.get_notAfter())\n\n return {\n 'name': 'cert',\n 'issuer': x509.get_issuer().organizationName,\n 'expiration_date': datetime_to_ISO8601(expr_date),\n 'set': True,\n }\n\n\nclass ChainFileRes(FileResource):\n 
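# Editor-added summary (not in the original file): this resource manages the\n    # intermediate TLS certificate chain ('https_chain'): uploads are validated with\n    # tls.ChainValidator before being stored, and db_serialize() reports the chain's\n    # issuer and expiration date for the admin UI.\n    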
validator = tls.ChainValidator\n\n @classmethod\n @transact\n def create_file(store, cls, raw_chain):\n prv_fact = PrivateFactory(store)\n\n db_cfg = load_tls_dict(store)\n db_cfg['ssl_intermediate'] = raw_chain\n\n cv = cls.validator()\n ok, err = cv.validate(db_cfg)\n if ok:\n prv_fact.set_val('https_chain', raw_chain)\n else:\n log.debug('Chain validation failed')\n return ok\n\n @staticmethod\n @transact\n def delete_file(store):\n prv_fact = PrivateFactory(store)\n prv_fact.set_val('https_chain', u'')\n\n @staticmethod\n @transact\n def get_file(store):\n prv_fact = PrivateFactory(store)\n return prv_fact.get_val('https_chain')\n\n @staticmethod\n def db_serialize(store):\n c = PrivateFactory(store).get_val('https_chain')\n if len(c) == 0:\n return {'name': 'chain', 'set': False}\n\n x509 = load_certificate(FILETYPE_PEM, c)\n expr_date = format_cert_expr_date(x509.get_notAfter())\n\n return {\n 'name': 'chain',\n 'issuer': x509.get_issuer().organizationName,\n 'expiration_date': datetime_to_ISO8601(expr_date),\n 'set': True,\n }\n\n\nclass CsrFileRes(FileResource):\n @classmethod\n @transact\n def create_file(store, cls, raw_csr):\n prv_fact = PrivateFactory(store)\n\n prv_fact.set_val('https_csr', raw_csr)\n\n return True\n\n @staticmethod\n @transact\n def delete_file(store):\n prv_fact = PrivateFactory(store)\n prv_fact.set_val('https_csr', u'')\n\n @staticmethod\n @transact\n def get_file(store):\n prv_fact = PrivateFactory(store)\n return prv_fact.get_val('https_csr')\n\n @staticmethod\n def db_serialize(store):\n c = PrivateFactory(store).get_val('https_csr')\n if len(c) == 0:\n return {'name': 'csr', 'set': False}\n\n return {\n 'name': 'csr',\n 'set': True,\n }\n\n\nclass FileHandler(BaseHandler):\n check_roles = 'admin'\n\n mapped_file_resources = {\n 'priv_key': PrivKeyFileRes,\n 'cert': CertFileRes,\n 'chain': ChainFileRes,\n 'csr': CsrFileRes,\n }\n\n # TODO move generate_dh_params to priv_key file handler to free handler timing\n # analysis on the mapped file resources.\n handler_exec_time_threshold = 10*HANDLER_EXEC_TIME_THRESHOLD\n\n def get_file_res_or_raise(self, name):\n if name not in self.mapped_file_resources:\n raise errors.MethodNotImplemented()\n\n return self.mapped_file_resources[name]\n\n @BaseHandler.https_disabled\n def delete(self, name):\n file_res_cls = self.get_file_res_or_raise(name)\n return file_res_cls.delete_file()\n\n @BaseHandler.https_disabled\n @inlineCallbacks\n def post(self, name):\n req = self.validate_message(self.request.content.read(),\n requests.AdminTLSCfgFileResourceDesc)\n\n file_res_cls = self.get_file_res_or_raise(name)\n\n yield file_res_cls.generate_dh_params_if_missing()\n\n ok = yield file_res_cls.create_file(req['content'])\n if not ok:\n raise errors.ValidationError()\n\n @BaseHandler.https_disabled\n @inlineCallbacks\n def put(self, name):\n file_res_cls = self.get_file_res_or_raise(name)\n\n yield file_res_cls.generate_dh_params_if_missing()\n\n yield file_res_cls.perform_file_action()\n\n @BaseHandler.https_disabled\n def get(self, name):\n file_res_cls = self.get_file_res_or_raise(name)\n\n return file_res_cls.get_file()\n\n\n@transact\ndef serialize_https_config_summary(store):\n prv_fact = PrivateFactory(store)\n\n file_summaries = {}\n\n for key, file_res_cls in FileHandler.mapped_file_resources.iteritems():\n file_summaries[key] = file_res_cls.db_serialize(store)\n\n ret = {\n 'enabled': prv_fact.get_val('https_enabled'),\n 'running': GLSettings.state.process_supervisor.is_running(),\n 'status': 
GLSettings.state.process_supervisor.get_status(),\n 'files': file_summaries,\n }\n\n return ret\n\n\n@transact\ndef try_to_enable_https(store):\n prv_fact = PrivateFactory(store)\n\n cv = tls.ChainValidator()\n db_cfg = load_tls_dict(store)\n db_cfg['https_enabled'] = False\n\n ok, err = cv.validate(db_cfg)\n if ok:\n prv_fact.set_val('https_enabled', True)\n GLSettings.memory_copy.private.https_enabled = True\n else:\n raise err\n\n@transact\ndef disable_https(store):\n prv_fact = PrivateFactory(store)\n log.debug('Disabling https on the node.')\n prv_fact.set_val('https_enabled', False)\n\n\nclass ConfigHandler(BaseHandler):\n check_roles = 'admin'\n\n def get(self):\n return serialize_https_config_summary()\n\n @BaseHandler.https_disabled\n @inlineCallbacks\n def post(self):\n try:\n yield try_to_enable_https()\n yield GLSettings.state.process_supervisor.maybe_launch_https_workers()\n except Exception as e:\n log.err(e)\n raise errors.InternalServerError(e)\n\n @BaseHandler.https_enabled\n @inlineCallbacks\n def put(self):\n \"\"\"\n Disables HTTPS config and shutdown subprocesses.\n \"\"\"\n yield disable_https()\n GLSettings.memory_copy.private.https_enabled = False\n yield GLSettings.state.process_supervisor.shutdown()\n\n\nclass CSRFileHandler(FileHandler):\n check_roles = 'admin'\n\n @BaseHandler.https_disabled\n @inlineCallbacks\n def post(self, name):\n request = self.validate_message(self.request.content.read(),\n requests.AdminCSRFileDesc)\n\n desc = request['content']\n\n csr_fields = {\n 'C': desc['country'].upper(),\n 'ST': desc['province'],\n 'L': desc['city'],\n 'O': desc['company'],\n 'OU': desc['department'],\n 'CN': desc['commonname'],\n 'emailAddress': desc['email'],\n }\n\n csr_txt = yield self.perform_action(csr_fields)\n\n file_res_cls = self.get_file_res_or_raise(name)\n\n ok = yield file_res_cls.create_file(csr_txt)\n if not ok:\n raise errors.ValidationError()\n\n @staticmethod\n @transact\n def perform_action(store, csr_fields):\n db_cfg = load_tls_dict(store)\n\n pkv = tls.PrivKeyValidator()\n ok, err = pkv.validate(db_cfg)\n if not ok or err is not None:\n raise err\n\n key_pair = db_cfg['ssl_key']\n try:\n csr_txt = tls.gen_x509_csr(key_pair, csr_fields, GLSettings.csr_sign_bits)\n log.debug(\"Generated a new CSR\")\n return csr_txt\n except Exception as e:\n log.err(e)\n raise errors.ValidationError('CSR gen failed')\n","repo_name":"adolfoeliazat/GlobaLeaks","sub_path":"backend/globaleaks/handlers/admin/https.py","file_name":"https.py","file_ext":"py","file_size_in_byte":11933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}{"seq_id":"13873692729","text":"import heapq\nimport sys\n\nINF = 1e9\nn, m, c = map(int, sys.stdin.readline().split())\narr = [[] for _ in range(n+1)]\nfor i in range(m):\n a, b, d = map(int, sys.stdin.readline().split())\n arr[a].append((b, d))\n\n\ndistance = [INF] * (n+1)\ndistance[c] = 0\nq = []\nheapq.heappush(q, (0, c))\nwhile q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n for i in arr[now]:\n cost = dist + i[1]\n if cost < distance[i[0]]:\n distance[i[0]] = cost\n heapq.heappush(q, (cost, i[0]))\n\ncnt1 = 0\ncnt2 = 0\nfor i in range(1, n+1):\n if distance[i] != INF:\n cnt1 += 1\n cnt2 = max(cnt2, distance[i])\n\n# messages spread simultaneously, so the completion time is the longest shortest-path distance\nprint(cnt1-1, 
cnt2)","repo_name":"whiskey21/my-algorithm-book","sub_path":"최단경로/전보.py","file_name":"전보.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"3016443912","text":"from rest_framework.authentication import BaseAuthentication\r\nfrom rest_framework.exceptions import AuthenticationFailed\r\nfrom apps.models import UserToken\r\n\r\n\r\nclass MyAuthentication(BaseAuthentication):\r\n    def authenticate(self, request):\r\n        # Authentication logic: on success, return two values (user, auth)\r\n        # on failure, raise an exception\r\n        token = request.GET.get('token')\r\n        if token:\r\n            user_token = UserToken.objects.filter(token=token).first()\r\n            # authentication passed\r\n            if user_token:\r\n                return user_token.user, token\r\n            else:\r\n                raise AuthenticationFailed('Authentication failed')\r\n        else:\r\n            raise AuthenticationFailed('The request URL must carry a token')\r\n\r\n\r\n","repo_name":"jichengxi/study","sub_path":"python/django-rest-framework/drfdemo/apps/apps_auth.py","file_name":"apps_auth.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"40093259986","text":"import os\r\n\r\nfrom .common import Config, KeyMapper, Context, ImageManger\r\nfrom .service import LogicService\r\nfrom .gui import TetrisWin\r\nfrom .control import GameControl\r\nimport signal\r\n\r\n\r\nclass App:\r\n    singleton = None\r\n\r\n    def __init__(self):\r\n        self.context = Context.get_instance()\r\n\r\n        self.config = Config()\r\n        self.context.register(self.config)\r\n\r\n        keymapper = KeyMapper()\r\n        self.context.register(keymapper)\r\n\r\n        imagemanger = ImageManger(os.path.join(os.getcwd(), \"resource\"))\r\n        self.context.register(imagemanger)\r\n\r\n        self.win = TetrisWin()\r\n        self.context.register(self.win)\r\n\r\n        self.context.register(LogicService())\r\n        self.control = GameControl()\r\n        keymapper.bind(self.config.keymapper, self.control)\r\n\r\n        self.context.register(self.control)\r\n\r\n        self.control.start()\r\n\r\n    def run(self):\r\n        bind_signal()\r\n        self.win.show()\r\n\r\n\r\ndef bind_signal():\r\n    # interrupt (Ctrl+C)\r\n    signal.signal(signal.SIGINT, exit_gracefully)\r\n    # software termination signal from kill\r\n    signal.signal(signal.SIGTERM, exit_gracefully)\r\n    # signal.signal(signal.SIGKILL, exit_gracefully)\r\n    # signal.signal(signal.SIGHUP, exit_gracefully)\r\n\r\n\r\ndef exit_gracefully(signum, frame):\r\n    global is_exit\r\n    is_exit = True\r\n    print(\"receive a signal %d, is_exit = %d\" % (signum, is_exit))\r\n","repo_name":"godghdai/tetris_python","sub_path":"core/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"74292828995","text":"from __future__ import annotations\n\nfrom typing import Any, Optional\n\nfrom paddle.base import core as core\n\ndef graph_reindex(\n    x: Any,\n    neighbors: Any,\n    count: Any,\n    value_buffer: Any | None = ...,\n    index_buffer: Any | None = ...,\n    flag_buffer_hashtable: bool = ...,\n    name: str | None = ...,\n): ...\n","repo_name":"cattidea/paddlepaddle-stubs","sub_path":"paddle-stubs/incubate/operators/graph_reindex.pyi","file_name":"graph_reindex.pyi","file_ext":"pyi","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}{"seq_id":"28636891484","text":"import numpy as np\n\nfrom npnd import gather_nd\nimport npnd.test_util as ntu\nimport npnd.numpy as nnp\nimport functools\nimport 
inspect\n\n# reference implementation of gather_nd.\ndef gather_nd_ref(params, indices, batch_dims=0):\n assert batch_dims == 0\n params = np.asarray(params)\n indices = np.asarray(indices)\n return params[tuple(indices.T)]\n\n# verify gathernd matches reference implementation.\ndef check_gather_nd(params, indices, batch_dims=0, output=None, output_shape=None, desc=None):\n y = gather_nd(params, indices, batch_dims=batch_dims)\n if output_shape is not None:\n x = np.asarray(output_shape)\n y = np.asarray(y.shape)\n elif output is not None:\n x = np.asarray(output)\n else:\n x = gather_nd_ref(params, indices, batch_dims=batch_dims)\n ntu.check_eq(x, y)\n return y\n\ndef gather_nd_name_func(testcase_func, param_num, param):\n kwargs = dict(param.kwargs)\n name = testcase_func.__name__ + '_' + str(param_num)\n if 'desc' in kwargs:\n desc = kwargs.pop('desc')\n name += f\" {desc!r} \"\n args = ntu.get_call_args(check_gather_nd, *param.args, **kwargs)\n indices = np.asarray(args['indices'])\n params = np.asarray(args['params'])\n batch_dims = args['batch_dims']\n kvs = []\n kvs.append(('params.shape', params.shape))\n kvs.append(('indices.shape', indices.shape))\n if batch_dims != 0:\n kvs.append(('batch_dims', batch_dims))\n for k, v in kvs:\n name += \" \"\n #name += ntu.parameterized.to_safe_name(str(k)).strip('_')\n name += str(k)\n name += \"=\"\n #name += ntu.parameterized.to_safe_name(str(v)).strip('_').replace('_', 'x')\n name += str(v)\n return name\n # return \"%s_%s\" %(\n # testcase_func.__name__,\n # ntu.parameterized.to_safe_name(\"_\".join(str(x) for x in param.args)),\n # )\n\ngather_nd_test_case = functools.partial(ntu.parameterized.expand, name_func=gather_nd_name_func)\n\n# test data.\ndata1 = [1, 2, 3, 4, 5, 6, 7, 8]\nindices1 = [[4], [3], [1], [7]]\nupdates1 = [9, 10, 11, 12]\noutput1 = [1, 11, 3, 10, 9, 6, 7, 12]\ndata1, indices1, updates1 = [np.asarray(x) for x in (data1, indices1, updates1)]\n\ndata2 = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\nindices2 = [[0], [2]]\nupdates2 = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]\ndata2, indices2, updates2 = [np.asarray(x) for x in (data2, indices2, updates2)]\n\ndata3 = np.stack([data1,data1]).T\nindices3 = indices1.copy()\nupdates3 = np.stack([updates1,updates1]).T\ndata3, indices3, updates3 = [np.asarray(x) for x in (data3, indices3, updates3)]\n\ndata4 = -1 - nnp.values_like(data3)\nindices4 = indices3.copy()\nupdates4 = nnp.values_like(updates3)\n\n\nclass GatherNdTestCase(ntu.TestCase):\n @gather_nd_test_case([\n (data1, indices1),\n (data2, indices2),\n (data3, indices3),\n (data4, indices4),\n ])\n def test_gather_nd(self, *args, **kws):\n check_gather_nd(*args, **kws)\n\n # ONNX GatherND from https://github.com/onnx/onnx/blob/main/docs/Operators.md#GatherND\n @gather_nd_test_case([\n # Example 1\n ntu.param(\n batch_dims = 0,\n params = [[0,1],[2,3]], # params_shape = [2, 2]\n indices = [[0,0],[1,1]], # indices_shape = [2, 2]\n output = [0,3] # output_shape = [2]\n ),\n # Example 2\n ntu.param(\n batch_dims = 0,\n params = [[0,1],[2,3]], # params_shape = [2, 2]\n indices = [[1],[0]], # indices_shape = [2, 2]\n output = [[2,3],[0,1]], # output_shape = [2]\n ),\n # Example 3\n ntu.param(\n batch_dims = 0,\n params = [[[0,1],[2,3]],[[4,5],[6,7]]], # 
params_shape = [2, 2, 2]\n indices = [[0,1],[1,0]], # indices_shape = [2, 2]\n output = [[2,3],[4,5]], # output_shape = [2, 2]\n ),\n # Example 4\n ntu.param(\n batch_dims = 0,\n params = [[[0,1],[2,3]],[[4,5],[6,7]]], # params_shape = [2, 2, 2]\n indices = [[[0,1],[1,0]]], # indices_shape = [2, 1, 2]\n output = [[[2,3],[4,5]]], # output_shape = [2, 1, 2]\n ),\n # Example 5\n ntu.param(\n batch_dims = 1,\n params = [[[0,1],[2,3]],[[4,5],[6,7]]], # params_shape = [2, 2, 2]\n indices = [[1],[0]], # indices_shape = [2, 1]\n output = [[2,3],[4,5]], # output_shape = [2, 2]\n ),\n ], name_func=gather_nd_name_func)\n def test_ONNX_GatherND_examples(self, *args, **kws):\n check_gather_nd(*args, **kws)\n\n # tf.gather_nd examples from https://www.tensorflow.org/api_docs/python/tf/gather_nd\n\n # Gathering scalars\n @gather_nd_test_case([\n ntu.param(\n indices=[[0, 0],\n [1, 1]],\n params = [['a', 'b'],\n ['c', 'd']],\n output = ['a', 'd'],\n ),\n ])\n def test_tf_gather_nd_scalars(self, *args, **kws):\n check_gather_nd(*args, **kws)\n\n # Gathering slices\n @gather_nd_test_case([\n ntu.param(\n indices = [[1],\n [0]],\n params = [['a', 'b', 'c'],\n ['d', 'e', 'f']],\n output = [['d', 'e', 'f'],\n ['a', 'b', 'c']],\n ),\n ])\n def test_tf_gather_nd_slices(self, *args, **kws):\n check_gather_nd(*args, **kws)\n\n # Batches\n @gather_nd_test_case([\n ntu.param(\n indices = [[0, 1],\n [1, 0],\n [2, 4],\n [3, 2],\n [4, 1]],\n params = nnp.values((5,7,3)),\n output= [[ 3, 4, 5],\n [21, 22, 23],\n [54, 55, 56],\n [69, 70, 71],\n [87, 88, 89]],\n ),\n ntu.param(\n batch_dims=1,\n indices = [[1],\n [0],\n [4],\n [2],\n [1]],\n params = nnp.values((5,7,3)),\n output= [[ 3, 4, 5],\n [21, 22, 23],\n [54, 55, 56],\n [69, 70, 71],\n [87, 88, 89]],\n ),\n ])\n def test_tf_gather_nd_batches(self, *args, **kws):\n check_gather_nd(*args, **kws)\n\n # More examples\n @gather_nd_test_case([\n ntu.param(\n desc = \"Indexing into a 3-tensor\",\n indices = [[1]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = [[['a1', 'b1'],\n ['c1', 'd1']]],\n ),\n ntu.param(\n desc = \"Indexing into a 3-tensor\",\n indices = [[0, 1], [1, 0]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = [['c0', 'd0'],\n ['a1', 'b1']],\n ),\n ntu.param(\n desc = \"Indexing into a 3-tensor\",\n indices = [[0, 0, 1], [1, 0, 1]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = ['b0', 'b1'],\n ),\n ntu.param(\n desc = \"Batched indexing into a matrix\",\n indices = [[[0, 0]], [[0, 1]]],\n params = [['a', 'b'], ['c', 'd']],\n output = [['a'],\n ['b']],\n ),\n ntu.param(\n desc = \"Batched slice indexing into a matrix\",\n indices = [[[1]], [[0]]],\n params = [['a', 'b'], ['c', 'd']],\n output = [[['c', 'd']],\n [['a', 'b']]],\n ),\n ntu.param(\n desc = \"Batched indexing into a 3-tensor\",\n indices = [[[1]], [[0]]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = [[[['a1', 'b1'],\n ['c1', 'd1']]],\n [[['a0', 'b0'],\n ['c0', 'd0']]]],\n ),\n ntu.param(\n desc = \"Batched indexing into a 3-tensor\",\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = [[['c0', 'd0'],\n ['a1', 'b1']],\n [['a0', 'b0'],\n ['c1', 'd1']]],\n ),\n ntu.param(\n desc = \"Batched indexing into a 3-tensor\",\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output 
= [['b0', 'b1'],\n ['d0', 'c1']],\n ),\n ntu.param(\n desc = \"Examples with batched 'params' and 'indices'\",\n batch_dims = 1,\n indices = [[1],\n [0]],\n params = [[['a0', 'b0'],\n ['c0', 'd0']],\n [['a1', 'b1'],\n ['c1', 'd1']]],\n output = [['c0', 'd0'],\n ['a1', 'b1']],\n ),\n ntu.param(\n desc = \"Examples with batched 'params' and 'indices'\",\n batch_dims = 1,\n indices = [[[1]], [[0]]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = [[['c0', 'd0']],\n [['a1', 'b1']]],\n ),\n ntu.param(\n desc = \"Examples with batched 'params' and 'indices'\",\n batch_dims = 1,\n indices = [[[1, 0]], [[0, 1]]],\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]],\n output = [['c0'],\n ['b1']],\n ),\n ])\n def test_tf_gather_nd_more_examples(self, *args, **kws):\n check_gather_nd(*args, **kws)\n\n\nif __name__ == '__main__':\n ntu.main()\n","repo_name":"shawwn/npnd","sub_path":"tests/test_gather_nd.py","file_name":"test_gather_nd.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12544507239","text":"g1 = smiles('C=C=C=C=C', name=\"Test\", add=False)\n#g1.print()\n'''dumb_rule = ruleGMLString(\"\"\"rule [\n ruleID \"Dumb Rule\"\n left [\n edge [ source 0 target 1 label \"=\"]\n ]\n context [\n node [ id 0 label \"C\"]\n node [ id 1 label \"C\"]\n ]\n right [\n edge [ source 0 target 1 label \"-\"]\n ]\n]\"\"\")\ndumb_rule.print()'''\n\n#include(os.path.join(\"..\", \"rules/benzilicAcidRearrangement.py\"))\n#include(os.path.join(\"..\", \"rules/aldolCondensationOneStep.py\"))\n#include(os.path.join(\"..\", \"rules/aldolCondensation.py\"))\ninclude(os.path.join(\"..\", \"rules/elimination1.py\"))\ninclude(os.path.join(\"..\", \"rules/elimination2.py\"))\ninclude(os.path.join(\"..\", \"rules/retroAldol.py\"))\ninclude(os.path.join(\"..\", \"rules/ketoEnolisation.py\"))\ninclude(os.path.join(\"..\", \"rules/hydration.py\"))\nketoEnolisation[0].makeInverse()\nwater = smiles(\"O\", name=\"Water\")\n#test_molecule = smiles(\"O=C(C(=O)c1ccccc1)c1ccccc1\", name = \"Test Aromatic Compound\")\n#cyclohexandione = smiles(\"O=C1CCCCC1=O\", name=\"Cyclohexan-1,2-dione\")\n#straight_chain = smiles(\"C(=O)C(=O)\", name=\"Pentan-2,3-dione\")\n#acetaldehyde = smiles(\"CC=O\", name=\"Acetaldehyde\")\n#ethanamide = smiles(\"NC(=O)C\", name = \"Ethanamide\")\n#mercaptan = smiles(\"SC(=O)C\", name = \"Mercaptoethanone\")\n#formaldehyde = smiles(\"C=O\", name=\"Formaldehyde\")\naldol = smiles(\"O=CCC(O)C\", name='Aldol')\np = GraphPrinter()\np.collapseHydrogens = True\np.simpleCarbons = True\np.withColour = True\n\n#test_molecule.print(p)\n\nforbidden = graphGMLString(\"\"\"graph[\n node [ id 0 label \"*\" ]\n node [ id 1 label \"*\" ]\n node [ id 2 label \"*\"]\n edge [ source 0 target 1 label \"=\" ]\n edge [ source 1 target 2 label \"=\" ]\n]\n\"\"\", name=\"Two double bonds\", add=False)\n\n# Test if double bonds can \ndef pred(derivation):\n for g in derivation.right:\n if forbidden.monomorphism(g, \n labelSettings=LabelSettings(LabelType.Term, LabelRelation.Specialisation)) > 0:\n print(\"Found double bonds!\")\n return False\n return True\n\nfor rule in inputRules:\n rule.print()\n\ngenerations = 3\n\nstrat = (\n addSubset(inputGraphs)\n >> rightPredicate[pred](repeat[generations](inputRules))\n)\n\n\ndg = DG(graphDatabase=inputGraphs, labelSettings=LabelSettings(LabelType.Term,LabelRelation.Specialisation))\nwith dg.build() as b:\n b.execute(strat, 
verbosity=8)\n\ndg.print()\n\npostSection(\"Elimination\")\ncount = 0\nfor e in dg.edges:\n if count > 100:\n break\n else:\n for rule in e.rules:\n if \"Elimination +\" in rule.name:\n count += 1\n dg2 = DG(graphDatabase=inputGraphs)\n with dg2.build() as b:\n d = Derivations()\n sources = [source.graph for source in e.sources]\n targets = [target.graph for target in e.targets]\n d.left = sources\n d.rules = [rule]\n d.right = targets\n fake_edge = b.addDerivation(d)\n print(\"Printing reaction: \", fake_edge)\n dg2.print()\n\npostSection(\"Individual Molecules in the Network\")\nfor g in dg.vertices:\n g.graph.print(p)\n\n'''\ndef dumpDerivation(graph, filename):\n # graph is a DG object\n with open(filename, 'w') as file:\n file.write(\"# (Format) Vertex: , , , \\n\")\n file.write(\"# Edge: , \")\n for v in graph.vertices:\n file.write(f'Vertex:{v.id}\\t{v.graph.graphDFS}\\t{[e.id for e in v.inEdges]}\\t{[e.id for e in v.outEdges]}')\n file.write('\\n')\n for e in graph.edges:\n # save edge id and rule names\n file.write(f'Edge:\\t{e.id}\\t{[r.name for r in e.rules]}')\n file.write('\\n')\n print('Dump file: ', filename)\n\ndef loadDump(filename):\n try:\n with open(filename) as file:\n for line in file.readlines():\n if line.startswith('#'):\n # Ignore comments\n pass\n lineElements = line.split(sep='\\t')\n if lineElements[0] == 'Edge:':\n pass\n except:\n print(f'Error writing {filename}')\n'''\n'''some_rule = ruleGMLString(\"\"\"rule[\n left[\n node [ id 0 label \"C\"]\n ]\n]\n\"\"\")\nsome_rule.print()\ndg = DG(graphDatabase=inputGraphs)\nwith dg.build() as b:\n res = b.execute(strat)\n subset = res.subset\n universe = res.universe\n print(universe)\n d = Derivations()\n d.left = [universe[0]]\n d.rules = [some_rule]\n d.right = [universe[1]]\n new_edge = b.addDerivation(d)\n #new_edge.print()\n for i in range(generations-1):\n subset = res.subset\n for item in subset:\n print(type(item))\n universe = res.universe\n b.execute(addSubset(subset) >> addUniverse(universe) >> strat)\ndg.print()'''\n\n#dumpDerivation(dg, 'test_dump.txt')","repo_name":"Reaction-Space-Explorer/reac-space-exp","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"13348731164","text":"import sys\nsys.path.insert(1,\"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\nfrom h2o.grid.grid_search import H2OGridSearch\n\ndef test_glm_binomial():\n \"\"\"\n PUBDEV-6349: make sure gridsearch works with negative binomial family.\n \"\"\"\n\n prostate = h2o.import_file(path=pyunit_utils.locate(\"smalldata/prostate/prostate_complete.csv.zip\"))\n myY = \"GLEASON\"\n myX = [\"ID\",\"AGE\",\"RACE\",\"CAPSULE\",\"DCAPS\",\"PSA\",\"VOL\",\"DPROS\"]\n hyper_parameters = {'alpha': [0, 0.5, 0.99], \n 'theta' : [0.000000001, 0.01, 0.1, 0.5, 1]} # set hyper_parameters for grid search\n\n # train H2O GLM model with grid search\n model_h2o_grid_search = H2OGridSearch(H2OGeneralizedLinearEstimator(family=\"negativebinomial\", Lambda=0.5),\n hyper_parameters)\n model_h2o_grid_search.train(x=myX, y=myY, training_frame=prostate)\n fk1 = model_h2o_grid_search.get_grid(sort_by=\"residual_deviance\")\n\n model_h2o_grid_search2 = H2OGridSearch(H2OGeneralizedLinearEstimator(family=\"negativebinomial\", lambda_search=True),\n hyper_parameters)\n model_h2o_grid_search2.train(x=myX, y=myY, training_frame=prostate)\n fk2 = 
model_h2o_grid_search2.get_grid(sort_by=\"residual_deviance\")\n rdev1 = list(fk1.mean_residual_deviance(train=True).values())\n rdev2 = list(fk2.mean_residual_deviance(train=True).values())\n print(model_h2o_grid_search.get_grid(\"residual_deviance\"))\n print(model_h2o_grid_search2.get_grid(\"residual_deviance\"))\n assert(len(model_h2o_grid_search.get_grid())==15)\n assert min(rdev1) >= min(rdev2), \"Lambda search should provide a better solution {0} than fixed lambda value {1}\" \\\n \" but is not.\".format(rdev2, rdev1)\n \n # assert model_h2o_grid_search.\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_glm_binomial)\nelse:\n test_glm_binomial()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_6349_negbinomial_gridsearch.py","file_name":"pyunit_PUBDEV_6349_negbinomial_gridsearch.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"5816448773","text":"# https://www.acmicpc.net/problem/11047\n\nN, K = map(int, input().split())\n\ncoin = []\n\nfor _ in range(N):\n coin.append(int(input()))\n\ncoin.sort(reverse=True)\ncount = 0\nfor i in coin:\n if K >= i:\n count += K // i\n K %= i\n if K <= 0:\n break\n\nprint(count)","repo_name":"ryongseong/BaekJoon-Python","sub_path":"coding_test/11047.py","file_name":"11047.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29732872932","text":"import tensorflow as tf\n\nfrom tensorforce.core.explorations import Exploration\n\n\nclass GaussianNoise(Exploration):\n \"\"\"\n Explores via gaussian noise.\n \"\"\"\n\n def __init__(\n self,\n sigma=0.3,\n mu=0.0,\n scope='gaussian_noise',\n summary_labels=()\n ):\n \"\"\"\n Initializes distribution values for gaussian noise\n \"\"\"\n self.sigma = sigma\n self.mu = float(mu) # need to add cast to float to avoid tf type-mismatch error in case mu=0.0\n\n super(GaussianNoise, self).__init__(scope=scope, summary_labels=summary_labels)\n\n def tf_explore(self, episode, timestep, action_spec):\n return tf.random_normal(shape=action_spec['shape'], mean=self.mu, stddev=self.sigma)\n","repo_name":"nekrald/quadrotor_reinforcement_learning","sub_path":"libraries/tensorforce/tensorforce/core/explorations/gaussian_noise.py","file_name":"gaussian_noise.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"12385466307","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 20 18:11:46 2018\n\n@author: mitra\n\"\"\"\nimport pandas as pd\n\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import (\n Flask,\n render_template,\n jsonify)\n\napp = Flask(__name__)\n\nengine = create_engine(\"sqlite:///belly_button_biodiversity.sqlite\")\nBase = automap_base()\nBase.prepare(engine, reflect=True)\nsession = Session(engine)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/names\")\ndef names():\n sample =[]\n df = pd.read_csv(\"DataSets/belly_button_biodiversity_samples.csv\", dtype=object)\n sample = list(df)\n sample.pop(0)\n return jsonify(sample)\n\n@app.route(\"/otu\")\ndef otu():\n df = pd.read_csv(\"DataSets/belly_button_biodiversity_otu_id.csv\", dtype=object)\n otu_id = 
df.loc[:,'lowest_taxonomic_unit_found'].values.tolist()\n print(otu_id)\n return jsonify(otu_id)\n \n@app.route(\"/samples/\")\ndef sample(tsample):\n df = pd.read_csv(\"DataSets/belly_button_biodiversity_samples.csv\", dtype=object)\n df = df.loc[:,['otu_id', tsample]]\n df = df.sort_values(tsample,ascending=False)\n sample = [{\"out_id\":df.iloc[0:10,0].values.tolist(),\n \"sample_values\":df.iloc[0:10,1].values.tolist()\n }]\n return jsonify(sample)\n \n@app.route('/metadata/') \ndef metadata(tsample):\n tsample = tsample[3:]\n df = pd.read_csv(\"DataSets/Belly_Button_Biodiversity_Metadata.csv\", dtype=object)\n df = df.loc[df['SAMPLEID'] == tsample,:]\n df = df.loc[:,['AGE','BBTYPE','ETHNICITY','GENDER','LOCATION','SAMPLEID']]\n metadata =df.to_dict(orient=\"records\")\n return jsonify(metadata)\n\n@app.route('/wfreq/')\ndef wfreq(tsample):\n tsample = tsample[3:]\n df = pd.read_csv(\"DataSets/Belly_Button_Biodiversity_Metadata.csv\", dtype=object)\n df = df.loc[df['SAMPLEID'] == tsample,:]\n df = df.loc[:,['WFREQ']]\n wfreq =df.to_dict(orient=\"records\")\n return jsonify(wfreq)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"mitrapiya25/bellybuttonbiodiversity","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23598806758","text":"import numpy as np\nfrom pendulum_MPC_sim import simulate_pendulum_MPC, get_parameter, Ts_fast, get_default_parameters\nfrom scipy import sparse\nfrom pendulum_model import *\n\nN_eval = 0\n\nVAR_NAMES = ['QDu_scale',\n 'Qy11',\n 'Qy22',\n 'Np',\n 'Nc_perc',\n 'Ts_MPC',\n 'QP_eps_abs_log',\n 'QP_eps_rel_log',\n 'Q_kal_11',\n 'Q_kal_22',\n 'Q_kal_33',\n 'Q_kal_44',\n 'R_kal_11',\n 'R_kal_22',\n ]\n\n\ndef dict_to_x(dict_x):\n N_vars1 = len(dict_x)\n N_vars2 = len(VAR_NAMES)\n assert (N_vars1 == N_vars2)\n N_vars = N_vars1\n\n x = np.zeros(N_vars)\n for var_idx in range(N_vars):\n x[var_idx] = dict_x[VAR_NAMES[var_idx]]\n return x\n\n\ndef x_to_dict(x):\n if len(x.shape) == 2:\n x = x[0]\n N_vars1 = len(x)\n N_vars2 = len(VAR_NAMES)\n assert (N_vars1 == N_vars2)\n N_vars = N_vars1\n\n dict_x = {}\n for var_idx in range(N_vars):\n dict_x[VAR_NAMES[var_idx]] = x[var_idx]\n return dict_x\n\n\ndef get_simoptions_x(x):\n so = x_to_dict(x) # simopt\n\n # MPC cost: weight matrices\n Qx = sparse.diags([so['Qy11'], 0, so['Qy22'], 0]) # /sum_MPC_Qy # Quadratic cost for states x0, x1, ..., x_N-1\n QxN = Qx\n QDu = so['QDu_scale'] * sparse.eye(1) # Quadratic cost for Du0, Du1, ...., Du_N-1\n\n so['Qx'] = Qx\n so['QxN'] = QxN\n so['Qu'] = 0.0 * sparse.eye(1)\n so['QDu'] = QDu\n\n # MPC cost: prediction and control horizon\n so['Np'] = np.int(round(so['Np']))\n so['Nc'] = np.int(round(so['Np'] * so['Nc_perc']))\n\n # MPC cost: sample time\n # Make Ts_MPC a multiple of Ts_fast\n Ts_MPC = so['Ts_MPC']\n Ts_MPC = ((Ts_MPC // Ts_fast)) * Ts_fast # make Ts_MPC an integer multiple of Ts_fast\n so['Ts_MPC'] = Ts_MPC\n\n # MPC: solver settings\n so['QP_eps_abs'] = 10 ** so['QP_eps_abs_log']\n so['QP_eps_rel'] = 10 ** so['QP_eps_rel_log']\n\n # Kalman filter: matrices\n Q_kal = np.diag([so['Q_kal_11'], so['Q_kal_22'], so['Q_kal_33'], so['Q_kal_44']])\n R_kal = np.diag([so['R_kal_11'], so['R_kal_22']])\n\n so['Q_kal'] = Q_kal\n so['R_kal'] = R_kal\n\n # Fixed simulation settings\n so['std_nphi'] = 0.01\n so['std_npos'] = 0.02\n\n so['std_dF'] = 0.1\n so['w_F'] = 5\n\n return so\n\n\ndef f_x(x, eps_calc=1.0, 
seed_val=None):\n global N_eval\n\n if seed_val is None:\n seed_val = N_eval\n\n simoptions = get_simoptions_x(x)\n simoptions['seed_val'] = seed_val\n\n sim_failed = False\n try:\n simout = simulate_pendulum_MPC(simoptions)\n except ValueError as e:\n print(e)\n sim_failed = True\n\n if not sim_failed:\n t = simout['t']\n y_meas = simout['y_meas']\n x_ref = simout['x_ref']\n p_meas = y_meas[:, 0]\n phi_meas = y_meas[:, 1]\n\n p_ref = x_ref[:, 0]\n phi_ref = x_ref[:, 2]\n\n J_perf = 10 * np.mean(np.abs(p_ref - p_meas)) + 0.0 * np.max(np.abs(p_ref - p_meas)) + \\\n 30 * np.mean(np.abs(np.abs(phi_ref - phi_meas))) # + 15*np.max(np.abs(np.abs(phi_ref - phi_meas)))\n\n # Computation of the barrier function\n t_calc = simout['t_calc']\n eps_margin = 0.8\n\n t_calc = eps_calc * t_calc\n t_calc_wc = np.max(t_calc) # worst-case computational cost (max computational time)\n\n Ts_MPC = simout['Ts_MPC']\n t_available = Ts_MPC * eps_margin\n\n delay_wc = (t_calc_wc - t_available)\n delay_wc = delay_wc * (delay_wc >= 0)\n J_calc = (delay_wc / t_available) * 1e3\n\n emergency = simout['emergency_fast']\n emergency_time, _ = np.where(emergency > 0)\n if len(emergency_time) > 0:\n J_emergency = (len(emergency) - emergency_time[0]) / len(emergency) * 1e3\n else:\n J_emergency = 0.0\n\n else:\n J_perf = 1e3\n J_calc = 1e3\n J_emergency = 1e3 # (len(emergency) - emergency_time[0]) / len(emergency) * 1e3\n # J_perf = 2e1\n # J_fit = 2e1\n\n J_cl = np.log(J_perf) + np.log(1 + J_calc) + np.log(1+J_emergency)\n\n print(f\"N_eval: {N_eval}, J_perf:{J_perf:.2f}, J_calc:{J_calc:.2f}, J_emergency:{J_emergency:.2f}, J_cl:{J_cl:.2f}\")\n N_eval += 1\n\n return J_cl # + J_fit\n\n\nif __name__ == '__main__':\n\n import matplotlib.pyplot as plt\n\n dict_x0 = {\n\n 'QDu_scale': 0.001,\n 'Qy11': 0.1,\n 'Qy22': 0.5,\n 'Np': 100,\n 'Nc_perc': 0.5,\n 'Ts_MPC': 10e-3,\n 'QP_eps_abs_log': -3,\n 'QP_eps_rel_log': -3,\n 'Q_kal_11': 0.1,\n 'Q_kal_22': 0.9,\n 'Q_kal_33': 0.1,\n 'Q_kal_44': 0.9,\n 'R_kal_11': 0.5,\n 'R_kal_22': 0.5\n }\n x0 = dict_to_x(dict_x0)\n\n f_x0 = x_to_dict(x0)\n J_tot = f_x(x0)\n\n simopt = get_simoptions_x(x0)\n simout = simulate_pendulum_MPC(simopt)\n\n t = simout['t']\n x = simout['x']\n u = simout['u']\n y = simout['y']\n y_meas = simout['y_meas']\n x_ref = simout['x_ref']\n x_MPC_pred = simout['x_MPC_pred']\n x_fast = simout['x_fast']\n x_ref_fast = simout['x_ref_fast']\n y_ref = x_ref[:, [0, 2]] # on-line predictions from the Kalman Filter\n uref = get_parameter({}, 'uref')\n\n fig, axes = plt.subplots(3, 1, figsize=(10, 10))\n axes[0].plot(t, y_meas[:, 0], \"b\", label='p_meas')\n axes[0].plot(t, y[:, 0], \"k\", label='p')\n axes[0].plot(t, y_ref[:, 0], \"k--\", label=\"p_ref\")\n axes[0].set_title(\"Position (m)\")\n\n axes[1].plot(t, y_meas[:, 1] * RAD_TO_DEG, \"b\", label='phi_meas')\n axes[1].plot(t, y[:, 1] * RAD_TO_DEG, 'k', label=\"phi\")\n axes[1].plot(t, y_ref[:, 1] * RAD_TO_DEG, \"k--\", label=\"phi_ref\")\n axes[1].set_title(\"Angle (deg)\")\n\n axes[2].plot(t, u[:, 0], label=\"u\")\n axes[2].plot(t, uref * np.ones(np.shape(t)), \"r--\", label=\"u_ref\")\n axes[2].set_title(\"Force (N)\")\n\n for ax in axes:\n ax.grid(True)\n ax.legend()\n\n default = get_default_parameters(simopt)\n\n t_calc = simout['t_calc']\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n plt.hist(t_calc * 1e3)\n plt.title(\"Computation time (ms)\")\n\n t_int = simout['t_int_fast']\n t_fast = simout['t_fast']\n u_fast = simout['u_fast']\n\n # MPC time check\n # In[MPC computation time ]\n fig, axes = plt.subplots(4, 1, 
figsize=(14, 10), sharex=True)\n axes[0].plot(t, y_meas[:, 0], \"b\", label='p_meas')\n axes[0].plot(t_fast, x_fast[:, 0], \"k\", label='p')\n axes[0].step(t, y_ref[:, 0], \"k--\", where='post', label=\"p_ref\")\n axes[0].set_ylim(-0.2, 1.0)\n axes[0].set_xlabel(\"Simulation time (s)\")\n axes[0].set_ylabel(\"Position (m)\")\n\n axes[1].step(t, t_calc[:, 0] * 1e3, \"b\", where='post', label='T_MPC')\n axes[1].set_xlabel(\"Simulation time (s)\")\n axes[1].set_ylabel(\"MPC time (ms)\")\n axes[1].set_ylim(0, 4)\n axes[2].step(t_fast[1:], t_int[1:, 0] * 1e3, \"b\", where='post', label='T_ODE') # why is 1st slow? check numba\n axes[2].set_xlabel(\"Simulation time (s)\")\n axes[2].set_ylabel(\"ODE time (ms)\")\n axes[2].set_ylim(0, 0.3)\n axes[3].step(t, u[:, 0], where='post', label=\"F\")\n axes[3].step(t_fast, u_fast[:, 0], where='post', label=\"F_d\")\n axes[3].set_xlabel(\"Simulation time (s)\")\n axes[3].set_ylabel(\"Force (N)\")\n\n for ax in axes:\n ax.grid(True)\n ax.legend()\n","repo_name":"forgi86/efficient-calibration-embedded-MPC","sub_path":"objective_function.py","file_name":"objective_function.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"39007374167","text":"import glob, shutil, os, time\r\nfrom os import system\r\n\r\nsystem('cls');\r\n\r\ndirectory= ''; validpath = 0; series_len = 0; filesfound = False\r\n\r\ndef Choice():\r\n print (\" ______ _ _ _ _ _ _ _ \")\r\n print (\"| ____(_) | | | | | | (_) |\")\r\n print (\"| |__ _| | ___ | | | | |_ _| |\")\r\n print (\"| __| | | |/ _ \\ | | | | __| | |\")\r\n print (\"| | | | | __/ | |__| | |_| | |\")\r\n print (\"|_| |_|_|\\___| \\____/ \\__|_|_|\")\r\n if validpath ==1:\r\n print (\"\\n\\n\\n\" + \"Options: \")\r\n print (\"1) File Search\")\r\n print (\"2) Move Files\")\r\n print (\"3) Set Path\")\r\n print (\"4) File List\")\r\n print (\"5) Quit\")\r\n\r\ndef Set_Path():\r\n flag = 0\r\n while flag == False:\r\n dir__ = input (\"\\nEnter directory\\n(format: C:/Media/Files/): \")\r\n ext_ = input(\"\\nEnter file extension\\n(format: txt, mp4, srt): \")\r\n file_list=[]; directory_ = dir__ + \"*.\" + ext_ \r\n if file_list == 0:\r\n print (\"\\nNo Path Set. 
Try Again.\") \r\n for files in glob.glob(directory_):\r\n names = os.path.basename(files)\r\n file_list.append(names) \r\n filelist_l= len(file_list)\r\n if filelist_l > 0:\r\n print (\"\\n\" + \"Got file list!\" + \" Found \" + str(filelist_l) + \" files.\" \"\\n\")\r\n print (*file_list, sep=\"\\n\");input (\"\\nHit Enter...\")\r\n else:\r\n print (\"\\nNo files returned.\"); input(\"\\nHit Enter...\")\r\n filesfound = True; flag = True\r\n return file_list, filelist_l, dir__, directory_, filesfound, ext_,\r\n\r\ndef File_Search():\r\n print(\"\\nCurrent file type: \" + ext + \"\\n\")\r\n f_search_= input(\"\\nEnter series or sub name\\n(format: The Office, Stranger Things): \")\r\n season = input (\"\\nEnter season (format: 1, 2..):\")\r\n filtered_ = dir_ + \"[S\" + season + \".E*\" + f_search_ + \"*\" + ext\r\n series_list=[] \r\n for series in glob.glob(filtered_):\r\n series_list.append(series)\r\n flag = True\r\n series_l= len(series_list)\r\n if series_l < 1:\r\n print(\"\\nNo files returned.\")\r\n else:\r\n print (\"\\n\" + \"Got file list!\" + \" Found \" + str(series_l) + \" files.\" \"\\n\")\r\n print (*series_list, sep=\"\\n\")\r\n return series_list, f_search_, filtered_, series_l\r\n\r\ndef Move_Files():\r\n print (\"\\nThe series which will be moved is: \" + filesearch +\"\\n\")\r\n print (\"\\nThe path it will be moved to is: \" + str(newdir) +\"\\n\")\r\n correct = input (\"\\nIs this correct(y/n)? \")\r\n if correct == \"y\":\r\n for allfiles in glob.glob(str(filtered)):\r\n shutil.move(allfiles, newdir)\r\n\r\nf=0; ctr=0\r\nwhile f == False:\r\n Choice(); print(\"\\n\\nA valid path must be set \\n\")\r\n flist, flist_len, dir_, directory, filesfound, ext= Set_Path()\r\n validpath=1; f=1\r\n \r\nwhile ctr ==False:\r\n system('cls'); Choice()\r\n option = input (\"\\nWhat would you like to do? \")\r\n if option == \"1\":\r\n series, filesearch, filtered, series_len = File_Search()\r\n input (\"\\nHit Enter...\")\r\n if option == \"2\":\r\n system('cls')\r\n if series_len == 0:\r\n print (\"\\nThere is nothing stored. Returning.\")\r\n input(\"\\nHit Enter...\")\r\n else:\r\n print (\"\\nThese are the files: \\n\"); print (*series, sep=\"\\n\")\r\n prompt= input (\"\\nWould you like to create a directory (y/n)? \")\r\n while True:\r\n try:\r\n if prompt == \"y\":\r\n dir_name= input (\"\\nEnter partial new dir name\\n(format: TV/subs/): \") \r\n path = os.path.join(dir_, dir_name)\r\n newdir = os.makedirs(path); newdir = str(path)\r\n print (\"\\nCreated '\" + path + \"' \")\r\n break\r\n else:\r\n enterdir= input (\"\\nEnter full directory to move to\\n(format: C:/Media/TV/..): \")\r\n newdir = enterdir\r\n break\r\n except OSError:\r\n print (\"\\n\" + path + \" already exists.\"),\r\n there= input(\"Move there (y/n)?\")\r\n if there == \"y\":\r\n newdir = path; break\r\n def_ = Move_Files()\r\n if option ==\"3\":\r\n flist, flist_len, dir_, directory, filesfound, ext = Set_Path() \r\n if option ==\"4\":\r\n system('cls')\r\n fl=[]\r\n for f in glob.glob(directory):\r\n fl.append(f)\r\n if len(fl) ==0:\r\n print(\"\\n\\n\\nCurrent path contains no files. 
\\n\")\r\n else:\r\n print(\"\\n\\n\\nCurrent path contains: \\n\")\r\n print (*fl, sep=\"\\n\")\r\n input(\"\\nHit Enter...\")\r\n if option == \"5\":\r\n print(\"\\nGoodbye.\")\r\n time.sleep(2)\r\n ctr = True\r\n\r\n","repo_name":"g3th/File-Manipulation","sub_path":"fileutil.py","file_name":"fileutil.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32099182415","text":"#jai mata di#\nimport sys\nsys.stdin = open('input.in', 'r') \nsys.stdout = open('output.out', 'w') \n\n\n\n\n#start the code from here\n\ntrump=input()\nfir,sec=map(str,input().split())\ntrlist=[\"S\", \"H\", \"D\",\"C\"]\nrank=[\"6\", \"7\", \"8\", \"9\", \"T\", \"J\", \"Q\", \"K\",\"A\"]\nif fir[1]==trump and sec[1]!=trump:\n\tprint(\"YES\")\nelif fir[1]==sec[1]:\n\tif rank.index(fir[0])>rank.index(sec[0]):\n\t\tprint(\"YES\")\n\telse:\n\t\tprint(\"NO\")\nelse:\n\tprint(\"NO\")","repo_name":"adityachaudhary147/py-codes","sub_path":"vir 1 div2 #82.py","file_name":"vir 1 div2 #82.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35543660292","text":"from flask import Flask, request\nfrom flask import jsonify\nfrom instasentiments import getPublicProfileCaptions, getPrivateProfileCaptions, getSentiments\n\n#Initializing our application\napp = Flask(__name__)\n\n\n#Route that sends the JSON response to the client\n@app.route('/requestjson', methods=['POST','GET'])\ndef receiveSendJSON():\n if request.method == 'GET': #We want some data to be received by the server, hence we return an error if the method was GET\n return \"
GET requests are not allowed, send some JSON data to this URL.
\"\n else:\n data = request.json # JSON received from the client\n if data['type'] == 'Public':\n login_id = data['login_id']\n print(login_id)\n result, profile_pic, full_name = getPublicProfileCaptions(login_id)\n\n print(full_name)\n if type(result) == str:\n return jsonify({\n 'Type':'Fail',\n 'Value': result\n })\n else:\n sentiments = getSentiments(result)\n if type(sentiments) == str:\n return jsonify({\n 'Type': 'Fail',\n 'Value': sentiments,\n })\n else:\n return jsonify({\n 'Type': 'Success',\n 'Value': sentiments,\n 'Picture': profile_pic,\n 'Name': full_name\n })\n\n elif data['type'] == 'Private':\n login_id = data['login_id']\n login_username = data['login_username']\n password = data['password']\n print(login_id)\n result, profile_pic, full_name = getPrivateProfileCaptions(login_id, login_username, password)\n if type(result) == str:\n return jsonify({\n 'Type': 'Fail',\n 'Value': result\n })\n else:\n sentiments = getSentiments(result)\n if type(sentiments) == str:\n return jsonify({\n 'Type': 'Fail',\n 'Value': sentiments\n })\n else:\n return jsonify({\n 'Type': 'Success',\n 'Value': sentiments,\n 'Picture': profile_pic,\n 'Name': full_name\n })\n\n\nif __name__ == '__main__':\n app.run(debug=False,threaded=False)\n","repo_name":"namas191297/instaknow_backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"41480422981","text":"from typing import List\r\nfrom unittest import TestCase\r\n\r\nimport pytest\r\n\r\nfrom aplicacao.models import ModeloUnidadeSenai, ModeloDocente, ModeloTelefone\r\nfrom aplicacao.servicos import ServicoConverterModeloDocente\r\nfrom dominio.entidades import Docente\r\nfrom dominio.objetos_de_valor import Id\r\nfrom testes.fabricas import FabricaTesteModeloUnidadeSenai, FabricaTesteDocente, FabricaTesteModeloDocente, \\\r\n FabricaTesteModeloTelefone\r\n\r\n\r\n@pytest.mark.django_db\r\nclass TestServicoConverterModeloDocente(TestCase):\r\n def test_de_entidade_QUANDO_entidade_fornecida_ENTAO_retorna_modelo_com_atributos_esperados(self) -> None:\r\n modelo_unidade_senai: ModeloUnidadeSenai = FabricaTesteModeloUnidadeSenai.create()\r\n docente: Docente = FabricaTesteDocente.build(unidade_senai_id=Id(modelo_unidade_senai.id))\r\n\r\n modelo_docente = ServicoConverterModeloDocente.de_entidade(entidade=docente)\r\n\r\n atributos_resultantes = [\r\n modelo_docente.id,\r\n modelo_docente.nome,\r\n modelo_docente.email,\r\n modelo_docente.unidade_senai.id,\r\n modelo_docente.tipo_de_contratacao,\r\n modelo_docente.ativo\r\n ]\r\n atributos_esperados = [\r\n docente.id.valor,\r\n docente.nome.valor,\r\n docente.email.valor,\r\n modelo_unidade_senai.id,\r\n docente.tipo_de_contratacao.valor.value,\r\n docente.ativo\r\n ]\r\n self.assertEqual(atributos_resultantes, atributos_esperados)\r\n\r\n def test_para_entidade_QUANDO_chamado_ENTAO_retorna_entidade_com_atributos_esperados(self) -> None:\r\n modelo_unidade_senai: ModeloUnidadeSenai = FabricaTesteModeloUnidadeSenai.create()\r\n modelo_docente: ModeloDocente = FabricaTesteModeloDocente.create(unidade_senai=modelo_unidade_senai)\r\n modelos_telefones: List[ModeloTelefone] = [\r\n FabricaTesteModeloTelefone.create(numero='(00)0000-0000', docente=modelo_docente),\r\n FabricaTesteModeloTelefone.create(numero='(11)1111-1111', docente=modelo_docente),\r\n ]\r\n\r\n docente = ServicoConverterModeloDocente.para_entidade(modelo_docente)\r\n\r\n atributos_resultantes = 
[\r\n docente.id.valor,\r\n docente.nome.valor,\r\n docente.email.valor,\r\n [telefone.valor for telefone in docente.telefones],\r\n modelo_unidade_senai.id,\r\n docente.tipo_de_contratacao.valor.value,\r\n docente.ativo\r\n ]\r\n atributos_esperados = [\r\n modelo_docente.id,\r\n modelo_docente.nome,\r\n modelo_docente.email,\r\n [telefone.numero for telefone in modelos_telefones],\r\n modelo_docente.unidade_senai.id,\r\n modelo_docente.tipo_de_contratacao,\r\n modelo_docente.ativo\r\n ]\r\n self.assertEqual(atributos_resultantes, atributos_esperados)","repo_name":"fr-mm/competencias-backend","sub_path":"testes/unitarios/aplicacao/servicos/test_servico_converter_modelo_docente.py","file_name":"test_servico_converter_modelo_docente.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"gl","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19052588330","text":"#coding:utf-8\n\"\"\" Write a Python program to check whether it follows the sequence given in the patterns array. Go to the editor\nPattern example:\nFor color1 = [\"red\", \"green\", \"green\"] and patterns = [\"a\", \"b\", \"b\"]\nthe output should be samePatterns(color1, patterns) = true;\nFor color2 = [\"red\", \"green\", \"greenn\"] and patterns = [\"a\", \"b\", \"b\"]\nthe output should be samePatterns (strings, color2) = false.\"\"\"\nfrom array import *\ndef test(an_array):\n for i in an_array:\n if an_array.count(i)>=2:\n return i\n return -1\n\nprint(test([4, 3, 5, 3, 7, 4, 9, 3]))\nprint(test([1, 3, 5, 7, 9]))\nprint(test([1, 3, 5, 3, 7, 1, 9, 3]))","repo_name":"DonaFidele/PythonExercices","sub_path":"arrays/exo_16.py","file_name":"exo_16.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4574545649","text":"from math import sin, cos, sqrt, atan2, radians\n\ndef measure(from_loc, to_loc):\n\tearth_radius = 6373\n\n\tlat1 = radians(from_loc[0]) \n\tlon1 = radians(from_loc[1]) \n\tlat2 = radians(to_loc[0]) \n\tlon2 = radians(to_loc[1]) \n\t\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\t\n\ta = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n\tc = 2 * atan2(sqrt(a), sqrt(1-a))\n\t\n\tdistance = round((earth_radius * c) * 5 / 8, 2)\n\t\n\treturn distance\n","repo_name":"idx-horizon/pkr","sub_path":"app/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28134004184","text":"import requests\nimport json\nimport time\nimport datetime\nimport pandas as pd\nfrom pybithumb.core import *\nfrom xcoin_api_client import *\nimport pybithumb\nimport tkinter as tk\nfrom tkinter import*\nimport tkinter.scrolledtext as tkst\nfrom tkinter import Menu\nfrom tkinter import ttk\nimport os\nimport bitcoin_api\nimport threading\nfrom pybithumb import Bithumb\n\ndef trunc(number, ndigits):\n parts = str(number).split('.') # divides number into 2 parts. for ex: -5, and 4427926\n truncated_number = '.'.join([parts[0], parts[1][:ndigits]]) # We keep this first part, while taking only 2 digits from the second part. 
Then we concat it together to get '-5.44'\n    return round(float(truncated_number), ndigits) # This should return a float number, but to make sure it is rounded to 2 decimals.\n\ndef coin_ticker_public(coin_name,avg_h,sto_N,sto_m,sto_t,delay_time):\n    try:\n        time.sleep(float(delay_time))\n        bitcoin_api_url = 'https://api.bithumb.com/public/ticker/%s_%s'%(coin_name,\"KRW\")\n        response = requests.get(bitcoin_api_url)\n        response_json = response.json()\n\n        closing_price = response_json['data']['closing_price']\n\n\n        #print(\"closing_price\",closing_price)\n\n        ##\n        bitcoin_api_url = 'https://api.bithumb.com/public/candlestick/%s_%s/%s' % (coin_name, \"KRW\", \"1m\")\n        response = requests.get(bitcoin_api_url)\n        #print(\"candlestick\",coin_name, response.text)\n        response_json = response.json()\n        df = pd.DataFrame(response_json['data'])\n        df.set_index(0, inplace=True)\n        df.columns = [\"price\", \"close\", \"high\", \"low\", \"trade\"]\n\n        # Stochastic %K (fast %K) = (current price - N-period low) / (N-period high - N-period low) * 100\n        df[\"max%d\" % sto_N] = (df[\"high\"]).rolling(sto_N).max()\n        df[\"min%d\" % sto_N] = (df[\"low\"]).rolling(sto_N).min()\n        df[\"stochastic%K\"] = df.apply(lambda x: 100 * (float(x[\"close\"]) - x[\"min%d\" % sto_N]) /\n                                                (x[\"max%d\" % sto_N] - x[\"min%d\" % sto_N])\n                                      if (x[\"max%d\" % sto_N] - x[\"min%d\" % sto_N]) != 0 else 50, 1)\n        # Stochastic %D (fast %D) = average of %K over m periods = Slow %K\n        # slow %K = the stochastic %D computed above\n        df[\"slow_%K\"] = df[\"stochastic%K\"].rolling(sto_m).mean()\n        # slow %D = average of slow %K over t periods\n        df[\"slow_%D\"] = df[\"slow_%K\"].rolling(sto_t).mean()\n\n        length = df.iloc[len(df) - 1][\"stochastic%K\"]\n        smoothk = df.iloc[len(df) - 1][\"slow_%K\"]\n        smoothd = df.iloc[len(df) - 1][\"slow_%D\"]\n\n        ## 5-minute average\n        avg_5mins = df.iloc[len(df['close']) - 5:]['close']\n        total_5min_price = 0\n        for avg_5min in avg_5mins:\n            total_5min_price += float(avg_5min)\n        avg_5min_price = total_5min_price / 5\n        #print(\"avg_5min_price\", avg_5min_price)\n\n        ## 10-minute average\n        avg_10mins = df.iloc[len(df['close']) - 10:]['close']\n        total_10min_price = 0\n        for avg_10min in avg_10mins:\n            total_10min_price += float(avg_10min)\n        avg_10min_price = total_10min_price / 10\n        #print(\"avg_10min_price\", avg_10min_price)\n\n        ## 30-minute average\n        avg_30mins = df.iloc[len(df['close']) - 30:]['close']\n        total_30min_price = 0\n        for avg_30min in avg_30mins:\n            total_30min_price += float(avg_30min)\n        avg_30min_price = total_30min_price / 30\n        #print(\"avg_30min_price\", avg_30min_price)\n\n        ## 1-hour average\n        avg_1hours = df.iloc[len(df['close']) - (60*int(avg_h)):]['close']\n        total_1hour_price = 0\n        for avg_1hour in avg_1hours:\n            total_1hour_price += float(avg_1hour)\n        avg_1hour_price = total_1hour_price / (60*int(avg_h))\n        #print(\"avg_1hour_price\", avg_1hour_price)\n\n        length = df.iloc[len(df) - 1][\"stochastic%K\"]\n        #print(\"length\", length)\n\n        smoothk = df.iloc[len(df) - 1][\"slow_%K\"]\n        #print(\"smoothk\", smoothk)\n\n        smoothd = df.iloc[len(df) - 1][\"slow_%D\"]\n        #print(\"smoothd\", smoothd)\n    except Exception as ex:\n        print(\"Exception\",ex)\n        closing_price=0\n        avg_5min_price=0\n        avg_10min_price=0\n        avg_30min_price=0\n        avg_1hour_price=0\n        length=0\n        smoothk=0\n        smoothd=0\n\n    return closing_price,avg_5min_price,avg_10min_price,avg_30min_price,avg_1hour_price,length,smoothk,smoothd\n\n\ndef get_balance(con_key,sec_key):\n    api = XCoinAPI(con_key, sec_key)\n\n    parm = {\n    }\n    result = api.xcoinApiCall(\"/info/balance\", parm)\n    available_krw = result[\"data\"][\"available_krw\"]\n\n    return available_krw\n\ndef market_buy(con_key,sec_key,coin):\n    
bitcoin_api_url = 'https://api.bithumb.com/public/ticker/%s_%s' % (coin, \"KRW\")\n    response = requests.get(bitcoin_api_url)\n    response_json = response.json()\n    closing_price = response_json['data']['closing_price']\n    balance = float(closing_price)\n    try:\n        available_krw = float(get_balance(con_key,sec_key))\n        #available_krw = float(trunc(available_krw,4))\n\n\n        available_coin_count = float((available_krw * 0.7) / balance)\n\n        available_coin_count = format(float(available_coin_count), \".8f\")\n\n        available_coin_count = float(trunc(available_coin_count,6))\n\n        print(\"currently available KRW\",(available_krw))\n\n        print(\"current market price\", (balance))\n\n        print(\"available purchase quantity\", (available_coin_count))\n\n\n        bithumb = Bithumb(con_key, sec_key)\n        result =bithumb.buy_market_order(coin,available_coin_count,\"KRW\")\n        result = \"coin : \" + coin + \" available KRW : \" + str(available_krw) + \" current market price : \" + str(\n            balance) + \" available purchase quantity : \" + str(available_coin_count) + str(result)\n    except:\n        result = \"API KEY does not exist\"\n\n    print(\"response\", str(result))\n    return str(result)\n\ndef get_account(con_key,sec_key,coin):\n    api = XCoinAPI(con_key, sec_key)\n\n    parm = {\n        \"order_currency\": coin,\n        \"payment_currency\": \"KRW\"\n    }\n    result = api.xcoinApiCall(\"/info/account\", parm)\n    balance = result['data'][\"balance\"]\n    return balance\n\ndef market_sell(con_key,sec_key,coin):\n    try:\n        balance = float(get_account(con_key,sec_key,coin))\n\n        balance = float(format(float(balance), \".8f\"))\n        balance = float(trunc(balance, 4))\n\n        print(coin,balance)\n\n        api = XCoinAPI(con_key, sec_key)\n\n        parm = {\n            \"order_currency\": coin,\n            \"payment_currency\": \"KRW\",\n            \"units\": (balance)\n        }\n        result = api.xcoinApiCall(\"/trade/market_sell\", parm)\n        result = \"coin : \" + coin + \" sellable coin count : \" + str(balance) + str(result)\n    except:\n        result = \"API KEY does not exist\"\n\n    print(\"response\", str(result))\n\n\n    return str(result)","repo_name":"YouJaeBeom/BitcoinProject","sub_path":"bitcoin_api.py","file_name":"bitcoin_api.py","file_ext":"py","file_size_in_byte":6880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"28482461345","text":"import requests\nimport os\nimport time\n\nserver = \"http://localhost:5000/\"\n\nrun = True\ncards_points = {\n    1: 2,\n    2: 3,\n    3: 4,\n    4: 5,\n    5: 6,\n    6: 7,\n    7: 8,\n    8: 9,\n    9: 10,\n    10: 10,\n    11: 10,\n    12: 10,\n    13: 11\n}\n\n\ndef 
enter_game(bid):\n js = requests.post(server + \"begin\", json={\"header\": \"register\", \"bid\": bid}).json()\n if js[\"header\"] == \"error\":\n raise Exception(js[\"message\"])\n return js\n\n\ndef split(gid):\n js = requests.post(server + \"game-\" + str(gid), json={\"header\": \"split\"}).json()\n if js[\"header\"] == \"error\":\n raise Exception(js[\"message\"])\n return js\n\n\ndef double(gid):\n js = requests.post(server + \"game-\" + str(gid), json={\"header\": \"double\"}).json()\n if js[\"header\"] == \"error\":\n raise Exception(js[\"message\"])\n return js\n\n\ndef pas(gid):\n js = requests.post(server + \"game-\" + str(gid), json={\"header\": \"pas\"}).json()\n if js[\"header\"] == \"error\":\n raise Exception(js[\"message\"])\n return js\n\n\ndef insure(gid):\n js = requests.post(server + \"game-\" + str(gid), json={\"header\": \"insure\"}).json()\n if js[\"header\"] == \"error\":\n raise Exception(js[\"message\"])\n return js\n\n\ndef get(gid):\n js = requests.post(server + \"game-\" + str(gid), json={\"header\": \"get\"}).json()\n if js[\"header\"] == \"error\":\n raise Exception(js[\"message\"])\n return js\n\n\ndef print_hand(hand):\n print(\"(value \" + str(count_cards(hand)) + \"):\")\n for card in hand[\"cards\"]:\n print(ranks[card[\"rank\"]] + \" of \" + colors[card[\"color\"]] + \" \")\n\n\ndef print_table(js):\n print(\"insurance: \" + str(js[\"insurance\"]))\n print(\"bid: \" + str(js[\"bid\"]))\n hands = js[\"hands\"]\n print()\n for hand in hands:\n print(\"hand \", end=\"\")\n print_hand(hand)\n print(\"\\ncroupier \", end=\"\")\n print_hand(js[\"croupier\"])\n\n\ndef print_end(js):\n print(\"winner: \" + str(js[\"winner\"]))\n hands = js[\"hands\"]\n print()\n for hand in hands:\n print(\"hand \", end=\"\")\n print_hand(hand)\n print(\"\\ncroupier \", end=\"\")\n print_hand(js[\"croupier\"])\n\n\ndef cls():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef count_cards(hand):\n cards = hand[\"cards\"]\n aces = len([x for x in cards if x[\"rank\"] == 13])\n value = sum([cards_points[x[\"rank\"]] for x in cards])\n\n if value <= 21 or aces == 0:\n return value\n\n while aces > 0 and value > 21:\n value -= 10 # value = value - 11 (ace) + 1 (other value ace)\n aces -= 1\n return value\n\n\nranks = {\n 1: \"2\",\n 2: \"3\",\n 3: \"4\",\n 4: \"5\",\n 5: \"6\",\n 6: \"7\",\n 7: \"8\",\n 8: \"9\",\n 9: \"10\",\n 10: \"Jack\",\n 11: \"Queen\",\n 12: \"King\",\n 13: \"Ace\"\n}\ncolors = {\n \"D\": \"Diamonds\",\n \"S\": \"Spades\",\n \"H\": \"Hearts\",\n \"C\": \"Clubs\"\n}\n\ninit_js = enter_game(10)\ngid = init_js[\"id\"]\nactions = {\n \"split\": split,\n \"double\": double,\n \"pas\": pas,\n \"insure\": insure,\n \"hit\": get\n}\nprint(\"Game started (id \" + str(gid) + \")\")\nprint_table(init_js[\"table\"])\n\nwhile run:\n command = input()\n js = \"\"\n cls()\n if command == \"quit\":\n break\n try:\n js = actions[command](gid)\n if js[\"header\"] == \"in_game\":\n print_table(js)\n elif js[\"header\"] == \"end_game\":\n print_end(js)\n break\n except Exception as e:\n print(\"Error: \" + str(e))\n print_table(js)\ntime.sleep(3)\ncls()\n","repo_name":"Creestoph/Blackjack","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10944578694","text":"#!/usr/bin/env py.test\n\"\"\"\nTests of the interaction between all the schemes\nand the problem interface.\n\nThese tests should act as a documentation 
of\nthe control and data flow between the classes.\n\"\"\"\n\nimport inspect\n\nfrom cbcflow import NSProblem, all_schemes\nfrom cbcpost import ParamDict\nfrom cbcflow import *\nfrom cbcflow.dol import *\n#from cbcflow.utils.common import Timer\nfrom cbcpost.utils import Timer\n\nclass Left(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and x[0] <= DOLFIN_EPS\n\nclass Right(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and x[0] >= 1.0-DOLFIN_EPS\n\n# Make meshes only once\nsquare = UnitSquareMesh(8,8)\nsquare_facet_domains = FacetFunction(\"size_t\", square)\nsquare_facet_domains.set_all(0)\nLeft().mark(square_facet_domains, 1)\nRight().mark(square_facet_domains, 2)\n\ncube = UnitCubeMesh(5,5,5)\ncube_facet_domains = FacetFunction(\"size_t\", cube)\ncube_facet_domains.set_all(0)\nLeft().mark(cube_facet_domains, 1)\nRight().mark(cube_facet_domains, 2)\n\nc0 = Constant(0.0)\nc1 = Constant(0.0)\n\nclass MockProblem(NSProblem):\n def __init__(self, params=None):\n NSProblem.__init__(self, params)\n\n # Select mesh and domains based on dimension parameter\n d = self.params.d\n if d == 2:\n mesh = square\n facet_domains = square_facet_domains\n elif d == 3:\n mesh = cube\n facet_domains = cube_facet_domains\n\n # Required initialization\n self.initialize_geometry(mesh, facet_domains=facet_domains)\n\n # These will be returned from problem interface functions\n self._observations = []\n self._controls = []\n self._ics = (as_vector([c0]*d),\n c0)\n self._bcs = ([([c0]*d, 0)],\n [(c0, 1), (c0, 2)])\n\n # List of recorded calls through the problem interface\n self._calls = []\n\n @classmethod\n def default_params(cls):\n params = NSProblem.default_params()\n params.replace(\n T0=1.0,\n T=2.0,\n dt=0.5,\n\n mu=1.0,\n rho=1.0,\n )\n params.update(\n d=2,\n )\n return params\n\n def observations(self, spaces, t):\n # Record this call\n self._calls.append( (\"observations\", float(t)) )\n\n # Check that input data conforms to expected interface\n assert hasattr(spaces, 'U')\n assert isinstance(t, Constant)\n\n # Return something conforming to problem specification\n return self._observations\n\n def controls(self, spaces):\n # Record this call\n self._calls.append( (\"controls\",) )\n\n # Check that input data conforms to expected interface\n assert hasattr(spaces, 'V')\n\n # Return something conforming to problem specification\n return self._controls\n\n def initial_conditions(self, spaces, controls):\n # Record this call\n self._calls.append( (\"initial_conditions\",) )\n\n # Check that input data conforms to expected interface\n d = self.params.d\n assert controls is self._controls\n assert spaces.d == d\n\n # Return something conforming to problem specification\n return self._ics\n\n def boundary_conditions(self, spaces, u, p, t, controls):\n # Record this call\n self._calls.append( (\"boundary_conditions\", type(spaces), type(u), type(p), type(t), float(t), len(controls)) )\n\n # Check that input data conforms to expected interface\n assert controls is self._controls\n assert hasattr(spaces, 'Q')\n\n # TODO: Checks ...\n\n # Return something conforming to problem specification\n return self._bcs\n\n\ndef test_scheme_calls_update_properly(scheme_factory, dim):\n # Mock postprocessing update function\n update_record = []\n\n # Run scheme with mock problem and configured scheme\n problem = MockProblem({'d':dim})\n scheme = scheme_factory()\n\n #namespace = scheme.solve(problem, Timer(0))\n for namespace in scheme.solve(problem, Timer(0)):\n 
update_record.append((namespace.t, namespace.timestep))\n\n # Check that update has been called properly and that the timesteps are as they should\n assert [r[0] for r in update_record] == [1.0,1.5,2.0]\n assert [r[1] for r in update_record] == [0,1,2]\n\n # TODO: Add checks for all problem interface components\n\n # Check that all problem interface functions were called\n callnames = [c[0] for c in problem._calls]\n assert \"observations\" in callnames\n assert \"controls\" in callnames\n assert \"initial_conditions\" in callnames\n assert \"boundary_conditions\" in callnames\n\n # TODO: Inspect problem._calls data\n\n # Check that the returned namespace contains all expected values\n assert \"spaces\" in namespace\n assert \"observations\" in namespace\n assert \"controls\" in namespace\n assert \"state\" in namespace\n assert \"t\" in namespace\n assert \"timestep\" in namespace\n\n # TODO: Inspect namespace contents\n\n # Check that the spaces object has the right function space properties\n spaces = namespace[\"spaces\"]\n assert spaces.V.ufl_element().degree() == scheme.params.u_degree\n assert spaces.Q.ufl_element().degree() == scheme.params.p_degree\n assert spaces.V.ufl_element().value_shape() == (dim,)\n assert spaces.Q.ufl_element().value_shape() == ()\n","repo_name":"fmoabreu/fluxo","sub_path":"test/unit/test_problem_scheme_interfacing.py","file_name":"test_problem_scheme_interfacing.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43557501040","text":"#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n\n# create a new 'Legacy VTK Reader'\nv_in10_NN_0vtk = LegacyVTKReader(FileNames=['C:\\\\Program Files\\\\blueCFD-Core-2017\\\\ofuser-of5\\\\run\\\\check\\\\V_in=1.0_NN\\\\VTK\\\\V_in=1.0_NN_0.vtk'])\n\n# get active view\nrenderView1 = GetActiveViewOrCreate('RenderView')\n# uncomment following to set a specific view size\n# renderView1.ViewSize = [1002, 740]\n\n# show data in view\nv_in10_NN_0vtkDisplay = Show(v_in10_NN_0vtk, renderView1)\n# trace defaults for the display properties.\nv_in10_NN_0vtkDisplay.Representation = 'Surface'\nv_in10_NN_0vtkDisplay.ColorArrayName = [None, '']\nv_in10_NN_0vtkDisplay.OSPRayScaleArray = 'C'\nv_in10_NN_0vtkDisplay.OSPRayScaleFunction = 'PiecewiseFunction'\nv_in10_NN_0vtkDisplay.SelectOrientationVectors = 'C'\nv_in10_NN_0vtkDisplay.ScaleFactor = 0.4\nv_in10_NN_0vtkDisplay.SelectScaleArray = 'C'\nv_in10_NN_0vtkDisplay.GlyphType = 'Arrow'\nv_in10_NN_0vtkDisplay.GlyphTableIndexArray = 'C'\nv_in10_NN_0vtkDisplay.DataAxesGrid = 'GridAxesRepresentation'\nv_in10_NN_0vtkDisplay.PolarAxes = 'PolarAxesRepresentation'\nv_in10_NN_0vtkDisplay.ScalarOpacityUnitDistance = 0.36371927394484016\nv_in10_NN_0vtkDisplay.GaussianRadius = 0.2\nv_in10_NN_0vtkDisplay.SetScaleArray = ['POINTS', 'Cx']\nv_in10_NN_0vtkDisplay.ScaleTransferFunction = 'PiecewiseFunction'\nv_in10_NN_0vtkDisplay.OpacityArray = ['POINTS', 'Cx']\nv_in10_NN_0vtkDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# create a new 'Slice'\nslice1 = Slice(Input=v_in10_NN_0vtk)\nslice1.SliceType = 'Plane'\nslice1.SliceOffsetValues = [0.0]\n\n# init the 'Plane' selected for 'SliceType'\nslice1.SliceType.Origin = [0.0, 1.0, 0.0]\n\n# 
Properties modified on slice1.SliceType\nslice1.SliceType.Normal = [0.0, 0.0, 1.0]\n\n# Properties modified on slice1.SliceType\nslice1.SliceType.Normal = [0.0, 0.0, 1.0]\n\n# show data in view\nslice1Display = Show(slice1, renderView1)\n# trace defaults for the display properties.\nslice1Display.Representation = 'Surface'\nslice1Display.ColorArrayName = [None, '']\nslice1Display.OSPRayScaleArray = 'C'\nslice1Display.OSPRayScaleFunction = 'PiecewiseFunction'\nslice1Display.SelectOrientationVectors = 'C'\nslice1Display.ScaleFactor = 0.4\nslice1Display.SelectScaleArray = 'C'\nslice1Display.GlyphType = 'Arrow'\nslice1Display.GlyphTableIndexArray = 'C'\nslice1Display.DataAxesGrid = 'GridAxesRepresentation'\nslice1Display.PolarAxes = 'PolarAxesRepresentation'\nslice1Display.GaussianRadius = 0.2\nslice1Display.SetScaleArray = ['POINTS', 'Cx']\nslice1Display.ScaleTransferFunction = 'PiecewiseFunction'\nslice1Display.OpacityArray = ['POINTS', 'Cx']\nslice1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(v_in10_NN_0vtk, renderView1)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# change representation type\nslice1Display.SetRepresentationType('Surface With Edges')\n\n# set scalar coloring\nColorBy(slice1Display, ('CELLS', 'Ux'))\n\n# rescale color and/or opacity maps used to include current data range\nslice1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\nslice1Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'Ux'\nuxLUT = GetColorTransferFunction('Ux')\nuxLUT.RGBPoints = [-10.0, 0.231373, 0.298039, 0.752941, -0.5, 0.865003, 0.865003, 0.865003, 9.0, 0.705882, 0.0156863, 0.14902]\nuxLUT.ScalarRangeInitialized = 1.0\n\n# Rescale transfer function\nuxLUT.RescaleTransferFunction(-10.6, 10.6)\n\n# get opacity transfer function/opacity map for 'Ux'\nuxPWF = GetOpacityTransferFunction('Ux')\nuxPWF.Points = [-10.0, 0.0, 0.5, 0.0, 9.0, 1.0, 0.5, 0.0]\nuxPWF.ScalarRangeInitialized = 1\n\n# Rescale transfer function\nuxPWF.RescaleTransferFunction(-10.6, 10.6)\n\n# get color legend/bar for uxLUT in view renderView1\nuxLUTColorBar = GetScalarBar(uxLUT, renderView1)\nuxLUTColorBar.Title = 'Ux'\nuxLUTColorBar.ComponentTitle = ''\n\n# change scalar bar placement\nuxLUTColorBar.Orientation = 'Horizontal'\nuxLUTColorBar.WindowLocation = 'AnyLocation'\nuxLUTColorBar.Position = [0.3421556886227546, 0.0445945945945946]\nuxLUTColorBar.ScalarBarLength = 0.3299999999999996\n\n# set active source\nSetActiveSource(v_in10_NN_0vtk)\n\n# show data in view\nv_in10_NN_0vtkDisplay = Show(v_in10_NN_0vtk, renderView1)\n\n# hide data in view\nHide(v_in10_NN_0vtk, renderView1)\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-0.0060213444053787216, 0.8649845884368418, 5.794619354882062]\nrenderView1.CameraFocalPoint = [-0.0060213444053787216, 0.8649845884368418, -0.252000704789296]\nrenderView1.CameraParallelScale = 2.29128784747792\n\n# save screenshot\nSaveScreenshot('C:/Program Files/blueCFD-Core-2017/ofuser-of5/run/check/V_in=1.0_NN/Ux.jpg', renderView1, ImageResolution=[1002, 740])\n\n# set active source\nSetActiveSource(slice1)\n\n# set scalar coloring\nColorBy(slice1Display, ('CELLS', 'Uy'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(uxLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\nslice1Display.RescaleTransferFunctionToDataRange(True, 
False)\n\n# show color bar/color legend\nslice1Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'Uy'\nuyLUT = GetColorTransferFunction('Uy')\nuyLUT.RGBPoints = [-0.9477149844169617, 0.231373, 0.298039, 0.752941, 0.0024490058422088623, 0.865003, 0.865003, 0.865003, 0.9526129961013794, 0.705882, 0.0156863, 0.14902]\nuyLUT.ScalarRangeInitialized = 1.0\n\n# Rescale transfer function\nuyLUT.RescaleTransferFunction(-1.15, 1.15)\n\n# get opacity transfer function/opacity map for 'Uy'\nuyPWF = GetOpacityTransferFunction('Uy')\nuyPWF.Points = [-0.9477149844169617, 0.0, 0.5, 0.0, 0.9526129961013794, 1.0, 0.5, 0.0]\nuyPWF.ScalarRangeInitialized = 1\n\n# Rescale transfer function\nuyPWF.RescaleTransferFunction(-1.15, 1.15)\n\n# get color legend/bar for uyLUT in view renderView1\nuyLUTColorBar = GetScalarBar(uyLUT, renderView1)\nuyLUTColorBar.Title = 'Uy'\nuyLUTColorBar.ComponentTitle = ''\n\n# change scalar bar placement\nuyLUTColorBar.Orientation = 'Horizontal'\nuyLUTColorBar.WindowLocation = 'AnyLocation'\nuyLUTColorBar.Position = [0.34015968063872243, 0.0]\nuyLUTColorBar.ScalarBarLength = 0.33000000000000024\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# set active source\nSetActiveSource(v_in10_NN_0vtk)\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-0.0060213444053787216, 0.8649845884368418, 5.794619354882062]\nrenderView1.CameraFocalPoint = [-0.0060213444053787216, 0.8649845884368418, -0.252000704789296]\nrenderView1.CameraParallelScale = 2.29128784747792\n\n# save screenshot\nSaveScreenshot('C:/Program Files/blueCFD-Core-2017/ofuser-of5/run/check/V_in=1.0_NN/Uy.jpg', renderView1, ImageResolution=[1002, 740])\n\n#### saving camera placements for all active views\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-0.0060213444053787216, 0.8649845884368418, 5.794619354882062]\nrenderView1.CameraFocalPoint = [-0.0060213444053787216, 0.8649845884368418, -0.252000704789296]\nrenderView1.CameraParallelScale = 2.29128784747792\n\n#### uncomment the following to render all views\n# RenderAllViews()\n# alternatively, if you want to write images, you can use SaveScreenshot(...).","repo_name":"jtnmlvk/Machine-Learning-based-Surrogate-Model-for-CFD","sub_path":"Lib/paraView/getContoursUxUy.py","file_name":"getContoursUxUy.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16923722265","text":"import threading\nimport queue\n\nfrom .DyEvent import *\n\n\nclass DyTimerHand(threading.Thread):\n def __init__(self, queue, eventEngine):\n super().__init__()\n\n self._intervals = {} # {interval: interval count}\n self._queue = queue\n self._eventEngine = eventEngine\n\n def run(self):\n while True:\n try:\n event = self._queue.get(block=True, timeout=1)\n interval = event.data\n\n # register event\n if event.type == DyEventType.register:\n if interval not in self._intervals:\n self._intervals[interval] = interval\n else: # unregister event\n if interval in self._intervals:\n del self._intervals[interval]\n \n except queue.Empty: # 1s time out\n for interval in self._intervals:\n count = self._intervals[interval]\n\n count -= 1\n\n # trigger timer out event\n if count == 0:\n self._eventEngine.put(DyEvent(DyEventType.timer + str(interval)))\n\n # new start\n count = interval\n\n if DyEventEngine.enableTimerLog:\n print('Timer_%s'%interval)\n\n self._intervals[interval] = 
count\n\n\nclass DyEventHand(threading.Thread):\n    def __init__(self, queue):\n        super().__init__()\n\n        self._handlers = {}  # {event type:[handlers]}\n        self._queue = queue\n\n    def run(self):\n        while True:\n            event = self._queue.get()\n\n            if event.type == DyEventType.register:\n                self._processRegisterEvent(event.data['type'], event.data['handler'])\n            elif event.type == DyEventType.unregister:\n                self._processUnregisterEvent(event.data['type'], event.data['handler'])\n            else:\n                self._processOtherEvent(event)\n\n    def _processRegisterEvent(self, type, handler):\n        if type not in self._handlers:\n            self._handlers[type] = []\n\n        if handler not in self._handlers[type]:\n            self._handlers[type].append(handler)\n\n    def _processUnregisterEvent(self, type, handler):\n        if type in self._handlers:\n            if handler in self._handlers[type]:\n                self._handlers[type].remove(handler)\n\n                if not self._handlers[type]:\n                    del self._handlers[type]\n\n    def _processOtherEvent(self, event):\n        if event.type in self._handlers:\n            for handler in self._handlers[event.type]:\n                handler(event)\n\n\nclass DyEventEngine(threading.Thread):\n    \"\"\"\n    Event engine class.\n\n    Non-timer events:\n    A listener is identified by the triple (event type, handler, hand).\n    Registering the same listener repeatedly (same event type, handler and hand) only takes effect the first time.\n    Unregistering a listener that was never registered is supported (it is a no-op).\n\n    Timer events:\n    A timer listener is identified by the triple (handler, hand, timer interval).\n    Registering the same timer listener repeatedly (same handler, hand and timer interval) only takes effect the first time.\n    That is, the same handler and hand can still be registered successfully under a different timer interval.\n    Unregistering a timer listener that was never registered is supported (it is a no-op).\n    \"\"\"\n    enableTimerLog = False\n\n\n    def __init__(self, handNbr, timer=True):\n        super().__init__()\n\n        self._handNbr = handNbr\n\n        # timer\n        if timer:\n            self._timerHandQueue = queue.Queue()\n            self._timerHand = DyTimerHand(self._timerHandQueue, self)\n        else:\n            self._timerHandQueue = None\n            self._timerHand = None\n\n        self._timerMap = {}  # {interval:{hand:[handlers]}}\n\n        # hands\n        self._hands = []\n        self._handQueues = []  # each hand maps to one element in list, [Queue()]\n\n        # main data of event engine\n        self._engineQueue = queue.Queue()\n        self._eventMap = {}  # which hand handles which event, {event type:{hand:[handlers]}}\n\n        self._initHands()\n\n    def _initHands(self):\n        for i in range(self._handNbr):\n            queue_ = queue.Queue()\n            self._handQueues.append(queue_)\n\n            self._hands.append(DyEventHand(queue_))\n\n    def _processUnregister(self, data):\n        \"\"\" @data: {'type':,'handler':, 'hand':}\n        \"\"\"\n        type = data['type']\n        handler = data['handler']\n        hand = data['hand']\n\n        event = DyEvent(DyEventType.unregister)\n        event.data['type'] = type\n        event.data['handler'] = handler\n\n        # remove handler from event map\n        if type in self._eventMap:\n            if hand in self._eventMap[type]:\n                if handler in self._eventMap[type][hand]:\n                    self._eventMap[type][hand].remove(handler)\n\n                    # unregister from corresponding hand\n                    self._handQueues[hand].put(event)\n\n                    if not self._eventMap[type][hand]:\n                        del self._eventMap[type][hand]\n\n                        if not self._eventMap[type]:\n                            del self._eventMap[type]\n\n    def _processRegister(self, data):\n        \"\"\" @data: {'type':,'handler':, 'hand':}\n        \"\"\"\n        type = data['type']\n        handler = data['handler']\n        hand = data['hand']\n\n        event = DyEvent(DyEventType.register)\n        event.data['type'] = type\n        event.data['handler'] = handler\n\n        # add to event map\n        if type not in self._eventMap:\n            self._eventMap[type] = {}\n\n        if hand not in self._eventMap[type]:\n            self._eventMap[type][hand] = []\n\n        if handler not in self._eventMap[type][hand]:\n            self._eventMap[type][hand].append(handler)\n\n        # register to corresponding hand\n        self._handQueues[hand].put(event)\n\n    def _processRegisterTimer(self, 
data):\n # unpack\n interval = data['interval']\n handler = data['handler']\n hand = data['hand']\n\n # register timer event to corresponding hand\n self._processRegister(dict(type=DyEventType.timer + str(interval),\n handler=handler,\n hand=hand))\n\n # add to timer map\n if interval not in self._timerMap:\n self._timerMap[interval] = {}\n\n # register new interval to timer hand\n event = DyEvent(DyEventType.register)\n event.data = interval\n\n self._timerHandQueue.put(event)\n\n if hand not in self._timerMap[interval]:\n self._timerMap[interval][hand] = []\n\n if handler not in self._timerMap[interval][hand]:\n self._timerMap[interval][hand].append(handler)\n\n def _processUnregisterTimer(self, data):\n # unpack\n interval = data['interval']\n handler = data['handler']\n hand = data['hand']\n\n # unregister timer event from corresponding hand\n self._processUnregister(dict(type=DyEventType.timer + str(interval),\n handler=handler,\n hand=hand))\n\n # remove handler from timer map\n if interval in self._timerMap:\n if hand in self._timerMap[interval]:\n if handler in self._timerMap[interval][hand]:\n self._timerMap[interval][hand].remove(handler)\n\n if not self._timerMap[interval][hand]: # empty\n del self._timerMap[interval][hand]\n\n if not self._timerMap[interval]: # empty\n del self._timerMap[interval]\n\n # no any handler for this timer, so unregister interval from timer hand\n event = DyEvent(DyEventType.unregister)\n event.data = interval\n\n self._timerHandQueue.put(event)\n\n def run(self):\n while True:\n event = self._engineQueue.get()\n\n if event.type == DyEventType.registerTimer:\n self._processRegisterTimer(event.data)\n\n elif event.type == DyEventType.register:\n self._processRegister(event.data)\n\n elif event.type == DyEventType.unregisterTimer:\n self._processUnregisterTimer(event.data)\n\n elif event.type == DyEventType.unregister:\n self._processUnregister(event.data)\n\n else: # event for applications\n hands = self._eventMap.get(event.type)\n if hands is not None:\n for hand in hands: # hand which is listening this event\n self._handQueues[hand].put(event)\n\n def registerTimer(self, handler, hand=None, interval=1):\n if hand is None:\n hand = self._handNbr - 1\n\n event = DyEvent(DyEventType.registerTimer)\n event.data = dict(hand=hand, handler=handler, interval=interval)\n\n self.put(event)\n\n def register(self, type, handler, hand=None):\n if hand is None:\n hand = self._handNbr - 1\n\n event = DyEvent(DyEventType.register)\n event.data = dict(type=type, handler=handler, hand=hand)\n\n self.put(event)\n\n def unregister(self, type, handler, hand=None):\n if hand is None:\n hand = self._handNbr - 1\n\n event = DyEvent(DyEventType.unregister)\n event.data = dict(type=type, handler=handler, hand=hand)\n\n self.put(event)\n\n def unregisterTimer(self, handler, hand=None, interval=1):\n if hand is None:\n hand = self._handNbr - 1\n\n event = DyEvent(DyEventType.unregisterTimer)\n event.data = dict(hand=hand, handler=handler, interval=interval)\n\n self.put(event)\n\n def put(self, event):\n self._engineQueue.put(event)\n\n def stop(self):\n pass\n\n def start(self):\n for hand in self._hands:\n hand.start()\n\n if self._timerHand:\n self._timerHand.start()\n\n super().start()\n\n\nclass DyDummyEventEngine:\n def __init__(self):\n pass\n\n def put(self, event):\n 
pass\n","repo_name":"MicroEngine/DevilYuan","sub_path":"EventEngine/DyEventEngine.py","file_name":"DyEventEngine.py","file_ext":"py","file_size_in_byte":10242,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"} +{"seq_id":"252802350","text":"from RLTest import Env\nfrom redisgraph import Graph, Node, Edge\nfrom base import FlowTestsBase\n\ndis_redis = None\nredis_graph = None\nredis_con = None\nnode_names = [\"A\", \"B\", \"C\", \"D\"]\n\n# A can reach 3 nodes, B can reach 2 nodes, C can reach 1 node\nmax_results = 6\n\nclass VariableLengthTraversals(FlowTestsBase):\n    def __init__(self):\n        super(VariableLengthTraversals, self).__init__()\n        global redis_con\n        global redis_graph\n        redis_con = self.env.getConnection()\n        redis_graph = Graph(\"G\", redis_con)\n        self.populate_graph()\n\n    @classmethod\n    def tearDownClass(cls):\n        if dis_redis is not None:\n            dis_redis.stop()\n\n    @classmethod\n    def populate_graph(cls):\n        global redis_graph\n\n        nodes = []\n        # Create nodes\n        for n in node_names:\n            node = Node(label=\"node\", properties={\"name\": n})\n            redis_graph.add_node(node)\n            nodes.append(node)\n\n        # Create edges\n        for i in range(len(nodes) - 1):\n            edge = Edge(nodes[i], \"knows\", nodes[i+1], properties={\"connects\": node_names[i] + node_names[i+1]})\n            redis_graph.add_edge(edge)\n\n        redis_graph.commit()\n\n    # Sanity check against single-hop traversal\n    def test01_conditional_traverse(self):\n        query = \"\"\"MATCH (a)-[e]->(b) RETURN a.name, e.connects, b.name ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        expected_result = [['A', 'AB', 'B'],\n                           ['B', 'BC', 'C'],\n                           ['C', 'CD', 'D']]\n        self.env.assertEquals(actual_result.result_set, expected_result)\n\n    # Traversal with no labels\n    def test02_unlabeled_traverse(self):\n        query = \"\"\"MATCH (a)-[*]->(b) RETURN a.name, b.name ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        self.env.assertEquals(len(actual_result.result_set), max_results)\n\n        query = \"\"\"MATCH (a)<-[*]-(b) RETURN a, b ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        self.env.assertEquals(len(actual_result.result_set), max_results)\n\n    # Traversal with labeled source\n    def test03_source_labeled(self):\n        query = \"\"\"MATCH (a:node)-[*]->(b) RETURN a.name, b.name ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        self.env.assertEquals(len(actual_result.result_set), max_results)\n\n        query = \"\"\"MATCH (a:node)<-[*]-(b) RETURN a.name, b.name ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        self.env.assertEquals(len(actual_result.result_set), max_results)\n\n    # Traversal with labeled dest\n    def test04_dest_labeled(self):\n        query = \"\"\"MATCH (a)-[*]->(b:node) RETURN a.name, b.name ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        self.env.assertEquals(len(actual_result.result_set), max_results)\n\n        query = \"\"\"MATCH (a)<-[*]-(b:node) RETURN a.name, b.name ORDER BY a.name, b.name\"\"\"\n        actual_result = redis_graph.query(query)\n        self.env.assertEquals(len(actual_result.result_set), max_results)\n","repo_name":"simpletonDL/RedisGraph","sub_path":"tests/flow/test_variable_length_traversals.py","file_name":"test_variable_length_traversals.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"44906100257","text":"import torch\nimport torch.nn as nn\n\n\nclass TIRG(nn.Module):\n    \"\"\"The TIRG 
model.\n The method is described in\n Nam Vo, Lu Jiang, Chen Sun, Kevin Murphy, Li-Jia Li, Li Fei-Fei, James Hays.\n \"Composing Text and Image for Image Retrieval - An Empirical Odyssey\"\n CVPR 2019. arXiv:1812.07119\n \"\"\"\n\n def __init__(self, text_channel, img_channel):\n super().__init__()\n\n self.a = torch.nn.Parameter(torch.tensor([1.0, 10.0, 1.0, 1.0]))\n self.gated_feature_composer = torch.nn.Sequential(\n torch.nn.BatchNorm1d(img_channel + text_channel),\n torch.nn.ReLU(),\n torch.nn.Linear(img_channel + text_channel, img_channel),\n )\n self.res_info_composer = torch.nn.Sequential(\n torch.nn.BatchNorm1d(img_channel + text_channel),\n torch.nn.ReLU(),\n torch.nn.Linear(img_channel + text_channel, img_channel + text_channel),\n torch.nn.ReLU(),\n torch.nn.Linear(img_channel + text_channel, img_channel),\n )\n\n def forward(self, img_features, text_features):\n x = torch.cat((img_features, text_features), dim=1)\n f1 = self.gated_feature_composer(x)\n f2 = self.res_info_composer(x)\n f = torch.sigmoid(f1) * img_features * self.a[0] + f2 * self.a[1]\n return f\n\n\ndef build_tirg(text_channel, img_channel):\n return TIRG(text_channel, img_channel)\n","repo_name":"BrandonHanx/CompFashion","sub_path":"lib/models/composition/tirg.py","file_name":"tirg.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"23272689856","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\" Train from the replay files through python pickle file\"\n\nimport os\nimport sys\nimport time\nimport traceback\nimport argparse\nfrom datetime import datetime\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.optim import Adam, RMSprop\n\nfrom tensorboardX import SummaryWriter\n\nfrom absl import flags\nfrom absl import app\nfrom tqdm import tqdm\n\nfrom alphastarmini.core.arch.agent import Agent\n\nfrom alphastarmini.core.sl.feature import Feature\nfrom alphastarmini.core.sl.label import Label\nfrom alphastarmini.core.sl import sl_loss as Loss\nfrom alphastarmini.core.sl.dataset_pickle import OneReplayDataset, AllReplayDataset\n\nfrom alphastarmini.lib.utils import load_latest_model, initial_model_state_dict\nfrom alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP\nfrom alphastarmini.lib.hyper_parameters import SL_Training_Hyper_Parameters as SLTHP\n\n__author__ = \"Ruo-Ze Liu\"\n\ndebug = False\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", \"--path\", default=\"./data/replay_data/\", help=\"The path where data stored\")\nparser.add_argument(\"-r\", \"--restore\", action=\"store_true\", default=False, help=\"whether to restore model or not\")\nparser.add_argument(\"-t\", \"--type\", choices=[\"val\", \"test\", \"deploy\"], default=\"val\", help=\"Train type\")\nparser.add_argument(\"-m\", \"--model\", choices=[\"sl\", \"rl\"], default=\"sl\", help=\"Choose model type\")\nparser.add_argument(\"-n\", \"--norm\", choices=[True, False], default=False, help=\"Use norm for data\")\nargs = parser.parse_args()\n\n# training paramerters\nPATH = args.path\nMODEL = args.model\nTYPE = args.type\nRESTORE = args.restore\nNORM = args.norm\n\n# hyper paramerters\nBATCH_SIZE = AHP.batch_size\nprint('BATCH_SIZE:', BATCH_SIZE) if debug else None\nSEQ_LEN = AHP.sequence_length\nprint('SEQ_LEN:', SEQ_LEN) if debug else None\n\nNUM_EPOCHS = 100 # SLTHP.num_epochs\nLEARNING_RATE = 1e-4 # 
SLTHP.learning_rate\nWEIGHT_DECAY = 1e-5 # SLTHP.weight_decay\nCLIP = 0.5 # SLTHP.clip\n\nNUM_ITERS = 100 # 100\nFILE_SIZE = 100 # 100\n\n# set random seed\ntorch.manual_seed(SLTHP.seed)\nnp.random.seed(SLTHP.seed)\n\n# gpu setting\nON_GPU = torch.cuda.is_available()\nDEVICE = torch.device(\"cuda:0\" if ON_GPU else \"cpu\")\ntorch.autograd.set_detect_anomaly(True)\n\n# model path\nMODEL_PATH = \"./model/\"\nif not os.path.exists(MODEL_PATH):\n os.mkdir(MODEL_PATH)\nSAVE_PATH = os.path.join(MODEL_PATH, MODEL + \"_\" + time.strftime(\"%y-%m-%d_%H-%M-%S\", time.localtime()))\n\n\ndef train_for_val(replays, replay_data, agent):\n if RESTORE:\n # agent.model = load_latest_model(model_type=MODEL, path=MODEL_PATH)\n # use state dict to replace\n initial_model_state_dict(model_type=MODEL, path=MODEL_PATH, model=agent.model)\n\n print('torch.cuda.device_count():', torch.cuda.device_count())\n # if torch.cuda.device_count() > 1:\n # used_gpu_nums = 2 # torch.cuda.device_count()\n # print(\"Let's use\", used_gpu_nums, \"GPUs!\")\n # # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs\n # agent.model = nn.DataParallel(agent.model, device_ids=[0, 1])\n\n agent.model.to(DEVICE)\n # agent.model.cuda()\n\n now = datetime.now()\n summary_path = \"./log/\" + now.strftime(\"%Y%m%d-%H%M%S\") + \"/\"\n writer = SummaryWriter(summary_path)\n\n train_replays = AllReplayDataset.get_training_for_val_data(replays)\n val_replays = AllReplayDataset.get_val_data(replays)\n\n train_set = AllReplayDataset(train_replays)\n val_set = AllReplayDataset(val_replays)\n\n print('len(train_set)', len(train_set))\n train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)\n print('len(train_loader)', len(train_loader))\n\n val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)\n\n optimizer = Adam(agent.model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)\n\n train_loss = 0\n batch_iter = 0\n for epoch in range(NUM_EPOCHS):\n agent.model.train()\n\n loss_sum = 0.0\n i = 0\n last_traj = None\n\n for j in range(NUM_ITERS):\n traj = next(iter(train_loader))\n\n print('traj.shape:', traj.shape) if debug else None\n\n traj = traj.to(DEVICE).float()\n\n # if last_traj is not None:\n # print(\"traj == last_traj ?\", torch.equal(traj, last_traj)) if debug else None\n\n # with torch.autograd.detect_anomaly():\n try:\n loss, loss_list, acc_num_list = Loss.get_sl_loss(traj, agent.model)\n optimizer.zero_grad()\n loss.backward() # note, we don't need retain_graph=True if we set hidden_state.detach()\n\n action_accuracy = acc_num_list[0] / (acc_num_list[1] + 1e-9)\n move_camera_accuracy = acc_num_list[2] / (acc_num_list[3] + 1e-9)\n non_camera_accuracy = acc_num_list[4] / (acc_num_list[5] + 1e-9)\n\n # add a grad clip\n parameters = [p for p in agent.model.parameters() if p is not None and p.requires_grad]\n torch.nn.utils.clip_grad_norm_(parameters, CLIP)\n\n optimizer.step()\n loss_sum += loss.item()\n\n print(\"One batch loss: {:.6f}.\".format(loss.item()))\n writer.add_scalar('OneBatch/Loss', loss.item(), batch_iter)\n\n if True:\n print(\"One batch action_type_loss loss: {:.6f}.\".format(loss_list[0].item()))\n writer.add_scalar('OneBatch/action_type_loss', loss_list[0].item(), batch_iter)\n\n print(\"One batch delay_loss loss: {:.6f}.\".format(loss_list[1].item()))\n writer.add_scalar('OneBatch/delay_loss', loss_list[1].item(), batch_iter)\n\n print(\"One batch queue_loss loss: {:.6f}.\".format(loss_list[2].item()))\n writer.add_scalar('OneBatch/queue_loss', loss_list[2].item(), 
batch_iter)\n\n print(\"One batch units_loss loss: {:.6f}.\".format(loss_list[3].item()))\n writer.add_scalar('OneBatch/units_loss', loss_list[3].item(), batch_iter)\n\n print(\"One batch target_unit_loss loss: {:.6f}.\".format(loss_list[4].item()))\n writer.add_scalar('OneBatch/target_unit_loss', loss_list[4].item(), batch_iter)\n\n print(\"One batch target_location_loss loss: {:.6f}.\".format(loss_list[5].item()))\n writer.add_scalar('OneBatch/target_location_loss', loss_list[5].item(), batch_iter)\n\n print(\"One batch action_accuracy: {:.6f}.\".format(action_accuracy))\n writer.add_scalar('OneBatch/action_accuracy', action_accuracy, batch_iter)\n\n print(\"One batch move_camera_accuracy: {:.6f}.\".format(move_camera_accuracy))\n writer.add_scalar('OneBatch/move_camera_accuracy', move_camera_accuracy, batch_iter)\n\n print(\"One batch non_camera_accuracy: {:.6f}.\".format(non_camera_accuracy))\n writer.add_scalar('OneBatch/non_camera_accuracy', non_camera_accuracy, batch_iter)\n\n #last_traj = traj.clone().detach()\n except Exception as e: \n print(traceback.format_exc())\n\n i += 1\n batch_iter += 1\n\n train_loss = loss_sum / (i + 1e-9)\n val_loss, val_acc = eval(agent, val_loader)\n\n print(\"Train loss: {:.6f}.\".format(train_loss))\n writer.add_scalar('Train/Loss', train_loss, epoch)\n print(\"Val loss: {:.6f}.\".format(val_loss))\n writer.add_scalar('Val/Loss', val_loss, epoch)\n\n # for accuracy of actions in val\n print(\"Val action acc: {:.6f}.\".format(val_acc[0]))\n writer.add_scalar('Val/action Acc', val_acc[0], epoch)\n print(\"Val move_camera acc: {:.6f}.\".format(val_acc[1]))\n writer.add_scalar('Val/move_camera Acc', val_acc[1], epoch)\n print(\"Val non_camera acc: {:.6f}.\".format(val_acc[2]))\n writer.add_scalar('Val/non_camera Acc', val_acc[2], epoch)\n\n print(\"beign to save model in \" + SAVE_PATH)\n\n # torch.save(agent.model, SAVE_PATH + \"\" + \".pkl\")\n # we use new save ways, only save the state_dict, and the extension changes to pt\n torch.save(agent.model.state_dict(), SAVE_PATH + \"\" + \".pth\")\n\n\ndef eval(agent, val_loader):\n agent.model.eval()\n\n loss_sum = 0.0\n i = 0\n\n action_acc_num = 0.\n action_all_num = 0.\n\n move_camera_action_acc_num = 0.\n move_camera_action_all_num = 0.\n\n non_camera_action_acc_num = 0.\n non_camera_action_all_num = 0.\n\n for traj in val_loader:\n traj = traj.to(DEVICE).float()\n\n loss, _, acc_num_list = Loss.get_sl_loss(traj, agent.model, use_eval=True)\n loss_sum += loss.item()\n\n action_acc_num += acc_num_list[0]\n action_all_num += acc_num_list[1]\n\n move_camera_action_acc_num += acc_num_list[2]\n move_camera_action_all_num += acc_num_list[3]\n\n non_camera_action_acc_num += acc_num_list[4]\n non_camera_action_all_num += acc_num_list[5]\n\n i += 1\n\n val_loss = loss_sum / (i + 1e-9)\n\n action_accuracy = action_acc_num / (action_all_num + 1e-9)\n move_camera_accuracy = move_camera_action_acc_num / (move_camera_action_all_num + 1e-9)\n non_camera_accuracy = non_camera_action_acc_num / (non_camera_action_all_num + 1e-9)\n\n return val_loss, [action_accuracy, move_camera_accuracy, non_camera_accuracy]\n\n\ndef test(on_server):\n # get all the data\n # Note: The feature here is actually feature+label\n\n agent = Agent()\n\n replays = AllReplayDataset.get_trainable_data(replay_data_path=PATH,\n max_file_size=FILE_SIZE, shuffle=True)\n\n '''\n for replay in replays:\n print('replay', replay)\n for tensor in replay:\n print('tensor:', tensor)\n '''\n\n if TYPE == 'val':\n train_for_val(replays, None, agent) # select 
the best hyper-parameters\n elif TYPE == 'test':\n train_for_test(replays, None, agent) # for test the performance in real life\n elif TYPE == 'deploy':\n train_for_deploy(replays, None, agent) # only used for production\n else:\n train_for_val(replays, None, agent)\n","repo_name":"liuruoze/mini-AlphaStar","sub_path":"alphastarmini/core/sl/sl_train_by_pickle.py","file_name":"sl_train_by_pickle.py","file_ext":"py","file_size_in_byte":10283,"program_lang":"python","lang":"en","doc_type":"code","stars":267,"dataset":"github-code","pt":"61"} +{"seq_id":"20803035954","text":"# coding: utf-8\n\n\"\"\"\n Wavefront REST API Documentation\n\n

The REST API enables you to interact with the Wavefront service by using standard REST API tools. You can use the REST API to automate commonly executed operations, for example to tag sources automatically.

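As a rough illustration of the kind of automation described above, the sketch below adds a tag to a source over plain HTTP. This is a hedged example: the cluster URL, the exact endpoint path, and the token are placeholder assumptions rather than details taken from this module, so check the Wavefront REST API reference before relying on the route.

import requests

WAVEFRONT_HOST = "https://example.wavefront.com"  # hypothetical cluster URL
API_TOKEN = "<api-token>"  # placeholder; never hard-code a real token

def tag_source(source_name, tag):
    # Assumed endpoint shape for adding a single tag to a source.
    resp = requests.put(
        WAVEFRONT_HOST + "/api/v2/source/" + source_name + "/tag/" + tag,
        headers={"Authorization": "Bearer " + API_TOKEN},
        timeout=10,
    )
    resp.raise_for_status()  # fail loudly if the call was rejected

for host in ("app-01", "app-02"):  # hypothetical source names
    tag_source(host, "production")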
When you make REST API calls outside the REST API documentation UI, to authenticate to the service you must use an API token associated with a user account or a service account. For information on how to get the API token, along with examples, see "Use the Wavefront REST API" in the Wavefront documentation.

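A minimal sketch of wiring such an API token into this generated client, assuming the usual swagger-codegen Configuration/ApiClient conventions that this package follows; the header key 'X-AUTH-TOKEN' and the host below are assumptions, not values taken from this file.

from wavefront_api_client import ApiClient, Configuration

config = Configuration()
config.host = "https://example.wavefront.com"  # hypothetical cluster URL
# swagger-codegen clients attach tokens through the api_key mapping;
# the exact header key expected by this package is assumed here.
config.api_key["X-AUTH-TOKEN"] = "<api-token>"  # placeholder token
api_client = ApiClient(configuration=config)
# api_client can then be handed to the generated *Api wrapper classes
# (for example a maintenance-window API) to make authenticated calls.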
# noqa: E501\n\n OpenAPI spec version: v2\n Contact: chitimba@wavefront.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom wavefront_api_client.configuration import Configuration\n\n\nclass MaintenanceWindow(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'created_epoch_millis': 'int',\n 'creator_id': 'str',\n 'customer_id': 'str',\n 'end_time_in_seconds': 'int',\n 'event_name': 'str',\n 'host_tag_group_host_names_group_anded': 'bool',\n 'id': 'str',\n 'point_tag_filter': 'str',\n 'point_tag_filter_anded': 'bool',\n 'reason': 'str',\n 'relevant_customer_tags': 'list[str]',\n 'relevant_customer_tags_anded': 'bool',\n 'relevant_host_names': 'list[str]',\n 'relevant_host_tags': 'list[str]',\n 'relevant_host_tags_anded': 'bool',\n 'running_state': 'str',\n 'sort_attr': 'int',\n 'start_time_in_seconds': 'int',\n 'targets': 'list[str]',\n 'title': 'str',\n 'updated_epoch_millis': 'int',\n 'updater_id': 'str'\n }\n\n attribute_map = {\n 'created_epoch_millis': 'createdEpochMillis',\n 'creator_id': 'creatorId',\n 'customer_id': 'customerId',\n 'end_time_in_seconds': 'endTimeInSeconds',\n 'event_name': 'eventName',\n 'host_tag_group_host_names_group_anded': 'hostTagGroupHostNamesGroupAnded',\n 'id': 'id',\n 'point_tag_filter': 'pointTagFilter',\n 'point_tag_filter_anded': 'pointTagFilterAnded',\n 'reason': 'reason',\n 'relevant_customer_tags': 'relevantCustomerTags',\n 'relevant_customer_tags_anded': 'relevantCustomerTagsAnded',\n 'relevant_host_names': 'relevantHostNames',\n 'relevant_host_tags': 'relevantHostTags',\n 'relevant_host_tags_anded': 'relevantHostTagsAnded',\n 'running_state': 'runningState',\n 'sort_attr': 'sortAttr',\n 'start_time_in_seconds': 'startTimeInSeconds',\n 'targets': 'targets',\n 'title': 'title',\n 'updated_epoch_millis': 'updatedEpochMillis',\n 'updater_id': 'updaterId'\n }\n\n def __init__(self, created_epoch_millis=None, creator_id=None, customer_id=None, end_time_in_seconds=None, event_name=None, host_tag_group_host_names_group_anded=None, id=None, point_tag_filter=None, point_tag_filter_anded=None, reason=None, relevant_customer_tags=None, relevant_customer_tags_anded=None, relevant_host_names=None, relevant_host_tags=None, relevant_host_tags_anded=None, running_state=None, sort_attr=None, start_time_in_seconds=None, targets=None, title=None, updated_epoch_millis=None, updater_id=None, _configuration=None): # noqa: E501\n \"\"\"MaintenanceWindow - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._created_epoch_millis = None\n self._creator_id = None\n self._customer_id = None\n self._end_time_in_seconds = None\n self._event_name = None\n self._host_tag_group_host_names_group_anded = None\n self._id = None\n self._point_tag_filter = None\n self._point_tag_filter_anded = None\n self._reason = None\n self._relevant_customer_tags = None\n self._relevant_customer_tags_anded = None\n self._relevant_host_names = None\n self._relevant_host_tags = None\n self._relevant_host_tags_anded = None\n self._running_state = None\n self._sort_attr = None\n 
self._start_time_in_seconds = None\n self._targets = None\n self._title = None\n self._updated_epoch_millis = None\n self._updater_id = None\n self.discriminator = None\n\n if created_epoch_millis is not None:\n self.created_epoch_millis = created_epoch_millis\n if creator_id is not None:\n self.creator_id = creator_id\n if customer_id is not None:\n self.customer_id = customer_id\n self.end_time_in_seconds = end_time_in_seconds\n if event_name is not None:\n self.event_name = event_name\n if host_tag_group_host_names_group_anded is not None:\n self.host_tag_group_host_names_group_anded = host_tag_group_host_names_group_anded\n if id is not None:\n self.id = id\n if point_tag_filter is not None:\n self.point_tag_filter = point_tag_filter\n if point_tag_filter_anded is not None:\n self.point_tag_filter_anded = point_tag_filter_anded\n self.reason = reason\n self.relevant_customer_tags = relevant_customer_tags\n if relevant_customer_tags_anded is not None:\n self.relevant_customer_tags_anded = relevant_customer_tags_anded\n if relevant_host_names is not None:\n self.relevant_host_names = relevant_host_names\n if relevant_host_tags is not None:\n self.relevant_host_tags = relevant_host_tags\n if relevant_host_tags_anded is not None:\n self.relevant_host_tags_anded = relevant_host_tags_anded\n if running_state is not None:\n self.running_state = running_state\n if sort_attr is not None:\n self.sort_attr = sort_attr\n self.start_time_in_seconds = start_time_in_seconds\n if targets is not None:\n self.targets = targets\n self.title = title\n if updated_epoch_millis is not None:\n self.updated_epoch_millis = updated_epoch_millis\n if updater_id is not None:\n self.updater_id = updater_id\n\n @property\n def created_epoch_millis(self):\n \"\"\"Gets the created_epoch_millis of this MaintenanceWindow. # noqa: E501\n\n\n :return: The created_epoch_millis of this MaintenanceWindow. # noqa: E501\n :rtype: int\n \"\"\"\n return self._created_epoch_millis\n\n @created_epoch_millis.setter\n def created_epoch_millis(self, created_epoch_millis):\n \"\"\"Sets the created_epoch_millis of this MaintenanceWindow.\n\n\n :param created_epoch_millis: The created_epoch_millis of this MaintenanceWindow. # noqa: E501\n :type: int\n \"\"\"\n\n self._created_epoch_millis = created_epoch_millis\n\n @property\n def creator_id(self):\n \"\"\"Gets the creator_id of this MaintenanceWindow. # noqa: E501\n\n\n :return: The creator_id of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._creator_id\n\n @creator_id.setter\n def creator_id(self, creator_id):\n \"\"\"Sets the creator_id of this MaintenanceWindow.\n\n\n :param creator_id: The creator_id of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n\n self._creator_id = creator_id\n\n @property\n def customer_id(self):\n \"\"\"Gets the customer_id of this MaintenanceWindow. # noqa: E501\n\n\n :return: The customer_id of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._customer_id\n\n @customer_id.setter\n def customer_id(self, customer_id):\n \"\"\"Sets the customer_id of this MaintenanceWindow.\n\n\n :param customer_id: The customer_id of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n\n self._customer_id = customer_id\n\n @property\n def end_time_in_seconds(self):\n \"\"\"Gets the end_time_in_seconds of this MaintenanceWindow. # noqa: E501\n\n The time in epoch seconds when this maintenance window will end # noqa: E501\n\n :return: The end_time_in_seconds of this MaintenanceWindow. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._end_time_in_seconds\n\n @end_time_in_seconds.setter\n def end_time_in_seconds(self, end_time_in_seconds):\n \"\"\"Sets the end_time_in_seconds of this MaintenanceWindow.\n\n The time in epoch seconds when this maintenance window will end # noqa: E501\n\n :param end_time_in_seconds: The end_time_in_seconds of this MaintenanceWindow. # noqa: E501\n :type: int\n \"\"\"\n if self._configuration.client_side_validation and end_time_in_seconds is None:\n raise ValueError(\"Invalid value for `end_time_in_seconds`, must not be `None`\") # noqa: E501\n\n self._end_time_in_seconds = end_time_in_seconds\n\n @property\n def event_name(self):\n \"\"\"Gets the event_name of this MaintenanceWindow. # noqa: E501\n\n The name of an event associated with the creation/update of this maintenance window # noqa: E501\n\n :return: The event_name of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._event_name\n\n @event_name.setter\n def event_name(self, event_name):\n \"\"\"Sets the event_name of this MaintenanceWindow.\n\n The name of an event associated with the creation/update of this maintenance window # noqa: E501\n\n :param event_name: The event_name of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n\n self._event_name = event_name\n\n @property\n def host_tag_group_host_names_group_anded(self):\n \"\"\"Gets the host_tag_group_host_names_group_anded of this MaintenanceWindow. # noqa: E501\n\n If true, a source/host must be in 'relevantHostNames' and have tags matching the specification formed by 'relevantHostTags' and 'relevantHostTagsAnded' in order for this maintenance window to apply. If false, a source/host must either be in 'relevantHostNames' or match 'relevantHostTags' and 'relevantHostTagsAnded'. Default: false # noqa: E501\n\n :return: The host_tag_group_host_names_group_anded of this MaintenanceWindow. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._host_tag_group_host_names_group_anded\n\n @host_tag_group_host_names_group_anded.setter\n def host_tag_group_host_names_group_anded(self, host_tag_group_host_names_group_anded):\n \"\"\"Sets the host_tag_group_host_names_group_anded of this MaintenanceWindow.\n\n If true, a source/host must be in 'relevantHostNames' and have tags matching the specification formed by 'relevantHostTags' and 'relevantHostTagsAnded' in order for this maintenance window to apply. If false, a source/host must either be in 'relevantHostNames' or match 'relevantHostTags' and 'relevantHostTagsAnded'. Default: false # noqa: E501\n\n :param host_tag_group_host_names_group_anded: The host_tag_group_host_names_group_anded of this MaintenanceWindow. # noqa: E501\n :type: bool\n \"\"\"\n\n self._host_tag_group_host_names_group_anded = host_tag_group_host_names_group_anded\n\n @property\n def id(self):\n \"\"\"Gets the id of this MaintenanceWindow. # noqa: E501\n\n\n :return: The id of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this MaintenanceWindow.\n\n\n :param id: The id of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n\n self._id = id\n\n @property\n def point_tag_filter(self):\n \"\"\"Gets the point_tag_filter of this MaintenanceWindow. # noqa: E501\n\n Query that filters on point tags of timeseries scanned by alert. # noqa: E501\n\n :return: The point_tag_filter of this MaintenanceWindow. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._point_tag_filter\n\n @point_tag_filter.setter\n def point_tag_filter(self, point_tag_filter):\n \"\"\"Sets the point_tag_filter of this MaintenanceWindow.\n\n Query that filters on point tags of timeseries scanned by alert. # noqa: E501\n\n :param point_tag_filter: The point_tag_filter of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n\n self._point_tag_filter = point_tag_filter\n\n @property\n def point_tag_filter_anded(self):\n \"\"\"Gets the point_tag_filter_anded of this MaintenanceWindow. # noqa: E501\n\n Whether to AND point tags filter listed in pointTagFilter. If true, a timeseries must contain the point tags along with other filters in order for the maintenance window to apply.If false, the tags are OR'ed, the customer must contain one of the tags. Default: false # noqa: E501\n\n :return: The point_tag_filter_anded of this MaintenanceWindow. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._point_tag_filter_anded\n\n @point_tag_filter_anded.setter\n def point_tag_filter_anded(self, point_tag_filter_anded):\n \"\"\"Sets the point_tag_filter_anded of this MaintenanceWindow.\n\n Whether to AND point tags filter listed in pointTagFilter. If true, a timeseries must contain the point tags along with other filters in order for the maintenance window to apply.If false, the tags are OR'ed, the customer must contain one of the tags. Default: false # noqa: E501\n\n :param point_tag_filter_anded: The point_tag_filter_anded of this MaintenanceWindow. # noqa: E501\n :type: bool\n \"\"\"\n\n self._point_tag_filter_anded = point_tag_filter_anded\n\n @property\n def reason(self):\n \"\"\"Gets the reason of this MaintenanceWindow. # noqa: E501\n\n The purpose of this maintenance window # noqa: E501\n\n :return: The reason of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._reason\n\n @reason.setter\n def reason(self, reason):\n \"\"\"Sets the reason of this MaintenanceWindow.\n\n The purpose of this maintenance window # noqa: E501\n\n :param reason: The reason of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and reason is None:\n raise ValueError(\"Invalid value for `reason`, must not be `None`\") # noqa: E501\n\n self._reason = reason\n\n @property\n def relevant_customer_tags(self):\n \"\"\"Gets the relevant_customer_tags of this MaintenanceWindow. # noqa: E501\n\n List of alert tags whose matching alerts will be put into maintenance because of this maintenance window # noqa: E501\n\n :return: The relevant_customer_tags of this MaintenanceWindow. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._relevant_customer_tags\n\n @relevant_customer_tags.setter\n def relevant_customer_tags(self, relevant_customer_tags):\n \"\"\"Sets the relevant_customer_tags of this MaintenanceWindow.\n\n List of alert tags whose matching alerts will be put into maintenance because of this maintenance window # noqa: E501\n\n :param relevant_customer_tags: The relevant_customer_tags of this MaintenanceWindow. # noqa: E501\n :type: list[str]\n \"\"\"\n if self._configuration.client_side_validation and relevant_customer_tags is None:\n raise ValueError(\"Invalid value for `relevant_customer_tags`, must not be `None`\") # noqa: E501\n\n self._relevant_customer_tags = relevant_customer_tags\n\n @property\n def relevant_customer_tags_anded(self):\n \"\"\"Gets the relevant_customer_tags_anded of this MaintenanceWindow. 
# noqa: E501\n\n Whether to AND customer tags listed in relevantCustomerTags. If true, a customer must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a customer must contain one of the tags. Default: false # noqa: E501\n\n :return: The relevant_customer_tags_anded of this MaintenanceWindow. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._relevant_customer_tags_anded\n\n @relevant_customer_tags_anded.setter\n def relevant_customer_tags_anded(self, relevant_customer_tags_anded):\n \"\"\"Sets the relevant_customer_tags_anded of this MaintenanceWindow.\n\n Whether to AND customer tags listed in relevantCustomerTags. If true, a customer must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a customer must contain one of the tags. Default: false # noqa: E501\n\n :param relevant_customer_tags_anded: The relevant_customer_tags_anded of this MaintenanceWindow. # noqa: E501\n :type: bool\n \"\"\"\n\n self._relevant_customer_tags_anded = relevant_customer_tags_anded\n\n @property\n def relevant_host_names(self):\n \"\"\"Gets the relevant_host_names of this MaintenanceWindow. # noqa: E501\n\n List of source/host names that will be put into maintenance because of this maintenance window # noqa: E501\n\n :return: The relevant_host_names of this MaintenanceWindow. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._relevant_host_names\n\n @relevant_host_names.setter\n def relevant_host_names(self, relevant_host_names):\n \"\"\"Sets the relevant_host_names of this MaintenanceWindow.\n\n List of source/host names that will be put into maintenance because of this maintenance window # noqa: E501\n\n :param relevant_host_names: The relevant_host_names of this MaintenanceWindow. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._relevant_host_names = relevant_host_names\n\n @property\n def relevant_host_tags(self):\n \"\"\"Gets the relevant_host_tags of this MaintenanceWindow. # noqa: E501\n\n List of source/host tags whose matching sources/hosts will be put into maintenance because of this maintenance window # noqa: E501\n\n :return: The relevant_host_tags of this MaintenanceWindow. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._relevant_host_tags\n\n @relevant_host_tags.setter\n def relevant_host_tags(self, relevant_host_tags):\n \"\"\"Sets the relevant_host_tags of this MaintenanceWindow.\n\n List of source/host tags whose matching sources/hosts will be put into maintenance because of this maintenance window # noqa: E501\n\n :param relevant_host_tags: The relevant_host_tags of this MaintenanceWindow. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._relevant_host_tags = relevant_host_tags\n\n @property\n def relevant_host_tags_anded(self):\n \"\"\"Gets the relevant_host_tags_anded of this MaintenanceWindow. # noqa: E501\n\n Whether to AND source/host tags listed in relevantHostTags. If true, a source/host must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a source/host must contain one of the tags. Default: false # noqa: E501\n\n :return: The relevant_host_tags_anded of this MaintenanceWindow. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._relevant_host_tags_anded\n\n @relevant_host_tags_anded.setter\n def relevant_host_tags_anded(self, relevant_host_tags_anded):\n \"\"\"Sets the relevant_host_tags_anded of this MaintenanceWindow.\n\n Whether to AND source/host tags listed in relevantHostTags. 
If true, a source/host must contain all tags in order for the maintenance window to apply. If false, the tags are OR'ed, and a source/host must contain one of the tags. Default: false # noqa: E501\n\n :param relevant_host_tags_anded: The relevant_host_tags_anded of this MaintenanceWindow. # noqa: E501\n :type: bool\n \"\"\"\n\n self._relevant_host_tags_anded = relevant_host_tags_anded\n\n @property\n def running_state(self):\n \"\"\"Gets the running_state of this MaintenanceWindow. # noqa: E501\n\n\n :return: The running_state of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._running_state\n\n @running_state.setter\n def running_state(self, running_state):\n \"\"\"Sets the running_state of this MaintenanceWindow.\n\n\n :param running_state: The running_state of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"ONGOING\", \"PENDING\", \"ENDED\"] # noqa: E501\n if (self._configuration.client_side_validation and\n running_state not in allowed_values):\n raise ValueError(\n \"Invalid value for `running_state` ({0}), must be one of {1}\" # noqa: E501\n .format(running_state, allowed_values)\n )\n\n self._running_state = running_state\n\n @property\n def sort_attr(self):\n \"\"\"Gets the sort_attr of this MaintenanceWindow. # noqa: E501\n\n Numeric value used in default sorting # noqa: E501\n\n :return: The sort_attr of this MaintenanceWindow. # noqa: E501\n :rtype: int\n \"\"\"\n return self._sort_attr\n\n @sort_attr.setter\n def sort_attr(self, sort_attr):\n \"\"\"Sets the sort_attr of this MaintenanceWindow.\n\n Numeric value used in default sorting # noqa: E501\n\n :param sort_attr: The sort_attr of this MaintenanceWindow. # noqa: E501\n :type: int\n \"\"\"\n\n self._sort_attr = sort_attr\n\n @property\n def start_time_in_seconds(self):\n \"\"\"Gets the start_time_in_seconds of this MaintenanceWindow. # noqa: E501\n\n The time in epoch seconds when this maintenance window will start # noqa: E501\n\n :return: The start_time_in_seconds of this MaintenanceWindow. # noqa: E501\n :rtype: int\n \"\"\"\n return self._start_time_in_seconds\n\n @start_time_in_seconds.setter\n def start_time_in_seconds(self, start_time_in_seconds):\n \"\"\"Sets the start_time_in_seconds of this MaintenanceWindow.\n\n The time in epoch seconds when this maintenance window will start # noqa: E501\n\n :param start_time_in_seconds: The start_time_in_seconds of this MaintenanceWindow. # noqa: E501\n :type: int\n \"\"\"\n if self._configuration.client_side_validation and start_time_in_seconds is None:\n raise ValueError(\"Invalid value for `start_time_in_seconds`, must not be `None`\") # noqa: E501\n\n self._start_time_in_seconds = start_time_in_seconds\n\n @property\n def targets(self):\n \"\"\"Gets the targets of this MaintenanceWindow. # noqa: E501\n\n List of targets to notify, overriding the alert's targets. # noqa: E501\n\n :return: The targets of this MaintenanceWindow. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._targets\n\n @targets.setter\n def targets(self, targets):\n \"\"\"Sets the targets of this MaintenanceWindow.\n\n List of targets to notify, overriding the alert's targets. # noqa: E501\n\n :param targets: The targets of this MaintenanceWindow. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._targets = targets\n\n @property\n def title(self):\n \"\"\"Gets the title of this MaintenanceWindow. # noqa: E501\n\n Title of this maintenance window # noqa: E501\n\n :return: The title of this MaintenanceWindow. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._title\n\n @title.setter\n def title(self, title):\n \"\"\"Sets the title of this MaintenanceWindow.\n\n Title of this maintenance window # noqa: E501\n\n :param title: The title of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and title is None:\n raise ValueError(\"Invalid value for `title`, must not be `None`\") # noqa: E501\n\n self._title = title\n\n @property\n def updated_epoch_millis(self):\n \"\"\"Gets the updated_epoch_millis of this MaintenanceWindow. # noqa: E501\n\n\n :return: The updated_epoch_millis of this MaintenanceWindow. # noqa: E501\n :rtype: int\n \"\"\"\n return self._updated_epoch_millis\n\n @updated_epoch_millis.setter\n def updated_epoch_millis(self, updated_epoch_millis):\n \"\"\"Sets the updated_epoch_millis of this MaintenanceWindow.\n\n\n :param updated_epoch_millis: The updated_epoch_millis of this MaintenanceWindow. # noqa: E501\n :type: int\n \"\"\"\n\n self._updated_epoch_millis = updated_epoch_millis\n\n @property\n def updater_id(self):\n \"\"\"Gets the updater_id of this MaintenanceWindow. # noqa: E501\n\n\n :return: The updater_id of this MaintenanceWindow. # noqa: E501\n :rtype: str\n \"\"\"\n return self._updater_id\n\n @updater_id.setter\n def updater_id(self, updater_id):\n \"\"\"Sets the updater_id of this MaintenanceWindow.\n\n\n :param updater_id: The updater_id of this MaintenanceWindow. # noqa: E501\n :type: str\n \"\"\"\n\n self._updater_id = updater_id\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(MaintenanceWindow, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, MaintenanceWindow):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, MaintenanceWindow):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"wavefrontHQ/python-client","sub_path":"wavefront_api_client/models/maintenance_window.py","file_name":"maintenance_window.py","file_ext":"py","file_size_in_byte":27110,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"23478461531","text":"#!/usr/bin/env python3\r\n\r\nwith open(\"A-large.in\") as f:\r\n N, *cases = f.read().splitlines()\r\n \r\ni = 0\r\n \r\nfor case in map(int, cases):\r\n i += 1\r\n if case == 0:\r\n print(\"Case #{}: INSOMNIA\".format(i))\r\n else:\r\n seen = set()\r\n n = 1\r\n while len(seen) != 10:\r\n seen |= set(str(n*case))\r\n n += 1\r\n print(\"Case #{}: {}\".format(i, 
(n-1)*case))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/1864.py","file_name":"1864.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28096933229","text":"class Solution:\n    def __init__(self, str1, str2, len1, len2):\n        self.string1 = str1\n        self.string2 = str2\n        self.m = len1\n        self.n = len2\n        self.memo = list()\n        for i in range(self.m + 1):\n            row = list()\n            for j in range(self.n + 1):\n                row.append(-1)\n            self.memo.append(row)\n\n    def solve(self):\n        self.lcs_substring(self.m, self.n, 0)\n\n        max_lcs = -1\n        for i in self.memo:\n            for j in i:\n                max_lcs = max(max_lcs, j)\n        return max_lcs\n\n    def lcs_substring(self, x, y, count):\n        if x == 0 or y == 0:\n            self.memo[x][y] = count\n            return count\n\n        if self.string1[x-1] == self.string2[y-1]:\n            count = self.lcs_substring(x-1, y-1, count+1)\n\n        value = max(count, max(self.lcs_substring(x-1, y, 0), self.lcs_substring(x, y-1, 0)))\n        self.memo[x][y] = value\n        return value\n\n\nif __name__ == '__main__':\n    a1 = 'abcd'\n    a2 = 'abe'\n    obj = Solution(a1, a2, len(a1), len(a2))\n    ans = obj.solve()\n    print(ans)\n","repo_name":"navkant/ds_algo_practice","sub_path":"dynamic_programming2/lcs_substring/lcs_substring_memo.py","file_name":"lcs_substring_memo.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23545219261","text":"def flip_pancake(c):\r\n    if c == '-': return '+'\r\n    if c == '+': return '-'\r\n\r\n\r\ndef flip_row(j):\r\n    for k in range(K):\r\n        S[j + k] = flip_pancake(S[j + k])\r\n\r\n\r\ndef has_flipped_pancake():\r\n    for q in range(len(S)):\r\n        if S[q] == '-':\r\n            return True\r\n    return False\r\n\r\nT = int(input())\r\nfor i in range(T):\r\n    line = input().split(\" \")\r\n    S = list(str(line[0]))\r\n    K = int(line[1])\r\n    c = 0\r\n    for j in range(len(S) - K + 1):\r\n        if S[j] == '-':\r\n            c += 1\r\n            flip_row(j)\r\n    if has_flipped_pancake():\r\n        print(\"Case #\" + str(i + 1) + \": IMPOSSIBLE\")\r\n    else:\r\n        print(\"Case #\" + str(i + 1) + \": \" + str(c))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3032.py","file_name":"3032.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14469998483","text":"def sum_2_number():\n    l = (int(input(\"Enter the first number\")))\n    print(l)\n    s = (int(input(\"Enter the second number\")))\n    print(s)\n    print(\"%d+%d=%d\" % (l, s, l+s))\nsum_2_number()\n\n# To give a function its own parameters, put the parameters you want to create inside the parentheses that follow the function name after def.\n# To create several parameters, separate them with commas (use the ASCII comma); to use a defined parameter later, pass a value for it inside the parentheses when calling the function.\n# Parameters listed when defining a function are called formal parameters, while the values passed when calling it are called actual arguments.\ndef sum_2_number2(number1 , number2):\n\n    result=number1 +number2\n    print(\"%d+%d=%d\" %(number1,number2,result) )\nsum_2_number2(130,40)\n\n","repo_name":"yuanyunch23/ok","sub_path":"函数/定义一个函数,使得任意两个数都能求和.py","file_name":"定义一个函数,使得任意两个数都能求和.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18728322278","text":"\"\"\"\nThis file demonstrates a custom command.\n\"\"\"\nfrom datetime import datetime\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.db import DEFAULT_DB_ALIAS\nfrom django.template import Template, RequestContext\nfrom django.utils.translation import gettext_lazy as _\n\nfrom freppledb import 
VERSION\nfrom freppledb.execute.models import Task\n\n\nclass Command(BaseCommand):\n    # Help text shown when you run \"frepplectl help my_command\"\n    help = \"This command does ...\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"--my_arg\",\n            dest=\"my_arg\",\n            type=int,\n            default=0,\n            help=\"an optional argument for the command\",\n        )\n        parser.add_argument(\n            \"--database\",\n            dest=\"database\",\n            default=DEFAULT_DB_ALIAS,\n            help=\"Nominates a specific database to load data from and export results into\",\n        )\n        parser.add_argument(\n            \"--task\",\n            dest=\"task\",\n            type=int,\n            help=\"Task identifier (generated automatically if not provided)\",\n        )\n\n    requires_model_validation = False\n\n    def get_version(self):\n        return VERSION\n\n    # The business logic of the command goes in this method\n    def handle(self, *args, **options):\n        if \"database\" in options:\n            database = options[\"database\"] or DEFAULT_DB_ALIAS\n        else:\n            database = DEFAULT_DB_ALIAS\n        if database not in settings.DATABASES:\n            raise CommandError(\"No database settings known for '%s'\" % database)\n        task = None\n        now = datetime.now()\n        try:\n            # Initialize the task\n            if \"task\" in options and options[\"task\"]:\n                try:\n                    task = Task.objects.all().using(database).get(pk=options[\"task\"])\n                except Exception:\n                    raise CommandError(\"Task identifier not found\")\n                if (\n                    task.started\n                    or task.finished\n                    or task.status != \"Waiting\"\n                    or task.name != \"my_command\"\n                ):\n                    raise CommandError(\"Invalid task identifier\")\n                task.status = \"0%\"\n                task.started = now\n            else:\n                task = Task(name=\"my_command\", submitted=now, started=now, status=\"0%\")\n            task.save(using=database)\n\n            # Here goes the real business logic\n            print(\"This command was called with argument %s\" % options[\"my_arg\"])\n\n            # The task has finished successfully\n            task.message = \"My task message\"\n            task.processid = None\n            task.status = \"Done\"\n            task.finished = datetime.now()\n            task.save(using=database)\n\n        except Exception as e:\n            # The task failed\n            if task:\n                task = Task.objects.all().using(database).get(pk=task.id)\n                task.status = \"Failed\"\n                task.message = \"%s\" % e\n                task.finished = datetime.now()\n                task.processid = None\n                task.save(using=database)\n            raise e\n\n    # Label to display on the execution screen\n    title = _(\"My own command\")\n\n    # Sequence of the command on the execution screen\n    index = 1\n\n    # This method generates the text to display on the execution screen\n    @staticmethod\n    def getHTML(request):\n        context = RequestContext(request)\n        template = Template(\n            \"\"\"\n        {% load i18n %}\n        
{% csrf_token %}\n \n \n \n \n \n
\n \n \n A description of my command\n
\n
\n        \"\"\"\n        )\n        return template.render(context)\n","repo_name":"frePPLe/frepple-data-admin","sub_path":"docs/getting-started/my_app/management/commands/my_command.py","file_name":"my_command.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"25052251286","text":"#!/usr/bin/env python\n\nfrom datetime import datetime, timedelta\n\ndef twoDecimals(x):\n    return int(x*100)/100\n\nclass book:\n    def __init__(self, knownPayday):\n        x = {}\n        for m in range(-3, 32):\n            x[m] = []\n        self.book = x\n        self.dt = datetime.strptime(knownPayday, \"%Y-%m-%d\")\n        self.payCheck=1 # Set this manually before using\n        self.savings=0\n        self.saveRatio=0.5\n\n    def addExpense(self, nm, dayNum, amt):\n        self.book[dayNum].append({\"name\":nm, \"amount\":amt})\n\n    def oneCycle(self):\n        dStart = self.dt\n        tot = 0\n        rep = '' # Report string\n        for m in range(0, 14):\n            k = self.dt.day\n            for n in self.book[k]:\n                tot += n[\"amount\"]\n                rep += str(k) + \" \" + n[\"name\"] + \" : \" + str(n[\"amount\"]) + \"\\n\"\n            self.dt = self.dt + timedelta(1)\n        deltaSavings = self.saveRatio * (self.payCheck-tot)\n        self.savings += deltaSavings\n        rep += \"Money Saved This Pay Cycle: \" + str(twoDecimals(deltaSavings)) + \"\\n\"\n        rep += \"Total Saved: \" + str(twoDecimals(self.savings)) + \"\\n\"\n        return [tot, rep]\n\n    def nCycles(self, N):\n        for m in range(0, N):\n            print(\"For pay period starting: \", str(self.dt))\n            x, y = self.oneCycle()\n            print(y)\n            print(\"Expenses This Pay Cycle: \", twoDecimals(x))\n            print(\"\\n\\n\")\n\nif __name__==\"__main__\":\n    b = book(\"2018-01-01\")\n    b.addExpense(\"house\", 1, 1000)\n    b.addExpense(\"car\", 2, 380)\n    b.addExpense(\"credit 1\", 13, 300)\n    b.addExpense(\"credit 2\", 17, 320)\n    b.addExpense(\"phone\", 22, 90)\n    b.nCycles(8)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jstr045329/paycheckPlanner","sub_path":"dueDateCalc.py","file_name":"dueDateCalc.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22005912493","text":"import sys; sys.stdin = open(\"2593.txt\", \"r\")\n\nN = int(input())\ntops = list(map(int, input().split()))\nresult = [0] * N\nstack = [[tops[-1], N-1]]\nfor i in range(N-2, -1, -1):\n    while len(stack) > 0:\n        if tops[i] >= stack[-1][0]:\n            result[stack[-1][1]] = i + 1\n            stack.pop()\n        else:\n            break\n    stack.append([tops[i], i])\nfor num in result:\n    print(num, end=' ')","repo_name":"vreez/APS","sub_path":"boj/boj_2593_탑.py","file_name":"boj_2593_탑.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17292690861","text":"#! 
/usr/bin/env python\n\nimport sys\nimport os.path\nfrom aubio import source, sink\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print('usage: %s ' % sys.argv[0])\n sys.exit(1)\n source_file = sys.argv[1]\n duration = float(sys.argv[2])\n source_base_name, source_ext = os.path.splitext(os.path.basename(source_file))\n\n hopsize = 256\n slice_n, total_frames_written, read = 0, 0, hopsize\n\n def new_sink_name(source_base_name, slice_n, duration = duration):\n return source_base_name + '_%02.3f' % (slice_n*duration) + '.wav'\n\n f = source(source_file, 0, hopsize)\n samplerate = f.samplerate\n g = sink(new_sink_name(source_base_name, slice_n), samplerate)\n\n #print \"new slice:\", slice_n, 0, \"+\", 0, \"=\", 0\n while read == hopsize:\n vec, read = f()\n start_of_next_region = int(duration * samplerate * (slice_n + 1))\n remaining = start_of_next_region - total_frames_written\n # number of samples remaining is less than what we got\n if remaining <= read:\n # write remaining samples from current region\n g(vec[0:remaining], remaining)\n # close this file\n del g\n #print \"new slice\", slice_n, total_frames_written, \"+\", remaining, \"=\", start_of_next_region\n slice_n += 1\n # create a new file for the new region\n g = sink(new_sink_name(source_base_name, slice_n), samplerate)\n # write the remaining samples in the new file\n g(vec[remaining:read], read - remaining)\n else:\n g(vec[0:read], read)\n total_frames_written += read\n total_duration = total_frames_written / float(samplerate)\n slice_n += 1\n outstr = 'created %(slice_n)s slices from %(source_base_name)s%(source_ext)s' % locals()\n outstr += ' (total duration %(total_duration).2fs)' % locals()\n print(outstr)\n # close source and sink files\n del f, g\n","repo_name":"sonic-pi-net/sonic-pi","sub_path":"app/external/aubio-0.4.9/python/demos/demo_slicing.py","file_name":"demo_slicing.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":10272,"dataset":"github-code","pt":"61"} +{"seq_id":"72331177153","text":"import argparse\nimport os\n\nparser = argparse.ArgumentParser(description = 'Split up vcf file into equally sized chunks')\nparser.add_argument('vcf_file', type = str, nargs = 1, help = 'vcf file to split up')\nparser.add_argument('out_dir', type = str, nargs = 1, help = 'directory to write each new vcf file')\nparser.add_argument('prefix', type = str, nargs = 1, help = 'file prefix for each new vcf file')\nparser.add_argument('nchunks', type = int, nargs = 1, help = 'number of chunks')\nargs = parser.parse_args()\n\nif not os.path.isdir(args.out_dir[0]):\n os.mkdir(args.out_dir[0])\n\nwith open(args.vcf_file[0], 'r') as data:\n all_file = data.readlines()\n lines_of_header = 0\n for line in all_file:\n lines_of_header += 1\n if line[0:len('#CHROM')] == '#CHROM':\n break\n num_varients = (len(all_file) - lines_of_header)\n lines_per_file = int((num_varients - 1) / args.nchunks[0] + 1)\n\nfor i in range(args.nchunks[0]):\n with open(args.out_dir[0] + '/' + args.prefix[0] + '_' + str(i) + '.vcf', 'w') as data:\n data.writelines(all_file[0:lines_of_header])\n for j in range((i * lines_per_file), (min((i + 1) * lines_per_file, num_varients))):\n data.write(all_file[j + lines_of_header])\n","repo_name":"d-henness/bioinfo_workflows","sub_path":"scripts_dir/split_up_vcf.py","file_name":"split_up_vcf.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"41090959334","text":"from nltk.tokenize import word_tokenize\nimport os\n\n\nclass FileReader:\n def __init__(self, file):\n self.line_count = None\n self.word_count = None\n self.file = file\n\n def read(self):\n try:\n with open(self.file) as f:\n return f.read()\n except FileNotFoundError:\n return ''\n\n def write(self, data):\n self.write(data)\n\n def count(self):\n with open(self.file) as f:\n line_count = 0\n words = []\n\n for line in f:\n line_count += 1\n\n for word in word_tokenize(line):\n words.append(word)\n\n self.word_count = len(words)\n self.line_count = line_count\n\n def __add__(self, other):\n new_file = self.read() + other.read()\n with open('new_file.txt', 'w') as file:\n file.write(new_file)\n new_file = FileReader(new_file)\n return new_file\n\n def __repr__(self):\n p = os.path.abspath(self.file)\n return p\n","repo_name":"stasiakozlova/Python-basics","sub_path":"HW3/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41992717275","text":"from main import Controller\nfrom page import MainPage, NewBooksPage\n\n\ndef test_get_page(mocker):\n mocker.patch(\"main.Printer\", return_value=\"test\")\n controller = Controller()\n controller.change_page(\"main\")\n assert isinstance(controller.page_stack[-1], MainPage)\n controller.change_page(\"new_books\")\n assert isinstance(controller.page_stack[-1], NewBooksPage)\n\n\ndef test_handle_event(mocker):\n mocker.patch(\"main.Printer\", return_value=\"test\")\n mocker.patch(\"main.Controller.exit\", return_value=None)\n mocker.patch(\"main.Controller.get_page_by_name\", return_value=\"test\")\n controller = Controller()\n controller.handle_event(\"test1\")\n assert len(controller.page_stack) == 2\n controller.handle_event(\"back\")\n assert len(controller.page_stack) == 1\n controller.handle_event(\"back\")\n assert len(controller.page_stack) == 1\n controller.handle_event(\"exit\")\n controller.exit.assert_called_once()\n","repo_name":"likelion-backend-6th/PythonProject_YubinMoon","sub_path":"tests/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26898839576","text":"import requests\n\ncost_magnifier = 1.2\n\nclass distanceCalculator():\n\n\tdef __init__(self, lat_a, lon_a, lat_b, lon_b):\n\t\tself.lat_a = lat_a\n\t\tself.lon_a = lon_a\n\t\tself.lat_b = lat_b\n\t\tself.lon_b = lon_b\n\n\tdef mapboxRequest(self):\n\t\tMAPBOX_URL = 'https://api.mapbox.com/directions/v5/mapbox/driving/'\n\t\tMAPBOX_ACCESS_TOKEN = 'pk.eyJ1IjoiZXNoLWFkbWluIiwiYSI6IkltMGhSWTgifQ.bj-sVen7BY-wC_MNwPd0uQ'\n\t\tMAPBOX_URL_PARAMS = {'access_token': MAPBOX_ACCESS_TOKEN}\n\n\t\tr = requests.get(\"{0}{1},{2};{3},{4}.json\".format(\tMAPBOX_URL,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.lon_a,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.lat_a,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.lon_b,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.lat_b), params = MAPBOX_URL_PARAMS)\n\t\tif r.json()['code'] == 'Ok':\n\t\t distance = r.json()['routes'][0]['distance'] * 0.000621371\n\t\telse: #distance too close to calculate route\n\t\t distance = 0\n\t\treturn distance\n\nclass buildCostCalculator():\n\n\tdef __init__(self, latitude, longitude, remotespeedmbps, assumedtypicalbuilddistancemi, latitude2, longitude2):\n\t\tself.latitude = latitude\n\t\tself.longitude = longitude\n\t\tself.remotespeedmbps = 
remotespeedmbps\n\t\tself.assumedtypicalbuilddistancemi = assumedtypicalbuilddistancemi\n\t\tself.latitude2 = latitude2\n\t\tself.longitude2 = longitude2\n\n#to-do: make this class return either total cost (which it is doing now) OR nearest_central_office for IA builds\n#don't know how to use r.json() in multiple functions within this class if it is created using a function\n\tdef costquestRequest(self):\n\t\tCOSTQUEST_URL = 'https://apps.costquest.com/esh/api'\n\t\tCOSTQUEST_USER_ID = 'Costquest\\ESHUser'\n\t\tCOSTQUEST_PASS = '8kB9cQht'\n\t\tMAPBOX_URL_PARAMS = {\t'latitude': self.latitude,\n\t\t\t\t\t\t\t\t'longitude': self.longitude,\n\t\t\t\t\t\t\t\t'remotespeedmbps': self.remotespeedmbps,\n\t\t\t\t\t\t\t\t'assumedtypicalbuilddistancemi': self.assumedtypicalbuilddistancemi,\n\t\t\t\t\t\t\t\t'latitude2': self.latitude2,\n\t\t\t\t\t\t\t\t'longitude2': self.longitude2}\n\n\t\tr = requests.get(\"{0}\".format(COSTQUEST_URL), params = MAPBOX_URL_PARAMS, auth=(COSTQUEST_USER_ID, COSTQUEST_PASS))\n\t\tif r.json()['Message'] == 'Success':\n\t\t\tschool_node = r.json()['ModelData']['EstimatedElectronicsSchoolNode']\n\t\t\thigh_outside_plant_cost = r.json()['ModelData']['EstimatedOSPHigh']\n\t\t\thigh_yearly_maintenance = r.json()['ModelData']['EstimatedYearlyMaintenanceHigh']\n\t\t\thigh_replacement_cost = r.json()['ModelData']['EstimatedReplacementCapitalHigh']\n\t\t\tbuild_cost = school_node + high_outside_plant_cost + high_yearly_maintenance + high_replacement_cost\n\t\telse: build_cost = -1\n\t\treturn build_cost\n\n#to-do: make class that distributes costs and incorporate into xxx_build_cost_calculator.py","repo_name":"mtejas88/esh","sub_path":"Projects/funding_the_gap/qa/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32095784945","text":"def answer(lisa):\r\n lisa = sorted(lisa)\r\n pairs = []\r\n for i in lisa:\r\n for j in lisa:\r\n if j > i and j % i == 0:\r\n pairs.append([i,j])\r\n\r\n countlist = []\r\n for item in lisa:\r\n count = 0\r\n for element in pairs:\r\n if element[1] == item:\r\n count += 1\r\n countlist.append(count)\r\n\r\n result = []\r\n for i in range(len(lisa)):\r\n count = 0\r\n \r\n for element in pairs:\r\n if element[1] == lisa[i]:\r\n \r\n count += countlist[lisa.index(element[0])]\r\n \r\n result.append(count)\r\n suma = 0\r\n for item in result:\r\n suma += item\r\n return suma\r\nprint(answer([1,2,3,4,5,6]))\r\n","repo_name":"Aditya510/PythonProjectsv1.3","sub_path":"trial4foobarlevel3.py","file_name":"trial4foobarlevel3.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25622872985","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nimport cv2\n\nimg = None\ngray = None\nrgb_gray = None\n\ndef onTrackbarSlide(pos):\n global gray, rgb_gray\n printContours('Contours', gray)\n printContours('Contours RGB', rgb_gray)\n\ndef printContours(window, img):\n #ret, dst = cv2.threshold(gray, int(pos), 255, cv2.THRESH_BINARY)\n low = cv2.getTrackbarPos('Low', 'Contours')\n high = cv2.getTrackbarPos('High', 'Contours')\n dst = cv2.Canny(img, low, high)\n contours, hierarchy = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n if contours:\n color = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n cv2.drawContours(color, contours, -1, (0, 0, 255))\n cv2.imshow(window, color)\n\ndef main(args):\n global img, 
gray, rgb_gray\n    if len(args) != 2:\n        return -1\n    img = cv2.pyrDown(cv2.imread(args[1]))\n    gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)\n    rgb = cv2.split(img)\n    rgb_gray = cv2.max(cv2.max(rgb[2], rgb[1]), rgb[0])\n\n    cv2.namedWindow('Contours')\n    cv2.createTrackbar('Low', 'Contours', 10, 255, onTrackbarSlide)\n    cv2.createTrackbar('High', 'Contours', 100, 255, onTrackbarSlide)\n    cv2.namedWindow('Contours RGB')\n\n    onTrackbarSlide(100)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    return 0\n\nif __name__ == '__main__':\n    args = sys.argv\n    main(args)\n","repo_name":"justice3120/opencv_sample","sub_path":"8/ex2/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70164362754","text":"from turtle import Turtle\nimport random\n\n\nclass Food(Turtle):\n    def __init__(self, screen_w, screen_h):\n        super().__init__()\n        self.screen_w = screen_w\n        self.screen_h = screen_h\n        self.shape(\"circle\")\n        self.penup()\n        self.shapesize(stretch_len=0.5, stretch_wid=0.5)\n        self.color(\"red\")\n        self.speed(\"fastest\")\n        self.refresh()\n\n    def refresh(self):\n        random_x = random.randint(int(-self.screen_w * .40), int(self.screen_w * .40))\n        random_y = random.randint(int(-self.screen_h * .40), int(self.screen_h * .40))\n        self.goto(random_x, random_y)\n","repo_name":"RobertoLJr/100-days-of-python","sub_path":"day-024-extra-snake-game-v2/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23628350963","text":"\r\n#http://pemrograman-sederhana.blogspot.com/2014/09/menghitung-singkatan\r\n# huruf-dan-konsonan-di_99.html\r\n'''\r\nalex is an artist whose stage name is Bang Tatap. Write a program where, \r\nwhen alex enters a letter, his stage name appears, and count the letters according to the user's input\r\n\r\n\r\nkalimat = the sentence entered by the user\r\nsingkatan = the user's abbreviation\r\ndicari = the abbreviation whose occurrences will be counted \r\nhuruf = string/letters stored and then counted\r\n\r\n#input : kalimat = user input ; singkatan = the abbreviation ; dicari = the letter to count\r\n\r\n#process :\r\ncreate the kalimat, singkatan and dicari inputs\r\nloop over kalimat, singkatan and dicari\r\nbranch to check the stage name and to count the letters matching the input \r\n\r\n#output :\r\ndisplay the resulting stage name \r\ndisplay the number of letters matching the input \r\n\r\n'''\r\n\r\nkalimat = str(input(\"enter a sentence :\")).lower()\r\nsingkatan = str(input(\"enter the abbreviation :\"))\r\ndicari = str(input(\"letter to count :\"))\r\nhuruf =\"\"\r\nfor jaja in kalimat:\r\n    for zizi in singkatan:\r\n        if zizi == singkatan:\r\n            kalimat = kalimat.replace(zizi,\"Bang Tatap\")\r\n    if jaja in dicari :\r\n        huruf += jaja\r\nprint(\"the result is\",kalimat)\r\nprint(len(huruf))\r\n\r\n","repo_name":"rihartito/hitung_huruf_-_mengganti","sub_path":"lab 7.py","file_name":"lab 7.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"533436663","text":"\r\nimport os #lets us manage files\r\nimport pandas as pd\r\n\r\ndef liste_fichier():\r\n    list =(os.listdir('../fichier_temporaires/'))\r\n    MALISTE=[]\r\n    for i in range(len(list)):\r\n        if list[i].split('.')[1]=='xlsx':\r\n            MALISTE.append(list[i].split('.')[0])\r\n    
return MALISTE\r\n\r\ndef lecture_fueilles():\r\n    MALISTE=liste_fichier()\r\n    data=[]\r\n    for i in range(len(MALISTE)):\r\n        if MALISTE[i]=='Rubrique':\r\n            pass\r\n        else:\r\n            temps=None\r\n            temps=pd.read_excel('../fichier_temporaires/'+MALISTE[i]+'.xlsx', sheet_name=0, usecols=\"B\")\r\n            Np=temps.to_numpy()\r\n            data.append([Np])\r\n    return data\r\n\r\ndef lecture_data():\r\n    MALISTE=liste_fichier()\r\n    MALISTE.remove('Rubrique')\r\n    data=lecture_fueilles()\r\n    NewListe=[]\r\n    for i in range(len(data)):\r\n        if len(data[i][0]) >0:\r\n            NewListe.append(MALISTE[i])\r\n            for j in range(len(data[i][0])):\r\n                if data[i][0][j][0]=='Data':\r\n                    data[i][0][j][0]=MALISTE[i]\r\n                NewListe.append(data[i][0][j][0])\r\n    return NewListe\r\n\r\n\r\ndef rangement():\r\n    data=lecture_data()\r\n    ListePrincipal=[]\r\n    Nb=len(data)\r\n    Nb=Nb/6\r\n    Nb=int(Nb)\r\n    for i in range(Nb):\r\n        NewListe=[]\r\n        for j in range(6):\r\n            variable=data.pop(0)\r\n            NewListe.append(variable)\r\n        ListePrincipal.append(NewListe)\r\n    return ListePrincipal\r\n\r\n\"\"\"\r\nhere the structure works in three levels [][][][]\r\n[first list][every element of the list][each cell of the list][contents of each cell]\r\n\"\"\"\r\n","repo_name":"Leo-Malaubier/projet-de-g-n-ration-de-page-automatique","sub_path":"cote_server/fichier_python/lecture_xlsx.py","file_name":"lecture_xlsx.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31768861307","text":"\nimport math, copy, re\n\n\ndef part_1(data):\n\tsteps = 0\n\ti = 0\n\tL = len(data)\n\twhile i >= 0 and i < L:\n\t\tcurr = data[i]\n\t\tdata[i] += 1\n\t\ti += curr\n\t\tsteps += 1\n\tprint(steps)\n\treturn\n\ndef part_2(data):\n\tsteps = 0\n\ti = 0\n\tL = len(data)\n\twhile i >= 0 and i < L:\n\t\tcurr = data[i]\n\t\tif curr >= 3: data[i] -= 1\n\t\telse: data[i] += 1\n\t\ti += curr\n\t\tsteps += 1\n\tprint(steps)\n\treturn \n\n\nif __name__ == '__main__':\n\twith open('5_input') as f:\n\t\tdata = f.read()\n\t\t# data = data.split('\\n')\n\t\tdata = list(map(int, data.split()))\n\n\n\tpart_1(data)\n\tpart_2(data)\n\t","repo_name":"PiErr0r/aoc","sub_path":"2017/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2100913051","text":"from collections import deque\ndef solution(food_times, k):\n    count = 0\n    result = 0\n    food_queue = deque()\n    for i in range(len(food_times)):\n        food_queue.append([i+1,food_times[i]])\n    \n    \n    for i in range(k):\n        if len(food_queue) == 0:\n            result = -1 \n            return result \n        index , food_num = food_queue.popleft()\n        food_num -= 1\n\n        if food_num != 0 :\n            food_queue.append([index,food_num]) \n        else:\n            continue\n    index , food_num = food_queue.popleft()\n    return index\n\n\n\nfood_times = [3,1,2]\nk = 5\nif solution(food_times,k) == -1:\n    print(-1)\nelse:\n    print(solution(food_times,k))\n\n\n","repo_name":"YuHyeonGeun-KOR/My-Algorithm-Journey","sub_path":"This is cote/Chapter 3.Greedy/muzi's eating(1).py","file_name":"muzi's eating(1).py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73467341953","text":"from fastapi import FastAPI, Request\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nimport pandas as pd\nfrom dataframes import df_train, df_crew, actor_financial, director_financial, df_movies, 
string_transformation\nfrom model import feat_matrix, get_recommendations\n\n## INITIAL VALUES\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"./templates\") # use custom html templates\nfeature_matrix = None\ntfidf_fit = None\n\n### FUNCTIONS\n\n## ROOT\n@app.get('/', response_class=HTMLResponse) # output will be an HTML response\ndef welcome(request: Request):\n    return templates.TemplateResponse(\"root.html\", {\"request\": request}) # this represents an HTML request\n\n## AMOUNT OF FILMS BY MONTH\n@app.get('/cantidad_filmaciones_mes/{mes}')\ndef cantidad_filmaciones_mes(mes: str):\n    month = string_transformation(mes)\n    if month in df_movies[\"release_month\"].unique():\n        count_by_month = df_movies.groupby([\"release_month\"])[\"title\"].count()\n        return {month: count_by_month[month].item()} # needs item() because fastapi doesn't process numpy.int64 type objects \n    else:\n        return \"Entered value is not valid.\" # if we use print() instead of return, the output will be null\n\n## AMOUNT OF FILMS BY DAY \n@app.get('/cantidad_filmaciones_dia/{dia}')\ndef cantidad_filmaciones_dia(dia: str):\n    day = string_transformation(dia)\n    if day in df_movies[\"release_day\"].unique():\n        count_by_day = df_movies.groupby([\"release_day\"])[\"title\"].count()\n        return {day:count_by_day[day].item()}\n    else:\n        return \"Entered value is not valid.\" \n\n## POPULARITY OF MOVIES \n@app.get('/score_titulo/{titulo}')\ndef score_titulo(titulo: str):\n    title = string_transformation(titulo)\n    df_grouped = df_movies.groupby(\"transformed_title\")[\"popularity\"].sum()\n    if title in df_grouped.index: # values of the grouped column are the new index in a grouped df\n        normal_index = (df_movies[\"transformed_title\"] == title).idxmax() # index for non transformed and non grouped values\n        return {\n            \"Title\": df_movies[\"title\"][normal_index], \n            \"Year\": df_movies[\"release_year\"][normal_index].item(), \n            \"Popularity\" : df_grouped[title].round(2).item()\n            } \n    else:\n        return \"Entered value is not valid.\"\n\n## VOTES OF MOVIES \n@app.get('/votos_titulo/{titulo}')\ndef votos_titulo(titulo: str):\n    title = string_transformation(titulo)\n    df_grouped_total = df_movies.groupby(\"transformed_title\")[\"vote_count\"].sum()\n    df_grouped_average = df_movies.groupby(\"transformed_title\")[\"vote_average\"].mean()\n    if title in df_grouped_average.index: # values of the grouped column are the new index in a grouped df\n        if df_grouped_total[title] >= 2000:\n            normal_index = (df_movies[\"transformed_title\"] == title).idxmax() # index for non transformed and non grouped values\n            return {\n                \"Title\": df_movies[\"title\"][normal_index], \n                \"Year\": df_movies[\"release_year\"][normal_index].item(), \n                \"Total_Votes\" : df_grouped_total[title].item(), \n                \"Average_Vote\" : df_grouped_average[title].item()\n                }\n        else:\n            return \"Movie must have at least 2000 votes\"\n    else:\n        return \"Entered value is not valid.\"\n\n## FINANCIAL DATA FROM ACTORS \n@app.get('/get_actor/{nombre_actor}')\ndef get_actor(nombre_actor):\n    name = string_transformation(nombre_actor)\n    actor_financial[\"transformed_name\"] = [string_transformation(x) for x in actor_financial[\"name\"]]\n    if name in actor_financial[\"transformed_name\"].unique():\n        index = (actor_financial[\"transformed_name\"] == name).idxmax()\n        return {\n            'actor':actor_financial[\"name\"][index], \n            'films':actor_financial[\"films\"][index].item(), \n            'total_return':actor_financial[\"total_return\"][index].round(2).item(), \n            
'average_return':actor_financial[\"average_return\"][index].item()\n            } \n    else:\n        return \"Entered value is not valid.\"\n\n## FINANCIAL DATA FROM DIRECTORS\n@app.get('/get_director/{nombre_director}')\ndef get_director(nombre_director):\n    name = string_transformation(nombre_director)\n    director_financial[\"transformed_name\"] = [string_transformation(x) for x in director_financial[\"name\"]]\n    if name in director_financial[\"transformed_name\"].unique():\n        index = (director_financial[\"transformed_name\"] == name).idxmax() # obtain the index corresponding to the name\n        df_director = df_crew[df_crew[\"job\"] == \"Director\"] # filter jobs\n        df_movies_director = df_director[[\"name\", \"id\"]].merge(df_movies, how=\"left\", on =\"id\") # join the movies df with the director's name\n        df_movies_director.fillna(\"\", inplace=True) # replace nan with blank spaces\n\n        ## transforming the director's name so it can match the input\n        df_movies_director[\"transformed_director\"] = [string_transformation(x) for x in df_movies_director[\"name\"]] # with pandas series, we need to use comprehension lists or apply()\n\n        ## filter the movies whose transformed name matches the input name\n        movies = df_movies_director[df_movies_director[\"transformed_director\"] == name]\n        movies = movies[[\"title\",\"release_year\", \"return\", \"budget\", \"revenue\"]]\n        movies[\"release_year\"] = [int(x) for x in movies[\"release_year\"]] #change years like 1990.0 to 1990\n        return {\n            'director':director_financial[\"name\"][index], \n            'total return':director_financial[\"total_return\"][index].round(2).item(), \n            'films': movies.to_dict(orient='records') # orient=records doesn't show id and objects type\n            }\n    else:\n        return \"Entered value is not valid.\"\n    \n## MOVIE RECOMMENDATION\n\n@app.get('/recomendacion/{titulo}')\ndef recommendations(titulo):\n    global feature_matrix\n    global tfidf_fit\n    # setting these variables as global is useful because we only want to calculate them one time\n    # if these variables are calculated inside this function, they will replace the former values, in this case None\n    if feature_matrix is None or tfidf_fit is None: \n        parameters = feat_matrix(df_train[\"corpus\"])\n        tfidf_fit = parameters[0]\n        feature_matrix = parameters[1]\n\n    recommendations = get_recommendations(titulo, tfidf_fit=tfidf_fit, feature_matrix=feature_matrix)\n    return recommendations\n\n\n    ","repo_name":"cristhianc001/Movie-Recommendation-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"14557800144","text":"plik = open(\"sygnaly.txt\",\"r\")\r\nstrings = []\r\nfor linia in plik:\r\n    linia = linia.strip()\r\n    strings.append(linia)\r\n\r\n\r\nwynik = []\r\nfor string in strings:\r\n    for letter in string:\r\n        ver = True\r\n        for i in range(len(string)):\r\n            diff = abs(ord(letter) - ord(string[i]))\r\n            #print(f\"{diff} is the difference between {letter} and {string[i]}\")\r\n            if diff > 10:\r\n                ver = False\r\n                break\r\n        if ver == True:\r\n            wynik.append(string)\r\nprint(wynik)\r\n","repo_name":"artpods56/Matura-Informatyka-Python","sub_path":"2018 Maj - Rozszerzenie/zad4_3.py","file_name":"zad4_3.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5102353182","text":"#cleverLily\nage = int(input())\nlaundry_price = float(input())\ntoys_price = int(input())\nmoney_count = 0\ntoys_count = 
0\nbrother_reket = 0\n\nfor i in range(1, age + 1):\n if i % 2 == 0:\n money_count += (10 * i)\n brother_reket += 1\n else:\n toys_count += 1\n \ntoys_count *= toys_price\nsubtotal = (toys_count + (money_count / 2)) - brother_reket\ntotal = abs(subtotal - laundry_price)\n\nif subtotal >= laundry_price:\n print(f'Yes! {total:.2f}')\nelse:\n print(f'No! {total:.2f}')\n\n#10, 170, 6","repo_name":"byAbaddon/Basics-Course-Python-March-2020","sub_path":"4.0 Loops/11-cleverLily.py","file_name":"11-cleverLily.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18597496823","text":"import fnmatch\nimport os\nimport re\nfrom random import random\nimport numpy as np\nimport functions as F\n\n\n\nclass TupleDataset:\n\n def __init__(self, filelist):\n self._graphs = [f[0] for f in filelist]\n self._labels = [f[1] for f in filelist]\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n return [([read_graph(g), read_label(l)]) \n for g, l in zip(self._graphs[index], self._labels[index])] \n else:\n return ([read_graph(self._graphs[index]),\n read_label(self._labels[index])])\n\n def __len__(self):\n return len(self._graphs)\n\n\n\nclass GraphDataset:\n\n def __init__(self, dirname):\n names = os.listdir(dirname)\n _graphs = natsorted(fnmatch.filter(names, '*_graph.txt'))\n self._filelist = [dirname+g for g in _graphs]\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n return [read_graph(f) for f in self._filelist[index]] \n else:\n return read_graph(self._filelist[index])\n \n def __len__(self):\n return len(self._filelist)\n\n\n\ndef read_graph(filepath):\n with open(filepath) as fd:\n dim = int(fd.readline().split()[0])\n Adj = np.zeros((dim, dim))\n row = fd.readline().split()\n for i in range(dim):\n Adj[i:] = np.array(row)\n row = fd.readline().split()\n return Adj\n\n\ndef read_label(filepath):\n with open(filepath) as fd:\n return int(fd.readline().split()[0])\n\n\ndef get_dataset(dirname, test_ratio=0):\n train_filelist = []\n test_filelist = []\n\n names = os.listdir(dirname)\n graphs = natsorted(fnmatch.filter(names, '*_graph.txt'))\n labels = natsorted(fnmatch.filter(names, '*_label.txt'))\n\n for g, l in zip(graphs, labels):\n if random() > test_ratio:\n train_filelist.append((dirname+g, dirname+l))\n else:\n test_filelist.append((dirname+g, dirname+l))\n\n train_datasets = TupleDataset(train_filelist)\n test_datasets = TupleDataset(test_filelist)\n\n if test_ratio == 0:\n return train_datasets\n else:\n return train_datasets, test_datasets\n\n\ndef natsorted(filelist):\n prog = re.compile('(\\d+)_\\w*.txt')\n try:\n return sorted(filelist, key=lambda l: int(re.match(prog, l).group(1)))\n except AttributeError:\n raise SyntaxError\n\n","repo_name":"IsseiNAKASONE/PFNinternship2019CodingTask","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23631570824","text":"#!/usr/bin/env python\n\nimport sys\n\nCC_RED_BEGIN = '\\033[0;31m'\nCC_GREEN_BEGIN = '\\033[0;32m'\nCC_YELLOW_BEGIN = '\\033[0;33m'\nCC_BLUE_BEGIN = '\\033[0;34m'\nCC_END = '\\033[0m'\n\n\ndef createMap(width, height):\n ret = []\n for ln in xrange(height):\n ret.append([0] * width)\n return ret\n\n\ndef printNode(node):\n s = '%02X' % (node)\n if node == 1:\n s = CC_RED_BEGIN + s + CC_END\n elif node == 2:\n s = CC_GREEN_BEGIN + s + CC_END\n elif node == 
3:\n s = CC_YELLOW_BEGIN + s + CC_END\n elif node == 4:\n s = CC_BLUE_BEGIN + s + CC_END\n sys.stdout.write(s)\n\n\ndef printSpace():\n sys.stdout.write(' ')\n\n\ndef printLf():\n sys.stdout.write('\\n')\n\n\ndef printMap(mp):\n for ln in mp:\n for node in ln:\n printNode(node)\n printSpace()\n printLf()\n sys.stdout.flush()\n\n\ndef getHeight(mp):\n return len(mp)\n\n\ndef getWidth(mp):\n return len(mp[0])\n\n\ndef setNode(mp, x, y, v):\n mp[y][x] = v\n\n\ndef astar(mp, start, end):\n width = getWidth(mp)\n height = getHeight(mp)\n\n def getH(node, end):\n return (abs(node[0] - end[0]) + abs(node[1] - end[1])) * 10\n\n openNodes = dict()\n closeNodes = dict()\n\n openNodes[start] = {'parent': None, 'g': 0, 'h': getH(start, end)}\n openNodes[start]['f'] = openNodes[start]['g'] + openNodes[start]['h']\n\n while (not end in closeNodes) and len(openNodes) > 0:\n minFNode = None\n minFNodeInfo = None\n for openNode, openNodeInfo in openNodes.iteritems():\n if not minFNode:\n minFNode = openNode\n minFNodeInfo = openNodeInfo\n else:\n if openNodeInfo['f'] < openNodes[minFNode]['f']:\n minFNode = openNode\n minFNodeInfo = openNodeInfo\n\n closeNodes[minFNode] = openNodes.pop(minFNode)\n\n nextNodes = {\n (minFNode[0] - 1, minFNode[1] - 1): 14,\n (minFNode[0] - 1, minFNode[1]): 10,\n (minFNode[0] - 1, minFNode[1] + 1): 14,\n (minFNode[0], minFNode[1] - 1): 10,\n (minFNode[0], minFNode[1] + 1): 10,\n (minFNode[0] + 1, minFNode[1] - 1): 14,\n (minFNode[0] + 1, minFNode[1]): 10,\n (minFNode[0] + 1, minFNode[1] + 1): 14}\n\n for nextNode, incG in nextNodes.iteritems():\n if nextNode[0] < 0 or nextNode[0] >= height or nextNode[1] < 0 or nextNode[1] >= width:\n continue\n\n if mp[minFNode[0]][nextNode[1]] == 4 or mp[nextNode[0]][minFNode[1]] == 4 or mp[nextNode[0]][\n nextNode[1]] == 4 or nextNode in closeNodes:\n continue\n\n nextNodeG = minFNodeInfo['g'] + incG\n if not nextNode in openNodes:\n nextNodeInfo = {'parent': minFNode}\n nextNodeInfo['g'] = nextNodeG\n nextNodeInfo['h'] = getH(nextNode, end)\n nextNodeInfo['f'] = nextNodeInfo['g'] + nextNodeInfo['h']\n openNodes[nextNode] = nextNodeInfo\n else:\n nextNodeInfo = openNodes[nextNode]\n if nextNodeG < nextNodeInfo['g']:\n nextNodeInfo['parent'] = minFNode\n nextNodeInfo['g'] = nextNodeG\n nextNodeInfo['f'] = nextNodeInfo['g'] + nextNodeInfo['h']\n\n return closeNodes\n\n\ndef main():\n mp = createMap(20, 20)\n mp[1][3] = 4\n mp[2][3] = 4\n mp[3][3] = 4\n mp[12][15] = 4\n mp[12][14] = 4\n mp[12][13] = 4\n mp[11][13] = 4\n mp[10][13] = 4\n mp[13][12] = 4\n mp[14][11] = 4\n mp[15][10] = 4\n mp[9][13] = 4\n mp[8][13] = 4\n mp[7][13] = 4\n mp[6][13] = 4\n mp[8][5] = 4\n mp[8][6] = 4\n mp[8][7] = 4\n mp[8][4] = 4\n mp[8][3] = 4\n mp[9][7] = 4\n mp[15][8] = 4\n mp[15][7] = 4\n mp[15][6] = 4\n mp[12][7] = 4\n mp[12][8] = 4\n mp[12][9] = 4\n mp[12][10] = 4\n mp[5][10] = 4\n mp[5][9] = 4\n mp[5][8] = 4\n mp[5][7] = 4\n mp[4][7] = 4\n mp[3][7] = 4\n mp[2][7] = 4\n # printMap(mp)\n start = (0, 0)\n end = (17, 19)\n closeNodes = astar(mp, start, end)\n if end in closeNodes:\n curNode = end\n while curNode:\n curNodeInfo = closeNodes[curNode]\n mp[curNode[0]][curNode[1]] = 3\n curNode = curNodeInfo['parent']\n printMap(mp)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"t5w0rd/codelab","sub_path":"astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9272660182","text":"# Databricks notebook source\n# MAGIC 
%md-sandbox\n# MAGIC\n# MAGIC
\n# MAGIC \"Databricks\n# MAGIC
\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Stored Views\n# MAGIC\n# MAGIC This notebook briefly reviews how stored views are created and managed. Recall that stored views differ from DataFrames and temp views in that they are persisted to the database (so other users can leverage pre-defined logic to materialize results). A view registers the logic required to compute a result. Views defined against Delta Lake sources are guaranteed to always query the latest version of each data source.\n# MAGIC\n# MAGIC The goal of this notebook is to generate a view that analysts at our partner gyms can use to explore how the use of Moovio devices and workouts influence gym activity.\n# MAGIC\n# MAGIC \n# MAGIC\n# MAGIC ## Learning Objectives\n# MAGIC By the end of this lesson, students will be able to:\n# MAGIC - Display the query plan associated with a view\n# MAGIC - Describe how results are materialized from Delta Lake tables\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Setup\n\n# COMMAND ----------\n\n# MAGIC %run ../Includes/Classroom-Setup-5.1\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Examine the Gym Logs\n# MAGIC\n# MAGIC Let's start by reviewing the schema of the gym logs.\n\n# COMMAND ----------\n\ngymDF = spark.table(\"gym_mac_logs\")\ngymDF.printSchema()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Spark DataFrames and views are nearly identical constructs. Calling **`explain`** on a DataFrame shows that the source table is a set of instructions for deserializing the files that contain the data.\n\n# COMMAND ----------\n\ngymDF.explain(\"formatted\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Examine the Workout Data\n# MAGIC\n# MAGIC Rather than pulling every possible metric into the view, we'll build a summary of the values the gym analysts are likely to be interested in.\n# MAGIC\n# MAGIC The data we receive from the gyms records the first and last timestamps seen for each user device, identified by its MAC address.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM gym_mac_logs\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Create a Query\n# MAGIC\n# MAGIC The **`completed_workouts`** table shows the start and end times of each user's workouts.\n# MAGIC\n# MAGIC Use the cell below to create a query that returns:\n# MAGIC - Each date on which a user completed at least one workout\n# MAGIC - The earliest start time of a workout\n# MAGIC - The latest workout **`end_time`** for each day\n# MAGIC - The list of all workouts the user completed each day\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC -- TODO\n# MAGIC SELECT user_id, to_date(start_time) date, collect_set(workout_id), min(start_time), max(end_time)\n# MAGIC FROM completed_workouts\n# MAGIC GROUP BY user_id, to_date(start_time)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Extend the Query\n# MAGIC\n# MAGIC Now we'll join this data with the MAC logs sent to us by the gyms to create the view.\n# MAGIC\n# MAGIC We'll keep **`mac_address`** as the identifier, which we can retrieve from the **`user_lookup`** table.\n# MAGIC\n# MAGIC We'll also add columns that calculate the total minutes the user spent at the gym and the total minutes elapsed between the start of their first workout and the end of their last workout.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT gym, mac_address, date, workouts, (last_timestamp - first_timestamp)/60 minutes_in_gym, (to_unix_timestamp(end_workout) - to_unix_timestamp(start_workout))/60 minutes_exercising\n# MAGIC FROM gym_mac_logs c\n# MAGIC INNER JOIN (\n# MAGIC SELECT b.mac_address, to_date(start_time) date, collect_set(workout_id) workouts, min(start_time) start_workout, max(end_time) end_workout\n# MAGIC FROM completed_workouts a\n# MAGIC INNER JOIN user_lookup b\n# MAGIC ON a.user_id = b.user_id\n# MAGIC GROUP BY mac_address, to_date(start_time)\n# MAGIC ) d\n# MAGIC ON c.mac = d.mac_address AND to_date(CAST(c.first_timestamp AS timestamp)) = d.date\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Register a View for the Final Logic\n# MAGIC\n# MAGIC Use the query above to create a (non-temporary) view named **`gym_user_stats`**.\n# MAGIC\n# MAGIC **`CREATE VIEW IF NOT EXISTS gym_user_stats AS (...)`**\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC -- TODO\n# MAGIC CREATE VIEW IF NOT EXISTS gym_user_stats AS (\n# MAGIC SELECT gym, mac_address, date, workouts, (last_timestamp - first_timestamp)/60 minutes_in_gym, (to_unix_timestamp(end_workout) - to_unix_timestamp(start_workout))/60 minutes_exercising\n# MAGIC FROM gym_mac_logs c\n# MAGIC 
INNER JOIN (\n# MAGIC SELECT b.mac_address, to_date(start_time) date, collect_set(workout_id) workouts, min(start_time) start_workout, max(end_time) end_workout\n# MAGIC FROM completed_workouts a\n# MAGIC INNER JOIN user_lookup b\n# MAGIC ON a.user_id = b.user_id\n# MAGIC GROUP BY mac_address, to_date(start_time)\n# MAGIC ) d\n# MAGIC ON c.mac = d.mac_address AND to_date(CAST(c.first_timestamp AS timestamp)) = d.date\n# MAGIC )\n\n# COMMAND ----------\n\n# Check your work\nassert spark.sql(\"SHOW TABLES\").filter(\"tableName='gym_user_stats'\").count() >= 1, \"View 'gym_user_stats' does not exist.\"\nassert spark.sql(\"SHOW TABLES\").filter(\"tableName='gym_user_stats'\").first()[\"isTemporary\"]==False, \"View 'gym_user_stats' should not be temporary.\"\nassert spark.sql(\"DESCRIBE EXTENDED gym_user_stats\").filter(\"col_name='Type'\").first()['data_type']=='VIEW', \"Found a table 'gym_user_stats' when a view was expected.\"\nassert spark.table(\"gym_user_stats\").count() == 304, \"Incorrect query used for view 'gym_user_stats'.\"\nprint(\"All tests passed.\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC You can see that a view simply stores the Spark plan for the query.\n\n# COMMAND ----------\n\nspark.table(\"gym_user_stats\").explain(\"formatted\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC When we execute a query against this view, we process the plan to generate the logically correct result.\n# MAGIC\n# MAGIC Note that while the data may end up in the Delta cache, this result is not guaranteed to be persisted, and it is only cached for the currently active cluster.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT *\n# MAGIC FROM gym_user_stats\n# MAGIC WHERE gym = 5\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## An Aside on ACLs\n# MAGIC\n# MAGIC Databricks has extensive support for ACLs, but by default they are not applied on standard data engineering clusters. Running on a SQL Warehouse is an alternative.\n\n# COMMAND ----------\n\nimport py4j\n\ntry:\n    spark.sql(\"SHOW GRANT ON VIEW gym_user_stats\")\n    \nexcept py4j.protocol.Py4JJavaError as e:\n    print(\"Error: \" + e.java_exception.getMessage())\n    print(\"Solution: Consider enabling Table Access Control to demonstrate this feature.\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The permissions on this view aren't especially important, but we can see that the bronze table (which contains all of our raw data) is currently stored the same way.\n# MAGIC\n# MAGIC Again, ACLs are primarily intended for managing data access within the Databricks workspace for BI and data science use cases. For sensitive data engineering data, we recommend restricting access to storage containers using cloud identity management.\n\n# COMMAND ----------\n\ntry:\n    spark.sql(\"SHOW GRANT ON TABLE bronze\")\n    \nexcept py4j.protocol.Py4JJavaError as e:\n    print(\"Error: \" + e.java_exception.getMessage())\n    print(\"Solution: Consider enabling Table Access Control to demonstrate this feature.\")\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC Run the cell below to delete the tables and files associated with this lesson.\n\n# COMMAND ----------\n\nDA.cleanup()\n\n# COMMAND ----------\n\n# MAGIC %md-sandbox\n# MAGIC © 2022 Databricks, Inc. All rights reserved.
\n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation.
\n# MAGIC
\n# MAGIC Privacy Policy | Terms of Use | Support\n","repo_name":"skotani-db/partner-elevate","sub_path":"Advanced-Data-Engineering-with-Databricks-JA/04 - Gold Query Layer/ADE 4.1 - Stored Views.py","file_name":"ADE 4.1 - Stored Views.py","file_ext":"py","file_size_in_byte":9823,"program_lang":"python","lang":"ja","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"72806740993","text":"import duckdb\nimport numpy as np\nimport decorators\nimport torch\nimport duckcsr\n\nclass CSR:\n def __init__(self, v, e, w):\n self.v = v\n self.e = e\n self.w = w\n\ndef connect(path = \":memory:\", read_only = False, config = {}):\n return Connection(path, read_only, config)\n\nclass Connection:\n \"\"\" A wrapper for DuckDB connections\n \"\"\"\n\n def __init__(self, path = \":memory:\", read_only = False, config = {}):\n self.con = duckdb.connect(path, read_only, config)\n\n def sql(self, query, alias = \"query_relation\"):\n return self.con.sql(query, alias)\n\n def execute(self, query, parameters = None, multiple_parameter_sets = False):\n return self.con.execute(query, parameters, multiple_parameter_sets)\n\n def executemany(self, query, parameters = None):\n return self.con.executemany(query, parameters)\n\n def close(self):\n return self.con.close()\n\n def _get_csr_v(self, csr_id):\n query = \"SELECT csrv FROM get_csr_v({});\".format(csr_id)\n return self.sql(query).torch()['csrv']\n\n def _get_csr_e(self, csr_id):\n query = \"SELECT csre FROM get_csr_e({});\".format(csr_id)\n return self.sql(query).torch()['csre']\n\n def _get_csr_w(self, csr_id):\n query = f\"SELECT csr_get_w_type(csr_id)\"\n # TODO: call different table functions based on weight type.\n query = \"SELECT csrw FROM get_csr_w({});\".format(csr_id)\n return self.sql(query).torch()['csrw']\n\n def get_csr(self, csr_id):\n query = f\"SELECT * FROM get_csr_ptr({csr_id})\"\n v, e, w, vsize, wtype = self.sql(query).fetchall()\n v, e, w = duckcsr.get_csr(v[0], e[0], w[0], vsize[0], wtype[0])\n v = torch.from_numpy(v)\n e = torch.from_numpy(e)\n w = torch.from_numpy(w)\n return CSR(v,e,w)\n\n def _get_vtablenames(self, pgname):\n query = \"SELECT vtables FROM get_pg_vtablenames({});\".format(pgname)\n return self.sql(query).fetchall()#numpy()[\"vtables\"].data\n\n def _get_etablenames(self, pgname):\n query = \"SELECT etables FROM get_pg_etablenames({});\".format(pgname)\n return self.sql(query).fetchall()#numpy()[\"etables\"].data\n\n def _get_vcolnames(self, pgname, table_name):\n query = \"SELECT colnames FROM get_pg_vcolnames({}, {});\".format(pgname, table_name)\n return self.sql(query).fetchnumpy()[\"colnames\"].data\n\n def _get_ecolnames(self, pgname, table_name):\n query = \"SELECT colnames FROM get_pg_ecolnames({}, {});\".format(pgname, table_name)\n return self.sql(query).fetchnumpy()[\"colnames\"].data\n\n def _get_features(self, tablename, cols, drop_non_numeric = True):\n NUMERIC_TYPES = [duckdb.typing.BIGINT, duckdb.typing.DOUBLE, duckdb.typing.TINYINT, duckdb.typing.SMALLINT, duckdb.typing.UBIGINT, duckdb.typing.FLOAT, duckdb.typing.UINTEGER, duckdb.typing.BOOLEAN, duckdb.typing.USMALLINT, duckdb.typing.INTEGER, duckdb.typing.UTINYINT]\n colstr = \", \".join(cols)\n query = f\"SELECT {colstr} FROM {tablename};\"\n query_result = self.sql(query)\n cols_to_drop = []\n for i in range(len(cols)):\n if query_result.types[i] not in NUMERIC_TYPES:\n if drop_non_numeric:\n cols_to_drop.append(cols[i])\n else:\n raise Exception(f\"Found a non numeric column {query_result.columns[i]} in table 
{tablename}\")\n if len(cols_to_drop) == 0:\n return query_result.torch()\n else:\n cols = np.setdiff1d(cols, cols_to_drop)\n colstr = \", \".join(cols)\n query = f\"SELECT {colstr} FROM {tablename};\"\n return self.sql(query).torch()\n\n def get_pyg(self, pgname, csr_id, drop_non_numeric = True):\n import torch\n from torch_geometric.data import Data\n from torch_geometric.typing import SparseTensor\n\n vtables = self._get_vtablenames(pgname)\n etables = self._get_etablenames(pgname)\n csr = self.get_csr(csr_id)\n if len(vtables) == 1 and len(etables) == 1:\n if csr.w == None or len(csr.w) != len(csr.e):\n w = torch.ones(len(csr.e))\n csrtensor = torch.sparse_csr_tensor(csr.v, csr.e, w)\n #adj_t = SparseTensor(rowptr = csr.v, col = csr.e, value = csr.w)\n else:\n #adj_t = SparseTensor(rowptr = csr.v, col = csr.e)\n csrtensor = torch.sparse_csr_tensor(csr.v, csr.e, csr.w)\n coo = csrtensor.to_sparse_coo()\n cols = self._get_vcolnames(pgname, vtables[0][0])\n node_features = self._get_features(vtables[0][0], cols, drop_non_numeric)\n cols = self._get_ecolnames(pgname, etables[0][0])\n edge_features = self._get_features(etables[0][0], cols, drop_non_numeric)\n \n # lets try if there is a huge difference in exec time\n # I came up with two ways.\n # the first is to convert the dict to a 2-dim numpy array first and then cast this numpy array to tensor\n # this method runs faster for smaller data sets.\n # the second if to use torch.stack.\n # this method runs faster for bigger data sets.\n # I decided to use torch.stack\n # first, it leads to shorter and more readable code.\n # second, most of the runing time will be spent on building the SparseTensor.\n # So, especially for smaller data, there wont be some significant gains in performance.\n # For sf = 100, the first will take 27ms while the second will take 19ms.\n # TODO: maybe more bigger data sets?\n \"\"\"\n newvalues = []\n for i in node_features.values():\n newvalues.append(i.numpy())\n datax = torch.from_numpy(np.array(newvalues))\n newvalues = []\n for i in edge_features.values():\n newvalues.append(i.numpy())\n edge_attr = torch.from_numpy(np.array(newvalues))\"\"\"\n datax = torch.stack(list(node_features.values()), dim = 1)\n edge_attr = torch.stack(list(edge_features.values()), dim = 1)\n data = Data(x = datax,edge_index = coo.indices(), edge_attr = edge_attr)\n #data.adj_t = adj_t\n return data\n else:\n raise Exception(\"heterogeneous graph is not supported yet\")\n\n def get_dgl(self, pgname, csr_id, drop_non_numeric = True):\n import dgl\n vtables = self._get_vtablenames(pgname)\n etables = self._get_etablenames(pgname)\n csr = self.get_csr(csr_id)\n\n if len(vtables) == 1 and len(etables) == 1:\n ## homogeneous graph\n ## TODO: may add a new udf which returns types of each cols\n cols = self._get_vcolnames(pgname, vtables[0][0])\n node_features = self._get_features(vtables[0][0], cols, drop_non_numeric)\n cols = self._get_ecolnames(pgname, etables[0][0])\n edge_features = self._get_features(etables[0][0], cols, drop_non_numeric)\n g = dgl.graph(('csr', (csr.v[:-1], csr.e, [])))\n for k, v in node_features.items():\n g.ndata[k] = v.reshape((v.shape[0], 1))\n for k, v in edge_features.items():\n g.edata[k] = v.reshape((v.shape[0], 1))\n return g\n else:\n ## heterogeneous graph\n raise Exception(f\"Call get_dgl_hetero to get heterograph\")\n\n def get_dgl_hetero(self, pgname, csr_dict, drop_non_numeric = True):\n \"\"\" csr_dict is a dict.\n keys are csr_ids, values are string triplets (src_type, edge_type, dst_type)\n 
\"\"\"\n import dgl\n vtables = self._get_vtablenames(pgname)\n etables = self._get_etablenames(pgname)\n\n if len(vtables) == 1 and len(etables) == 1:\n raise Exception(f\"Call get_dgl_hetero to get homogeneous graph\")\n else:\n data_dict = {}\n for k, v in csr_dict.items():\n csr = self.get_csr(k)\n data_dict[v] = (\"csr\", (csr.v[:-1], csr.e, []))\n g = dgl.heterograph(data_dict)\n for tablename in vtables:\n tablename = tablename[0]\n cols = self._get_vcolnames(pgname, tablename)\n features = self._get_features(tablename, cols, drop_non_numeric)\n for k, v in features.items():\n g.ndata[tablename + '.' + k] = {tablename: v.reshape((v.shape[0], 1))}\n for tablename in etables:\n tablename = tablename[0]\n cols = self._get_ecolnames(pgname, tablename)\n features = self._get_features(tablename, cols, drop_non_numeric)\n for k, v in features.items():\n g.edata[tablename + '.' + k] = {tablename: v.reshape((v.shape[0], 1))}\n return g\n\n def _create_csr(self, csr_id, edge_table, src_key, dst_key, src_ref_table, src_ref_col, dst_ref_table, dst_ref_col):\n \"\"\" internal call \"\"\"\n query = f\"\"\"SELECT CREATE_CSR_EDGE(\n {csr_id},\n (SELECT count(srctable.{src_ref_col}) FROM {src_ref_table} srctable),\n CAST (\n (SELECT sum(CREATE_CSR_VERTEX(\n {csr_id},\n (SELECT count(srctable.{src_ref_col}) FROM {src_ref_table} srctable),\n sub.dense_id,\n sub.cnt)\n )\n FROM (\n SELECT srctable.rowid as dense_id, count(edgetable.{src_key}) as cnt\n FROM {src_ref_table} srctable\n LEFT JOIN {edge_table} edgetable ON edgetable.{src_key} = srctable.{src_ref_col}\n GROUP BY srctable.rowid) sub\n )\n AS BIGINT),\n srctable.rowid,\n dsttable.rowid,\n edgetable.rowid) as temp\n FROM {edge_table} edgetable\n JOIN {src_ref_table} srctable on srctable.{src_ref_col} = edgetable.{src_key}\n JOIN {dst_ref_table} dsttable on dsttable.{dst_ref_col} = edgetable.{dst_key} ;\"\"\"\n self.sql(query).fetchnumpy()\n\n @decorators.require_rowid\n def create_csr(self, edge_table, src_key, dst_key, src_ref_table, src_ref_col, dst_ref_table = None, dst_ref_col = None, csr_id = None):\n if csr_id == None:\n ## TODO: add a new udf to get the a possible csr id.\n csr_id = 1\n ### we allow the dst_ref_table and dst_ref_col to be none\n ### in the cases if src and dst are the same table and col\n if dst_ref_table == None:\n dst_ref_table = src_ref_table\n if dst_ref_col == None:\n dst_ref_col = src_ref_col\n if type(edge_table) == str:\n ## a duckdb table\n self._create_csr(csr_id, edge_table, src_key, dst_key, src_ref_table, src_ref_col, dst_ref_table, dst_ref_col)\n else:\n ## python objects\n _edgedf = edge_table\n _srcdf = src_ref_table\n _dstdf = dst_ref_table\n self._create_csr(csr_id, \"_edgedf\", src_key, dst_key, \"_srcdf\", src_ref_col, \"_dstdf\", dst_ref_col)\n return csr_id\n\n def save_dgl_homograph(self, g, node_table_name, edge_feature_table_name, edge_table_name):\n _data_dict = {}\n ## save node features\n for k, v in g.ndata.items():\n _data_dict[k] = v.numpy().reshape(len(v))\n query = \"CREATE TABLE {} AS SELECT * FROM _data_dict\"\n self.sql(query.format(node_table_name))\n\n ## save edge features\n _data_dict = {}\n for k, v in g.edata.items():\n _data_dict[k] = v.numpy().reshape(len(v))\n query = \"CREATE TABLE {} AS SELECT * FROM _data_dict\"\n self.sql(query.format(edge_feature_table_name))\n\n ## construct edge table\n _data_dict = {}\n _src, _dst = g.adj_tensors(\"coo\")\n _src = _src.numpy()\n _dst = _dst.numpy()\n _data_dict = {\"src\": _src, \"_dst\": _dst}\n query = \"CREATE TABLE {} AS SELECT * 
FROM _data_dict\"\n self.sql(query.format(edge_table_name))\n\n def save_dgl_heterograph(self, g):\n _data_dict = {}\n for fname, nested in g.ndata.items():\n for t, feat in nested.items():\n if t in _data_dict:\n _data_dict[t][fname] = feat.numpy().reshape(len(feat))\n else:\n _data_dict[t] = {}\n _data_dict[t][fname] = feat.numpy().reshape(len(feat))\n\n query = \"CREATE TABLE {} AS SELECT * FROM _data\"\n for k, _data in _data_dict.items():\n self.sql(query.format(k))\n\n _data_dict = {}\n for fname, nested in g.edata.items():\n for t, feat in nested.items():\n if t in _data_dict:\n _data_dict[t][fname] = feat.numpy().reshape(len(feat))\n else:\n _data_dict[t] = {}\n _data_dict[t][fname] = feat.numpy().reshape(len(feat))\n\n query = \"CREATE TABLE {} AS SELECT * FROM _data\"\n for k, _data in _data_dict.items():\n self.sql(query.format(k))\n\n _data_dict = {}\n for etype in g.etypes:\n _src, _dst = g.adj_tensors(\"coo\", etype)\n _src = _src.numpy()\n _dst = _dst.numpy()\n _data_dict = {\"src\": _src, \"_dst\": _dst}\n query = \"CREATE TABLE {} AS SELECT * FROM _data_dict\"\n self.sql(query.format(etype))\n","repo_name":"vlowingkloude/pyduckpgq","sub_path":"duckpgq.py","file_name":"duckpgq.py","file_ext":"py","file_size_in_byte":13521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"23575040601","text":"def compute(cake):\r\n R, C = len(cake), len(cake[0])\r\n\r\n rows = [i for i in range(R) if set(cake[i]) != set([\"?\"])]\r\n \r\n # first fill the non-empty lines\r\n for i in rows: \r\n cols = [j for j in range(C) if cake[i][j] != \"?\"] + [C]\r\n \r\n for j in range(cols[0]):\r\n cake[i][j] = cake[i][cols[0]]\r\n \r\n for k in range(len(cols)-1):\r\n for j in range(cols[k]+1, cols[k+1]):\r\n cake[i][j] = cake[i][cols[k]]\r\n\r\n # then fill the empty lines\r\n for i in range(rows[0]):\r\n cake[i] = cake[rows[0]][:]\r\n\r\n rows.append(R)\r\n for k in range(len(rows)-1):\r\n for j in range(rows[k]+1, rows[k+1]):\r\n cake[j] = cake[rows[k]][:]\r\n \r\n return cake\r\n \r\n \r\nT = int(input())\r\nfor t in range(1,T+1):\r\n R,C = [int(x) for x in input().split()]\r\n cake = [input() for i in range(R)]\r\n cake = [list(x) for x in cake]\r\n\r\n cake = compute(cake)\r\n print(\"Case #%d:\"%t)\r\n for r in cake:\r\n print(\"\".join(r))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_203/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74650063235","text":"#https://leetcode.com/problems/sudoku-solver/solutions/1417073/97-faster-clean-concise-well-explained/\n#different than valid sudoku where we only check existing numbers, we are solving the sudoku here\n#meaning that we will have to put different numbers in blank space to check\n#this will be a backtracking problem because we will have to check all spaces to find a possible solution (go through all possible solutions)\n#was on the right track, but got confused as i didn't think straight, i thought if we had to go through all possible solution will be to inefficient\n#thought there was a \"smarter/faster\" to solve\n\n#93% speed, 90% memory\nfrom collections import defaultdict, deque\nclass Solution(object):\n def solveSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: None Do not return anything, modify board in-place instead.\n PAY ATTENTION TO DETAILS! 
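The CSR-to-PyG path in the `duckpgq.py` record above (build a `torch.sparse_csr_tensor`, convert to COO for `edge_index`, stack feature columns) can be exercised on its own. A minimal sketch with hand-built tensors standing in for the DuckPGQ query results; only public `torch`/`torch_geometric` APIs are assumed:

```python
# Sketch of the get_pyg conversion with synthetic inputs (assumed shapes only).
import torch
from torch_geometric.data import Data

# CSR of a 3-node, 4-edge graph: rowptr (csr.v), column indices (csr.e), weights
rowptr = torch.tensor([0, 2, 3, 4])
col = torch.tensor([1, 2, 0, 1])
w = torch.ones(len(col))  # get_pyg substitutes ones when weights are absent

csrtensor = torch.sparse_csr_tensor(rowptr, col, w)
coo = csrtensor.to_sparse_coo().coalesce()  # PyG wants COO-style edge_index

# feature columns stacked the same way get_pyg stacks its dict values
node_features = {"f0": torch.rand(3), "f1": torch.rand(3)}
edge_features = {"g0": torch.rand(4)}
x = torch.stack(list(node_features.values()), dim=1)          # (3, 2)
edge_attr = torch.stack(list(edge_features.values()), dim=1)  # (4, 1)

data = Data(x=x, edge_index=coo.indices(), edge_attr=edge_attr)
print(data)  # Data(x=[3, 2], edge_index=[2, 4], edge_attr=[4, 1])
```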
(made 2 mistakes: first didn't notice the board is made of strings not numbers, \n second when i copy and paste, i messed up the condition (used 2 rows in stead of rows and columns))\n \"\"\"\n #mistake: pay attention to details!! the \"numbers\" stored are not numbers, they are string!!\n #use a hashset to store all existing numbers\n rows = defaultdict(set)\n columns = defaultdict(set)\n blocks = defaultdict(set)\n spaces = deque() #use a deque to store blank spaces so we can go back to previous step like the ones we do in backtracking (easier to append/pop)\n\n #add all existing numbers to the hashset\n for r in range(9):\n for c in range(9):\n if board[r][c] == \".\":\n spaces.append((r,c))\n else:\n rows[r].add(board[r][c])\n columns[c].add(board[r][c])\n blocks[(r//3,c//3)].add(board[r][c])\n\n\n def dfs(): #use this recursie function to check all possible solution for blank spaces(deque)\n #base condition when its a possible solution:\n if not spaces: #no more spaces to check (all the others passed)\n return True\n\n r,c = spaces[0] #don't use popleft here, this is because we will need to pop within the for loop for it to balance with the append when cases failed, and we are reverting.\n #if we only have append, and we don't pop, we will end up appending repeating coordinates\n #check a possible num to traverse down:\n for num in {'1','2','3','4','5','6','7','8','9'}:\n #we have repeated numbers, so skip\n if num in rows[r] or num in columns[c] or num in blocks[(r//3,c//3)]:\n continue\n #we found a possible number\n board[r][c] = num\n rows[r].add(num)\n columns[c].add(num)\n blocks[(r//3,c//3)].add(num)\n spaces.popleft()\n\n #if it succeeded, return True\n if dfs(): \n return True\n #failed this num, we will remove everything and add the spot back to q to try other numbers \n else:\n board[r][c] = \".\"\n rows[r].remove(num)\n columns[c].remove(num)\n blocks[(r//3,c//3)].remove(num)\n spaces.appendleft((r,c))\n \n #if we gone through everything and dont return a True, means we failed everything so return False\n return False\n \n dfs()\n\n\n \n\n","repo_name":"biancafu/Algorithm","sub_path":"pramp/sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23462846081","text":"import copy\r\ninFile = open(\"C:\\\\Users\\\\Eric\\\\Downloads\\\\infile.in\", \"r\")\r\noutFile = open(\"C:\\\\Users\\\\Eric\\\\Downloads\\\\outfile.out\", \"w\")\r\ninString = inFile.readline()\r\nnumOfTestCase = int(inString)\r\n\r\nmapping = [['1', 'i', 'j', 'k', '-1'], ['i', '-1', 'k', '-j', '-1'], ['j', '-k', '-1', 'i', '-j'],\r\n ['k', 'j', '-i', '-1', '-k'], ['-1', '-i', '-j', '-k', '1']]\r\nindex = ['1', 'i', 'j', 'k', '-1']\r\n\r\ndef mapFunc(a, b):\r\n neg = (a.count('-') % 2) * '-'\r\n if a.count('-') == 1:\r\n a = a[1:]\r\n if a.count('-') == 2:\r\n a = a[2:]\r\n\r\n result = neg + mapping[index.index(a)][index.index(b)]\r\n if result.count('-') == 2:\r\n result = result[2:]\r\n return result\r\n\r\nfor i in range(numOfTestCase):\r\n numChar, multi = inFile.readline()[:-1].split(' ')\r\n multi = int(multi)\r\n inputString = inFile.readline()[:-1] * multi\r\n resultList = list(inputString)\r\n inputList = list(inputString)\r\n tmpResult = ''\r\n result = ''\r\n #print(inputString)\r\n for j in resultList:\r\n if tmpResult == '':\r\n tmpResult = j\r\n else:\r\n tmpResult = mapFunc(tmpResult, j)\r\n del inputList[0]\r\n if tmpResult == 'i':\r\n result += 
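The sudoku solver in the record above leans entirely on O(1) membership tests against per-row, per-column, and per-block hash sets. A standalone sketch of just that bookkeeping, using the same digit strings and `(r//3, c//3)` block keying as the record:

```python
from collections import defaultdict

rows, columns, blocks = defaultdict(set), defaultdict(set), defaultdict(set)

def place(r, c, num):
    rows[r].add(num)
    columns[c].add(num)
    blocks[(r // 3, c // 3)].add(num)

def is_legal(r, c, num):
    return (num not in rows[r]
            and num not in columns[c]
            and num not in blocks[(r // 3, c // 3)])

place(0, 0, '5')              # digits are strings, as the record stresses
print(is_legal(0, 8, '5'))    # False: same row
print(is_legal(1, 1, '5'))    # False: same 3x3 block
print(is_legal(4, 4, '5'))    # True
```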
'i'\r\n break\r\n \r\n tmpResult = ''\r\n resultList = copy.deepcopy(inputList)\r\n for j in resultList:\r\n if tmpResult == '':\r\n tmpResult = j\r\n else:\r\n tmpResult = mapFunc(tmpResult, j)\r\n del inputList[0]\r\n if tmpResult == 'j':\r\n result += 'j'\r\n break\r\n tmpResult = ''\r\n resultList = copy.deepcopy(inputList)\r\n for j in resultList:\r\n if tmpResult == '':\r\n tmpResult = j\r\n else:\r\n tmpResult = mapFunc(tmpResult, j)\r\n del inputList[0]\r\n if tmpResult == 'k':\r\n result += 'k'\r\n break\r\n tmpResult = ''\r\n resultList = copy.deepcopy(inputList)\r\n for j in resultList:\r\n if tmpResult == '':\r\n tmpResult = j\r\n else:\r\n tmpResult = mapFunc(tmpResult, j)\r\n del inputList[0]\r\n if len(inputList) == 0:\r\n result += tmpResult\r\n break\r\n print(\"Result:\", result)\r\n if result == \"ijk1\":\r\n outFile.write(\"Case #\"+str(i+1)+\": \"+\"YES\" + '\\n')\r\n elif len(resultList) == 0 and result == \"ijk\":\r\n outFile.write(\"Case #\"+str(i+1)+\": \"+\"YES\" + '\\n')\r\n else:\r\n outFile.write(\"Case #\"+str(i+1)+\": \"+\"NO\" + '\\n')\r\n \r\noutFile.close()\r\ninFile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_157/776.py","file_name":"776.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26927189353","text":"from BinaryTree.BT_Node import Node\ndef maxValue(root):\n if root is None:\n return\n temp=root\n while temp.right!=None:\n temp=temp.right\n\n return temp.val\nif __name__=='__main__':\n root = Node(10)\n root.left = Node(5)\n root.right = Node(15)\n root.right.left = Node(13)\n root.right.right = Node(18)\n root.left.left=Node(3)\n root.left.right=Node(7)\n root.left.right.left = Node(6)\n root.left.left.left = Node(1)\n max_value=maxValue(root)\n print(max_value)\n","repo_name":"Aditi-Here/Python-Codes","sub_path":"BST/max value in BST.py","file_name":"max value in BST.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73149259075","text":"#!/usr/bin/env python3\n\nimport tensorflow as tf\nfrom typing import Sequence\nimport numpy as np\nimport logging\nimport math\nimport pickle as pkl\nLOGGER = logging.getLogger(__name__)\n\n\nclass audio2text_GAN(object):\n AUDIO_HOLDER = 'audio_holder'\n TEXT_HOLDER = 'text_holder'\n LR_HOLDER = 'lr_holder'\n OP_D_TRAIN = 'op_d_train'\n OP_G_TRAIN = 'op_g_train'\n\n def __init__(\n self,\n audio_dim: int,\n text_dim: int,\n D_struct: Sequence[int],\n batch_size: int,\n logger=LOGGER,\n ):\n self.audio_dim = audio_dim\n self.text_dim = text_dim\n self.graph = tf.Graph()\n self.batch_size = batch_size\n self.D_struct = D_struct\n self.logger = logger\n with self.graph.as_default():\n self._build_graph()\n # assert audio_k_way == text_n_vocab\n assert audio_dim == text_dim\n self.dim = audio_dim\n self.config = tf.ConfigProto(log_device_placement=False)\n self.config.gpu_options.allow_growth = True\n self.sess = tf.Session(graph=self.graph, config=self.config)\n self.sess.run(tf.variables_initializer(\n var_list=self.graph.get_collection('variables')))\n self.saver = tf.train.Saver(\n var_list=self.graph.get_collection('variables'),\n max_to_keep=None)\n\n def _build_graph(self):\n # None stands for the batch\n self.text_holder = tf.placeholder(\n tf.float32,\n [None, self.text_dim],\n name=self.AUDIO_HOLDER,\n )\n self.audio_holder = tf.placeholder(\n tf.float32,\n 
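The GAN class above registers its placeholders and train ops under fixed name constants so that `train` can later fetch them back with `get_tensor_by_name`/`get_operation_by_name`. A minimal sketch of that TF1 pattern, assuming the `tensorflow.compat.v1` shim rather than the original TF1 import used by the record:

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

LR_HOLDER = 'lr_holder'  # same naming convention as the class constants above
graph = tf.Graph()
with graph.as_default():
    lr = tf.placeholder(tf.float32, [], name=LR_HOLDER)
    tf.identity(2.0 * lr, name='doubled')

with tf.Session(graph=graph) as sess:
    # fetch the tensors back purely by name, as train() does
    lr_place = graph.get_tensor_by_name(LR_HOLDER + ':0')
    out = graph.get_tensor_by_name('doubled:0')
    print(sess.run(out, feed_dict={lr_place: 3e-4}))  # ~0.0006
```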
[None, self.audio_dim],\n name=self.TEXT_HOLDER,\n )\n self.lr = tf.placeholder(\n tf.float32,\n [1],\n name=self.LR_HOLDER,\n )\n\n self.x_trans = self.Generator(self.text_holder)\n # instead of sampling, we concate the x_trans and audio_holder\n # together to generate the Discriminator's input\n batch_size = tf.shape(self.x_trans)[0]\n label_zero = tf.zeros([batch_size, 1], dtype=tf.float32)\n label_one = tf.ones([batch_size, 1], dtype=tf.float32)\n D_input = tf.concat(\n [self.x_trans, self.audio_holder],\n axis=0,\n name='input_concat')\n D_label = tf.concat([label_zero, label_one], axis=0, name='D_label')\n G_label = tf.concat([label_one, label_zero], axis=0, name='G_label')\n D_out = self.Discriminator(D_input)\n t_vars = tf.trainable_variables()\n self.d_vars = [var for var in t_vars if 'D_' in var.name]\n self.g_vars = [var for var in t_vars if 'G_' in var.name]\n\n self.D_Loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=D_label,\n logits=D_out,\n name='D_loss')\n self.G_Loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=G_label,\n logits=D_out,\n name='G_loss')\n\n self.train_D_op = tf.train.AdamOptimizer(self.lr).minimize(\n self.D_Loss,\n var_list=self.d_vars,\n name=self.OP_D_TRAIN,\n )\n\n self.train_G_op = tf.train.AdamOptimizer(self.lr).minimize(\n self.G_Loss,\n var_list=self.g_vars,\n name=self.OP_G_TRAIN,\n )\n return\n\n # Generator holds the Matrix W such that WX = Y\n def Generator(self, X):\n with tf.variable_scope('generator') as scope: # noqa\n W = tf.get_variable(\n 'G_W',\n shape=[None, self.text_dim, self.audio_dim],\n initializer=tf.keras.initializers.lecun_normal,\n )\n self.W = W\n self._update_w(0.99)\n x_trans = self.W @ X\n return x_trans\n\n # Discriminator holds the NN to discriminate two different sources\n def Discriminator(self, X):\n res = None\n with tf.variable_scope('discriminator') as scope: # noqa\n for order in range(len(self.D_struct)):\n if order == 0:\n W = tf.get_variable(\n 'D_W1',\n shape=[self.text_dim, self.D_struct[order]],\n initializer=tf.keras.initializers.lecun_normal,\n )\n b = tf.get_variable(\n 'D_b1',\n shape=[self.D_struct[order]],\n initializer=tf.keras.initializers.lecun_normal,\n )\n res = tf.nn.selu(W @ X + b)\n else:\n W = tf.get_variable(\n 'D_W'+str(order),\n shape=[self.D_struct[order-1],\n self.D_struct[order]],\n initializer=tf.keras.initializers.lecun_normal,\n )\n b = tf.get_variable(\n 'D_b'+str(order),\n shape=[self.D_struct[order]],\n initializer=tf.keras.initializers.lecun_normal,\n )\n res = tf.nn.selu(W @ res + b)\n\n W = tf.get_variable(\n 'D_W_out',\n shape=[self.D_struct[-1], 1],\n initializer=tf.keras.initializers.lecun_normal,\n )\n b = tf.get_variable(\n 'D_b_out', shape=[1],\n initializer=tf.keras.initializers.lecun_normal,\n )\n res = W @ res + b\n return res\n\n def _update_w(self, beta):\n self.W = (1. 
+ beta) * self.W - beta * (self.W @ self.W) @ self.W\n\n def gen_batch(self):\n np.random.shuffle(self.text_embeds)\n np.random.shuffle(self.audio_embeds)\n cnt = 0\n num = math.ceil(len(self.text_embeds) / self.batch_size)\n while True:\n if cnt == num:\n cnt = 0\n np.random.shuffle(self.text_embeds)\n np.random.shuffle(self.audio_embeds)\n start = cnt * self.batch_size\n end = start + self.batch_size\n yield self.text_embeds[start:end], self.audio_embeds[start:end]\n cnt += 1\n\n def train(\n self,\n audio_embeds: np.ndarray,\n text_embeds: np.ndarray,\n lr: float,\n batch_size: int,\n epoch: int,\n G_num_per_batch: int=1,\n D_num_per_batch: int=1,\n early_stopping: bool=True,\n ):\n\n self.text_embeds = text_embeds\n self.audio_embeds = audio_embeds\n batch_per_epoch = len(text_embeds) // batch_size\n batch_generator = self.gen_batch()\n audio_place = self.graph.get_tensor_by_name(self.AUDIO_HOLDER + ':0')\n text_place = self.graph.get_tensor_by_name(self.TEXT_HOLDER + ':0')\n lr_place = self.graph.get_tensor_by_name(self.LR_HOLDER + ':0')\n G_loss_tensor = self.graph.get_tensor_by_name('G_loss' + ':0')\n D_loss_tensor = self.graph.get_tensor_by_name('D_loss' + ':0')\n G_OP = self.graph.get_operation_by_name(self.OP_G_TRAIN)\n D_OP = self.graph.get_operation_by_name(self.OP_D_TRAIN)\n G_loss_op = self.graph.get_operation_by_name('G_loss')\n D_loss_op = self.graph.get_operation_by_name('D_loss')\n for epoch_num in range(epoch):\n for batch_cnt in range(batch_per_epoch):\n batch_text_embeds, batch_audio_embeds = next(batch_generator)\n for G_num in range(G_num_per_batch):\n _, G_batch_loss = self.sess.run(\n [G_OP, G_loss_tensor],\n feed_dict={\n audio_place: batch_audio_embeds,\n text_place: batch_text_embeds,\n lr_place: lr,\n })\n for D_num in range(D_num_per_batch):\n _, D_batch_loss = self.sess.run(\n [D_OP, D_loss_tensor],\n feed_dict={\n audio_place: batch_audio_embeds,\n text_place: batch_text_embeds,\n lr_place: lr,\n })\n self.logger.debug('batch train G_loss:{}, D_loss:{}'.format(\n G_batch_loss, D_batch_loss))\n\n _, G_all_loss = self.sess.run(\n [G_loss_op, G_loss_tensor],\n feed_dict={\n audio_place: self.audio_embeds,\n text_place: self.text_embeds,\n })\n _, D_all_loss = self.sess.run(\n [D_loss_op, D_loss_tensor],\n feed_dict={\n audio_place: self.audio_embeds,\n text_place: self.text_embeds,\n })\n self.logger.info('Epoch {}, G loss:{}, D loss:{}'.format(\n epoch_num,\n G_all_loss,\n D_all_loss,\n ))\n\n @staticmethod\n def gen_hyper_model_path(path: str):\n hyper_path = path + '_hyper.pkl'\n var_path = path + '_var.mdl'\n return hyper_path, var_path\n\n def save(self, path: str):\n hyper_path, var_path = self.gen_hyper_model_path(path)\n with open(hyper_path, 'wb') as hpb:\n params = {\n 'audio_dim': self.audio_dim,\n 'text_dim': self.text_dim,\n 'D_struct': self.D_struct,\n 'batch_size': self.batch_size,\n }\n pkl.dump(params, hpb)\n self.saver.save(self.sess, var_path)\n\n @classmethod\n def load(cls, path: str):\n hyper_path, var_path = cls.gen_hyper_model_path(path)\n with open(hyper_path, 'rb') as f:\n params = pkl.load(f)\n mdl = cls(**params)\n mdl.saver.restore(mdl.sess, var_path)\n return mdl\n","repo_name":"grtzsohalf/Audio-Phonetic-and-Semantic-Embedding","sub_path":"parallelizing/audio2text_GAN.py","file_name":"audio2text_GAN.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"6881575792","text":"# 18peakvalley.py\n# Version 1:\n\ndata = [1, 2, 3, 4, 5, 6, 7, 6, 5, 
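`_update_w` above applies an iterated orthogonalization to the generator matrix. A standalone NumPy sketch of that update; note the record's expression multiplies `W @ W`, while the classic MUSE-style rule assumed here uses `W @ W.T`:

```python
import numpy as np

def orthogonalize_step(W, beta=0.1):
    # one step of W <- (1 + beta) * W - beta * (W W^T) W
    return (1 + beta) * W - beta * (W @ W.T) @ W

rng = np.random.default_rng(0)
W = rng.standard_normal((4, 4))
W /= np.linalg.norm(W, 2)   # spectral norm 1, so every singular value is <= 1
for _ in range(500):
    W = orthogonalize_step(W)
# each singular value is pushed toward 1, i.e. W drifts to an orthogonal matrix
print(np.allclose(W @ W.T, np.eye(4), atol=1e-6))  # True
```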
4, 5, 6, 7, 8, 9, 8, 7, 6, 7, 8, 9]\n# peaks at 6 and 14\n# valleys at 9, 17\n\n# define functions here-------------------------------------------\n\n\ndef peaks(data): # returns indices of peaks\n\n return [i for i in range(1, (len(data)-1)) if data[i] > data[i-1]\n and data[i] > data[i+1]]\n\n\ndef valleys(data): # returns indices of valleys\n return [i for i in range(1, (len(data)-1)) if data[i] < data[i-1]\n and data[i] < data[i+1]]\n\n\ndef peaksnvalleys(data): # compile list of peaks and valleys in order\n return sorted(valleys(data) + peaks(data))\n\n\ndef printxs(data):\n x = max(data)\n while x > 0:\n print(' ', end='')\n for y in data:\n if y >= x:\n print('X ', end='')\n if y < x:\n print('- ', end='')\n x -= 1\n print('')\n\n\ndef main(): # do the stuff\n\n printxs(data)\n print(data)\n print(f\"Peaks = {peaks(data)}\")\n print(f\"Valleys = {valleys(data)}\")\n print(f\"Peaks and Valleys = {peaksnvalleys(data)}\")\n\n # ----------------------------------------------------------------\n\n\nif __name__ == '__main__':\n\n run = 1\n while run:\n\n # --------------------------------------------------------\n\n main()\n\n# ----------------------------------------------------------------\n\n ask = input('Quit? Y/N > ').strip().lower()\n if ask == 'y':\n run = 0\n","repo_name":"billhowland/PythonLabs","sub_path":"python/18peakvalley.py","file_name":"18peakvalley.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1515375497","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 7 16:46:41 2022\n\n@author: Nithish Kumar\n\"\"\"\n# import necessary libraries\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\n\nfrom sklearn.metrics import ConfusionMatrixDisplay, classification_report\n\nfrom pickle import dump\n\n# load data\nincident_df = pd.read_csv('incident_event_log.csv')\nincident_df.head()\n\n# separate features and target (choose only reqd cols after analysis)\ntarget = 'impact' # target column\nsel_features = ['urgency', 'priority', 'number', 'opened_by']\n# Note: Chosen columns/features have a direct relationship with target \n\nX = incident_df.loc[:, sel_features]\ny = incident_df.loc[:, target]\n\n# Data preprocessing fuctions.\n\n# 1. Function to remove all null columns.\ndef null_value_handler(Xdf):\n # Replace all '?' with np.nan\n Xdf['opened_by'] = Xdf['opened_by'].replace(to_replace='?', value=np.nan)\n # impute null values with mode\n Xdf['opened_by'] = Xdf['opened_by'].fillna(value=Xdf['opened_by'].mode()[0])\n \n return Xdf \n\n# 2. Function to encode the urgency and priority columns\ndef categorical_encoder1(Xdf):\n # scale:\n urgency_scale = {'1 - High':1, '2 - Medium':2, '3 - Low':3}\n priority_scale = {'1 - Critical':1, '2 - High':2, '3 - Moderate':3, '4 - Low':4}\n \n Xdf['urgency'] = Xdf['urgency'].map(urgency_scale)\n Xdf['priority'] = Xdf['priority'].map(priority_scale)\n \n return Xdf\n\n# 3. 
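The comprehension-based detectors in the `18peakvalley.py` record above can be checked directly against the module's own sample data and comments (assumes `peaks`, `valleys`, and `peaksnvalleys` from that file are in scope):

```python
# indices are interior-only: range(1, len(data)-1) excludes both endpoints
sample = [1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 5, 6, 7, 8, 9, 8, 7, 6, 7, 8, 9]
assert peaks(sample) == [6, 14]                  # strict local maxima
assert valleys(sample) == [9, 17]                # strict local minima
assert peaksnvalleys(sample) == [6, 9, 14, 17]   # merged, in index order
```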
Function to ordinal encode high cardinality columns\ndef categorical_encoder2(Xdf):\n \n Xdf['number'] = Xdf['number'].apply(lambda x: x.strip('INC')).astype('int')\n Xdf['opened_by'] = Xdf['opened_by'].apply(lambda x: x.strip('Opened by ')).astype('int')\n\n return Xdf\n\n# 4.Final data preprocessing function that includes the above preprocessing steps.\ndef data_preprocessor(Xdf):\n Xdf = null_value_handler(Xdf)\n Xdf = categorical_encoder1(Xdf)\n Xdf = categorical_encoder2(Xdf) \n \n return Xdf\n\n# 5. Function to encode the labels of the target column.\ndef out_label_encoder(ydf):\n impact_scale = {'1 - High':1, '2 - Medium':2, '3 - Low':3}\n ydf = ydf.map(impact_scale)\n return ydf \n \n# Preprocessing input data(initial).\n# There is no information leakage in the above preprocessing steps.\n# Mode, when taken either separately for train and test or for full dataset\n# is the same if the test size is 20% or 0.2 (from observations).\n\nX_prep = data_preprocessor(X.copy())\ny_prep = out_label_encoder(y.copy())\n\n# Train test split\nX_train, X_test, y_train, y_test = train_test_split(X_prep, y_prep, test_size=0.2, stratify=y, random_state=42)\n\n# Constructing the model\ntransformer = Pipeline(steps=[\n ('scaler', MinMaxScaler())])\n\npreprocessor = ColumnTransformer(transformers=[\n ('cat_trf', transformer, sel_features)])\n\nrf_clf = RandomForestClassifier(random_state=42)\n\nclf = Pipeline(steps=[\n ('preprocessor', preprocessor),\n ('classifier', rf_clf)\n ])\n\n# Function to initialize, train and give predictions \ndef clf_model(Xtrain, Xtest, ytrain, ytest, classifier):\n \"\"\" Fits the model and generates predictions, returns\n the predictions for train and test set \n along with fitted model. \n input\n -----\n Xtrain, Xtest, ytrain, ytest, classifier\n \n output\n ------\n ypred_train, ypred_test, clf\n \"\"\"\n clf = classifier\n clf.fit(Xtrain, ytrain)\n \n ypred_train = clf.predict(Xtrain)\n ypred_test = clf.predict(Xtest)\n \n # Train data performance\n #display_results(ytrain, ypred_train, clf)\n #display_results(ytest, ypred_test, clf) \n return ypred_train, ypred_test, clf\n\n# Function to evaluate model\ndef display_results(y_test, y_pred, clf):\n \"\"\"Displays model evaluation/performance report that includes\n accuracy_score, confusion_matrix, precision_score, and \n recall_score.\n input\n -----\n y_test, y_pred\n \n output\n ------\n Model evaluation/performance report\"\"\"\n print(classification_report(y_test, y_pred))\n #print(\"Confusion matrix:\\n\", confusion_matrix(y_test, y_pred))\n fig, ax = plt.subplots()\n ConfusionMatrixDisplay.from_predictions(y_test, y_pred, labels=clf.classes_, ax=ax)\n \n# Model training\ny_pred_train, y_pred_test, clf =clf_model(Xtrain=X_train,\n Xtest=X_test,\n ytrain=y_train,\n ytest=y_test,\n classifier=clf)\n\n# Model evaluation: Train data\ndisplay_results(y_train, y_pred_train, clf)\nplt.title('model performance: Train data')\nplt.show()\n\n# Model evaluation: Test data\ndisplay_results(y_test, y_pred_test, clf)\nplt.title('model performance: Test data')\nplt.show()\n\n# generate pickle file of the model\ndump(clf, open('incident_impact_rf.pkl', 'wb'))","repo_name":"niti42/incident_impact_v1.1","sub_path":"incident_impact.py","file_name":"incident_impact.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21897064209","text":"import pickle\nimport copy\nimport numpy as np\nimport argparse\nimport os\nimport 
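The pickled pipeline produced by the script above bundles the MinMax scaling and the random forest, but not the pandas-level preprocessing, so `data_preprocessor` must be reapplied before predicting. A hypothetical usage sketch (the incident values are invented; `data_preprocessor` is the function defined in the script):

```python
import pandas as pd
from pickle import load

with open('incident_impact_rf.pkl', 'rb') as f:
    clf = load(f)

# one made-up raw incident with the same columns as sel_features
new_incident = pd.DataFrame([{
    'urgency': '2 - Medium',
    'priority': '3 - Moderate',
    'number': 'INC0000045',
    'opened_by': 'Opened by 8',
}])
new_incident = data_preprocessor(new_incident)  # same encoding as training
print(clf.predict(new_incident))                # encoded impact class, e.g. [2]
```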
matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.distributions.multivariate_normal as torchdist\n\nfrom utils.utils import get_device,get_dataloader\nfrom models.task_detector.graph_autoencoder import GraphAutoEncoder\nfrom utils.train_eval import task_test_with_given_expert\nfrom utils.metrics import ade,fde,seq_to_nodes,nodes_rel_to_nodes_abs,closer_to_zero\nfrom models.social_stgcnn import social_stgcnn\nfrom models.social_stgcnn_dem import social_stgcnn_dem\nfrom scenarios.benchmark import get_continual_scenario_benchmark\nfrom launch_utils import set_seeds\n\n# Arguments\nparser = argparse.ArgumentParser(description=\"test autoencoder\")\n\nparser.add_argument('--experiment_name', type=str, default='test_GAE')\nparser.add_argument('--is_demo', type=int, default=0)\nparser.add_argument('--seed', type=int, default=0)\nparser.add_argument('--task_detect', type=int, default=0)\nparser.add_argument('--task_predict', type=int, default=1)\nparser.add_argument('--deterministic', type=int, default=1)\nparser.add_argument('--vis_detect_res', type=int, default=0)\n\n# scenario parameters\nparser.add_argument('--task_seq', type=str, default=\"1-2-3-4\")\nparser.add_argument('--datasets_split_num', type=int, default=1)\nparser.add_argument('--obs_seq_len', type=int, default=20)\nparser.add_argument('--pred_seq_len', type=int, default=40)\nparser.add_argument('--train_start_task', type=int, default=0)\nparser.add_argument('--test_start_task', type=int, default=0)\n\n# Model parameters\nparser.add_argument('--input_size', type=int, default=2)\nparser.add_argument('--output_size', type=int, default=5)\nparser.add_argument('--n_stgcnn', type=int, default=1,help='Number of ST-GCNN layers')\nparser.add_argument('--n_txpcnn', type=int, default=5, help='Number of TXPCNN layers')\nparser.add_argument('--kernel_size', type=int, default=3)\n\n# test parameters\nparser.add_argument('--test_times', type=int, default=1)\nparser.add_argument('--test_case_num', type=int, default=1000)\nparser.add_argument('--task_free', type=int, default=0)\n\nargs = parser.parse_args()\n\nos.environ[\"CUBLAS_WORKSPACE_CONFIG\"] = \":4096:8\"\nif args.deterministic: set_seeds(args.seed)\n\n# Data\nscenarios = get_continual_scenario_benchmark(args)\ntask_num = len(args.task_seq.split('-')) * args.datasets_split_num\n\n# Model\ntask_gaes = []\nfor tid in range(task_num):\n task_gaes.append(GraphAutoEncoder(seq_len=args.obs_seq_len))\n task_gaes[tid].load_state_dict(torch.load(\"./logs/FAE/layer-2-latent-32/checkpoint/GAE_task_{}.pth\".format(tid)))\n task_gaes[tid].to(get_device())\nloss_func = nn.MSELoss()\n\n# Task detect\ndef task_detect_test(model, test_loader):\n all_loss = []\n model.eval()\n for step, case in enumerate(test_loader):\n case = [tensor.to(get_device()) for tensor in case]\n _, _, _, _, _, _,V_obs,A_obs,V_tr,A_tr = case\n V_obs_tmp = V_obs.permute(0,3,1,2)\n ev,dv = model(V_obs_tmp,A_obs.squeeze())\n loss = loss_func(dv,V_obs_tmp) \n all_loss.append(loss.item())\n all_loss = np.array(all_loss)\n return all_loss\n\ntask_detect_res = []\n\nif args.task_detect:\n res = np.zeros((task_num,task_num))\n for tid, test_task in enumerate(scenarios.test_stream):\n tid += args.test_start_task\n test_loader = get_dataloader(test_task,False)\n # get loss for each gae\n for gae_id, gae in enumerate(task_gaes):\n task_loss = task_detect_test(gae, test_loader)\n if gae_id == 0:\n all_gae_loss = np.zeros_like(task_loss)[np.newaxis,:].repeat(task_num,0)\n 
all_gae_loss[gae_id] = task_loss\n # get gae indices with minimum loss\n max_indices = np.argmin(all_gae_loss,axis=0)\n for i in range(task_num):\n res[tid,i] = np.count_nonzero(max_indices==i)/len(max_indices)\n # x label: task true value, y label: task_detected value, z label: Percentage\n task_detect_res.append(max_indices)\n # display the detect result\n print(\"[Task {}] Total: {}; Detected as [0: {}, 1: {}, 2: {}, 3: {}, 4: {}]\".format(tid,\n len(max_indices),np.count_nonzero(max_indices==0),np.count_nonzero(max_indices==1),\n np.count_nonzero(max_indices==2),np.count_nonzero(max_indices==3),np.count_nonzero(max_indices==4))) \n # visualization\n if args.vis_detect_res:\n fig = plt.figure()\n ax = fig.add_subplot(111,projection=\"3d\")\n x,y = np.meshgrid(np.arange(1,task_num+1), np.arange(1,task_num+1))\n x = x.flatten()\n y = y.flatten()\n z = res.flatten()\n \n colors = ['lightblue' if i==j else 'lightcoral' for i,j in zip(x,y)]\n ax.bar3d(x,y,np.zeros(len(z)), dx=0.8, dy=0.8, dz=z, color=colors)\n \n ax.set_xlabel(\"Task label detect value\")\n ax.set_ylabel(\"Task label ground truth\")\n ax.set_zlabel(\"Percentage\")\n \n plt.show()\n\n# test predict models\ndef load_previous_knowledge(model, task_id):\n root_path = \"./logs/DEM/experiment-1\"\n ckpt_fname = root_path+'/checkpoint/checkpoint_task_{}.pth'.format(task_id-1)\n print(f\"Load model in {ckpt_fname}\")\n assert os.path.exists(ckpt_fname)\n checkpoint = torch.load(ckpt_fname)\n \n # load expert selector\n assert \"expert_selector\" in checkpoint.keys()\n model.expert_selector = checkpoint['expert_selector']\n prev_expert_num = len(checkpoint['expert_selector'])\n \n # load model state dict only when expert number equals to column num \n if prev_expert_num == len(model.columns):\n model.load_state_dict(checkpoint['model_state_dict'])\n assert \"batch_norm_para\" in checkpoint.keys()\n model.batch_norm_para = copy.deepcopy(checkpoint[\"batch_norm_para\"])\n model.freeze_columns()\n model.load_batch_norm_para(task_id-1)\n return model\n\ndef load_best_model(model, task_id):\n root_path = \"./logs/DEM/experiment-1\"\n ckpt_fname = root_path+'/checkpoint/checkpoint_task_{}.pth'.format(task_id)\n assert os.path.exists(ckpt_fname), \"No checkpoint named {}\".format(ckpt_fname)\n checkpoint = torch.load(ckpt_fname)\n model.load_state_dict(checkpoint['model_state_dict'])\n # load batch_norm parameters\n assert \"batch_norm_para\" in checkpoint.keys()\n model.batch_norm_para = checkpoint[\"batch_norm_para\"]\n # load expert_select dict\n assert \"expert_selector\" in checkpoint.keys()\n model.expert_selector = checkpoint[\"expert_selector\"]\n return model\n\nif args.task_predict:\n model = social_stgcnn_dem(n_stgcnn=args.n_stgcnn, n_txpcnn=args.n_txpcnn,\n output_feat=args.output_size, seq_len=args.obs_seq_len,\n kernel_size=args.kernel_size, pred_seq_len=args.pred_seq_len)\n model.set_task_detector(use_task_detector=True)\n # model = load_previous_knowledge(model,task_num-1)\n model = load_previous_knowledge(model,9)\n \n expand_times = len(model.expert_selector) - len(model.columns) + 1\n for t in range(expand_times):\n # if t == expand_times - 1:\n # model = load_previous_knowledge(model, task_num-1)\n model.add_column()\n model = load_best_model(model, 9)\n \n print(\"columns num: {}, task num: {}, GAE num: {}, expert_selector: {}\".format(\n len(model.columns), task_num, len(model.task_gaes), model.expert_selector))\n \n model.set_task_detector(use_task_detector=True)\n for tid in range(task_num):\n 
model.task_gaes[tid].load_state_dict(torch.load(\"./logs/FAE/layer-2-latent-32/checkpoint/GAE_task_{}.pth\".format(tid)))\n model = model.to(get_device())\n \n if not args.task_free:\n model.set_task_detector(use_task_detector=False)\n for id, task in enumerate(scenarios.test_stream):\n id += args.test_start_task\n if id >= task_num:\n break\n eid = model.select_expert(id)\n ade, fde = task_test_with_given_expert(model,id,expert_id=eid,test_task=task,\n args=args, save_dir=\"./logs/FAE/layer-2-latent-32//evaluation\")\n print(\"[Test] columns num:{}, task_{}, expert_{}, ADE:{:.2f}, fde:{:.2f}\".format(\n len(model.columns), id, eid, ade.mean(), fde.mean()))","repo_name":"lhquixotic/DECIBL","sub_path":"test/test_dem.py","file_name":"test_dem.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3238836976","text":"class Solution:\n def findNumberOfLIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums: return 0\n dp = [(1,1)] * len(nums)\n _max = 1\n for i in range(1, len(nums)):\n for j in range(i):\n if nums[i] > nums[j] and dp[i][0] < dp[j][0] + 1:\n dp[i] = dp[j][0]+1, dp[j][1]\n _max = max(_max, dp[i][0])\n elif nums[i] > nums[j] and dp[i][0] == dp[j][0] + 1:\n dp[i] = dp[i][0], dp[i][1] + dp[j][1]\n ans = 0\n for tu in dp:\n if (tu[0] == _max):\n ans += tu[1]\n return ans","repo_name":"chien-wei/LeetCode","sub_path":"0673_Number_of_Longest_Increasing_Subsequence.py","file_name":"0673_Number_of_Longest_Increasing_Subsequence.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38356404995","text":"import requests\n\nBASE_URL = \"http://seanmcapp.herokuapp.com/broadcast\"\n\n\ndef broadcast_photo(secret_key, chat_id, photo_file, caption=None):\n with open(photo_file.name, \"rb\") as f:\n headers = {\"secretKey\": secret_key}\n files = {\"photo\": f}\n values = {\"caption\": caption, \"chat_id\": chat_id}\n\n requests.post(BASE_URL, files=files, data=values, headers=headers)\n return None\n","repo_name":"redhoyasa/seanmclambda","sub_path":"functions/seanmcapp.py","file_name":"seanmcapp.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"738697175","text":"\n\n\nimport scipy.signal as ssig\nfrom os.path import join\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nbase_folder = r'F:\\Data\\George\\Projects\\SpikeSorting\\Neuroseeker\\Neuroseeker_Chronic_Rat_22.1\\2017_05_26\\13_28_10\\Analysis\\Kilosort'\ndata_folder = r'F:\\Data\\George\\Projects\\SpikeSorting\\Neuroseeker\\Neuroseeker_Chronic_Rat_22.1\\2017_05_26\\13_28_10\\Data'\nbinary_data_filename = join(data_folder, r'2017_05_26T13_28_10_Amp_S16_LP3p5KHz_uV.bin')\npulse_data_trace_filename = r'2017_05_26T13_28_10_Sync_U16.bin'\n\nnumber_of_channels_in_binary_file = 1440\n\nraw_data = np.memmap(binary_data_filename, dtype=np.int16, mode='r')\nnumber_of_timepoints_in_raw = int(raw_data.shape[0] / number_of_channels_in_binary_file)\nraw_data = np.reshape(raw_data, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')\n\npulse_data = np.memmap(join(data_folder, pulse_data_trace_filename), dtype=np.uint16, mode='r')\n\n\ndef plot_both_pulses(pulse_data, pulse_square=None, pulse_freq=None, sampling_frequency = 20000, start_time=0, end_time=3600, step_time=1, 
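The `(length, count)` DP in the `findNumberOfLIS` record above is easiest to follow on a concrete input (assumes that `Solution` class is in scope):

```python
# nums = [1, 3, 5, 4, 7]
# dp   = [(1,1), (2,1), (3,1), (3,1), (4,2)]  # (LIS length, count) ending at i
# index 4 reaches length 4 via both [1,3,5,7] and [1,3,4,7], hence count 2
print(Solution().findNumberOfLIS([1, 3, 5, 4, 7]))  # 2
print(Solution().findNumberOfLIS([2, 2, 2, 2, 2]))  # 5: five length-1 runs
```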
time_window=0.5):\n plt.interactive(True)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n start_time = start_time\n end_time = end_time\n step_time = step_time\n time_window = time_window\n num_of_windows = int((end_time - start_time) / step_time)\n\n for win in range(num_of_windows):\n st = start_time + step_time * win\n et = st + time_window\n stp = int(st * sampling_frequency)\n etp = int(et * sampling_frequency)\n t = np.linspace(st, et, etp - stp)\n if pulse_square is None:\n square = ssig.square(2 * np.pi * pulse_freq * (t + 20/sampling_frequency), duty=1.0-0.0434) / 2 + 0.5 + 65278\n else:\n square = pulse_square[stp:etp]\n\n ax.clear()\n ax.plot(t, square, t, pulse_data[stp:etp])\n\n plt.waitforbuttonpress()\n\n\n\ntop_of_pulse_points = np.argwhere(pulse_data==65279)\nbottom_of_pulse_points = np.argwhere(pulse_data==65278)\nstarting_pulse = top_of_pulse_points[0][0]\nend_pulse = top_of_pulse_points[-1][0] - 10\n\n\npulse_freq = 119.607091 # generated by the csv file\nsampling_frequency_corrected = 19998.7 # found so that the traces match\nfull_time = pulse_data.shape[0] / sampling_frequency_corrected\n\nplot_both_pulses(pulse_data, pulse_freq=pulse_freq, sampling_frequency=sampling_frequency_corrected,\n start_time=13, end_time=full_time, step_time=20)\n\n\n\n\n\n\n\nt = np.linspace(0, full_time, pulse_data.shape[0])\n\nsquare = ssig.square(2 * np.pi * pulse_freq * (t + 20/sampling_frequency_corrected), duty=1.0-0.0434) / 2 + 0.5 + 65278\nsquare[:starting_pulse] = 65278\nsquare[end_pulse:] = 65278\n\nplot_both_pulses(pulse_data, pulse_square=square, start_time=13, end_time=full_time+1, step_time=30)\n\nstp = int(10 * sampling_frequency_corrected)\netp = int(14 * sampling_frequency_corrected)\nplt.plot(t[stp:etp], square[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\nstp = int((full_time - 5) * sampling_frequency_corrected)\netp = int((full_time) * sampling_frequency_corrected)\nplt.plot(t[stp:etp], square[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\nnp.save(join(data_folder, r'corrected_camera_ttl_pulses.npy'), square)\nsquare = np.load(join(data_folder, r'corrected_camera_ttl_pulses.npy'))\n\n\n\n\ntransitions = np.diff(square)\nnum_of_pulses = np.sum(transitions==-1)\n\n\n\n\n\n\nframe_times = np.load(join(data_folder, r'frame_times.npy'))\n\ncsv_frames = np.ones(pulse_data.shape[0])*65278\nframe_times_offseted = frame_times + 15.9348\nfor frame_time in frame_times_offseted:\n csv_frames[int(frame_time*sampling_frequency_corrected)] = 65279\n\nstp = int(44 * sampling_frequency_corrected)\netp = int(48 * sampling_frequency_corrected)\nplt.plot(t[stp:etp], csv_frames[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\nstp = int(4255 * sampling_frequency_corrected)\netp = int(full_time * sampling_frequency_corrected)\nplt.plot(t[stp:etp], csv_frames[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\n\nplot_both_pulses(pulse_data, pulse_square=csv_frames, start_time=0, end_time=full_time+1, step_time=2)","repo_name":"georgedimitriadis/themeaningofbrain","sub_path":"ExperimentSpecificCode/_2017_05_Neuroseeker_Chronic_Rat_22.1/2017_05_26/13_28_10/reconstructing_camera_ttl.py","file_name":"reconstructing_camera_ttl.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"73738984515","text":"from abc import ABC, abstractmethod\nimport altair as alt\nimport datetime\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom pyathena.pandas.cursor import PandasCursor\nfrom pathlib import 
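The synthetic TTL trace in the camera-reconstruction record above is a near-unity-duty square wave shifted onto the 16-bit sync baseline. A self-contained sketch of that construction, for one second of signal at an assumed nominal 20 kHz rate:

```python
import numpy as np
import scipy.signal as ssig

fs = 20_000               # nominal rate; the record later refines it slightly
pulse_freq = 119.607091   # camera pulse frequency from the csv
t = np.linspace(0, 1, fs)
square = ssig.square(2 * np.pi * pulse_freq * t, duty=1.0 - 0.0434) / 2 + 0.5 + 65278

print(square.min(), square.max())  # 65278.0 65279.0, the recorded TTL levels
print(np.mean(square == 65279))    # fraction of time high, ~= the duty cycle
```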
Path\n\nfrom . import util\n\n\nclass AbstractChart(ABC):\n def __init__(self, athena, output_dir, output_formats=frozenset(['json'])):\n self.athena = athena\n self.output_dir = output_dir\n self.output_formats = output_formats\n self.name = type(self).__name__\n\n def __call__(self, bulletin_dates):\n self.render(bulletin_dates)\n return self.__class__.__name__\n\n def render(self, bulletin_dates):\n df = self.fetch_data(self.athena, bulletin_dates)\n logging.info(\"%s dataframe: %s\", self.name, util.describe_frame(df))\n\n logging.info(f'Writing {self.name} charts to {self.output_dir}...')\n for bulletin_date in bulletin_dates:\n self.render_bulletin_date(df, bulletin_date)\n\n def render_bulletin_date(self, df, bulletin_date):\n bulletin_dir = Path(f'{self.output_dir}/{bulletin_date}')\n bulletin_dir.mkdir(exist_ok=True)\n filtered = self.filter_data(df, bulletin_date)\n util.save_chart(self.make_chart(filtered, bulletin_date),\n f\"{bulletin_dir}/{bulletin_date}_{self.name}\",\n self.output_formats)\n\n def save_csv(self, df):\n \"\"\"Utility method to save a CSV file to a standardized location.\"\"\"\n csv_dir = Path(f'{self.output_dir}/csv')\n csv_dir.mkdir(exist_ok=True)\n csv_file = f'{csv_dir}/{self.name}.csv'\n df.to_csv(csv_file, index=False)\n logging.info('Wrote %s', csv_file)\n\n @abstractmethod\n def make_chart(self, df, bulletin_date):\n pass\n\n @abstractmethod\n def fetch_data(self, athena, bulletin_dates):\n pass\n\n def filter_data(self, df, bulletin_date):\n \"\"\"Filter dataframe according to given bulletin_date. May want to override.\"\"\"\n return df.loc[df['bulletin_date'] == pd.to_datetime(bulletin_date)]\n\n\nclass CurrentDeltas(AbstractChart):\n def make_chart(self, df, bulletin_date):\n base = alt.Chart(df).encode(\n x=alt.X('date(datum_date):O',\n title=\"Día del mes\", sort=\"descending\",\n axis=alt.Axis(format='%d')),\n y=alt.Y('yearmonth(datum_date):O',\n title=None, sort=\"descending\",\n axis=alt.Axis(format='%B')),\n tooltip=[alt.Tooltip('datum_date:T', title='Fecha de muestra o muerte'),\n alt.Tooltip('bulletin_date:T', title='Fecha de boletín'),\n alt.Tooltip('value:Q', title='Casos añadidos (o restados)')]\n )\n\n heatmap = base.mark_rect().encode(\n color=alt.Color('value:Q', title=None, legend=None,\n scale=alt.Scale(scheme=\"redgrey\", domainMid=0,\n # WORKAROUND: Set the domain manually to forcibly\n # include zero or else we run into\n # https://github.com/vega/vega-lite/issues/6544\n domain=alt.DomainUnionWith(unionWith=[0])))\n )\n\n text = base.mark_text(fontSize=7).encode(\n text=alt.Text('value:Q'),\n color=util.heatmap_text_color(df, 'value')\n ).transform_filter(\"(datum.value !== 0) & (datum.value !== null)\")\n\n return (heatmap + text).properties(\n width=580, height=200\n ).facet(\n columns=1,\n facet=alt.Facet('variable', title=None,\n sort=['Confirmados',\n 'Probables',\n 'Muertes'])\n )\n\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\nSELECT\n bulletin_date,\n datum_date,\n delta_confirmed_cases AS \"Confirmados\",\n delta_probable_cases AS \"Probables\",\n delta_deaths AS \"Muertes\"\nFROM bulletin_cases\nWHERE %(min_bulletin_date)s <= bulletin_date\nAND bulletin_date <= %(max_bulletin_date)s\"\"\"\n df = util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n return pd.melt(df, [\"bulletin_date\", \"datum_date\"]) \\\n .replace(0, np.nan)\n\n\nclass DailyDeltas(AbstractChart):\n def make_chart(self, df, bulletin_date):\n base = 
alt.Chart(df).mark_rect().encode(\n x=alt.X('yearmonthdate(datum_date):O',\n title=\"Fecha evento\", sort=\"descending\",\n axis=alt.Axis(format='%d/%m')),\n y=alt.Y('yearmonthdate(bulletin_date):O',\n title=None, sort=\"descending\",\n axis=alt.Axis(format='%d/%m')),\n tooltip=[alt.Tooltip('datum_date:T', title='Fecha de muestra o muerte'),\n alt.Tooltip('bulletin_date:T', title='Fecha de boletín'),\n alt.Tooltip('value:Q', title='Casos añadidos (o restados)')]\n )\n\n heatmap = base.mark_rect().encode(\n color=alt.Color('value:Q', title=None, legend=None,\n scale=alt.Scale(scheme=\"redgrey\", domainMid=0,\n # WORKAROUND: Set the domain manually to forcibly\n # include zero or else we run into\n # https://github.com/vega/vega-lite/issues/6544\n domain=alt.DomainUnionWith(unionWith=[0])))\n )\n\n text = base.mark_text(fontSize=3).encode(\n text=alt.Text('value:Q'),\n color=util.heatmap_text_color(df, 'value')\n )\n\n return (heatmap + text).properties(\n width=585, height=120\n ).facet(\n columns=1,\n facet=alt.Facet('variable', title=None,\n sort=['Confirmados',\n 'Probables',\n 'Muertes'])\n )\n\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\nSELECT\n bulletin_date,\n datum_date,\n delta_confirmed_cases AS \"Confirmados\",\n delta_probable_cases AS \"Probables\",\n delta_deaths AS \"Muertes\"\nFROM bulletin_cases\nWHERE %(min_bulletin_date)s - INTERVAL '14' DAY <= bulletin_date\nAND bulletin_date <= %(max_bulletin_date)s\"\"\"\n df = util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n return pd.melt(df, [\"bulletin_date\", \"datum_date\"])\n\n def filter_data(self, df, bulletin_date):\n since_date = pd.to_datetime(bulletin_date - datetime.timedelta(days=14))\n until_date = pd.to_datetime(bulletin_date)\n filtered = df.loc[(since_date < df['bulletin_date'])\n & (df['bulletin_date'] <= until_date)]\\\n .replace(0, np.nan)\\\n .dropna()\n return filtered\n\n\nclass WeekdayBias(AbstractChart):\n def make_chart(self, df, bulletin_date):\n confirmed = self.one_variable(df, 'Confirmados', 'Día muestra', 'oranges')\n probable = self.one_variable(df, 'Probables', 'Día muestra', 'reds')\n deaths = self.one_variable(df, 'Muertes', 'Día muerte', 'teals')\n\n data_date = alt.Chart(df).mark_text(baseline='middle').encode(\n text=alt.Text('bulletin_date',\n type='temporal',\n aggregate='max',\n timeUnit='yearmonthdate',\n format='Datos hasta: %A %d de %B, %Y'),\n ).properties(\n width=330, height=40\n )\n\n row1 = alt.hconcat(confirmed, probable, spacing=20).resolve_scale(\n color='independent'\n )\n\n return alt.vconcat(row1, data_date, deaths, center=True).resolve_scale(\n color='independent'\n )\n\n def one_variable(self, df, variable,\n axis_title,\n color_scheme):\n base = alt.Chart(df).transform_filter(\n alt.datum.variable == variable\n ).transform_filter(\n alt.datum.value > 0\n ).encode(\n color=alt.Color('sum(value):Q', title=None,\n scale=alt.Scale(type='log', base=2, scheme=color_scheme))\n )\n\n heatmap = base.mark_rect().encode(\n x=alt.X('day(datum_date):O', title=axis_title),\n y=alt.Y('day(bulletin_date):O', title='Día boletín'),\n tooltip=[alt.Tooltip('variable:N', title='Variable'),\n alt.Tooltip('day(bulletin_date):O', title='Día de boletín'),\n alt.Tooltip('day(datum_date):O', title='Día de muestra o muerte'),\n alt.Tooltip('value:Q', aggregate='sum', title='Casos')]\n )\n\n right = base.mark_bar().encode(\n x=alt.X('sum(value):Q', title=None, axis=None),\n 
y=alt.Y('day(bulletin_date):O', title=None, axis=None),\n tooltip=[alt.Tooltip('variable:N', title='Variable'),\n alt.Tooltip('day(bulletin_date):O', title='Día de boletín'),\n alt.Tooltip('value:Q', aggregate='sum', title='Casos')]\n )\n\n top = base.mark_bar().encode(\n x=alt.X('day(datum_date):O', title=None, axis=None),\n y=alt.Y('sum(value):Q', title=None, axis=None),\n tooltip=[alt.Tooltip('variable:N', title='Variable'),\n alt.Tooltip('day(datum_date):O', title='Día de muestra o muerte'),\n alt.Tooltip('value:Q', aggregate='sum', title='Casos')]\n )\n\n heatmap_size = 150\n histogram_size = 40\n return alt.vconcat(\n top.properties(\n width=heatmap_size, height=histogram_size,\n # This title should logically belong to the whole chart,\n # but assigning it to the concat chart anchors it wrong.\n # See: https://altair-viz.github.io/user_guide/generated/core/altair.TitleParams.html\n title=alt.TitleParams(\n text=variable,\n anchor='middle',\n align='center',\n fontSize=14,\n fontWeight='normal'\n )\n ),\n alt.hconcat(\n heatmap.properties(\n width=heatmap_size, height=heatmap_size\n ),\n right.properties(\n width=histogram_size, height=heatmap_size\n ),\n spacing=3),\n spacing=3\n )\n\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\nSELECT\n bulletin_date,\n datum_date,\n delta_confirmed_cases AS \"Confirmados\",\n delta_probable_cases AS \"Probables\",\n delta_deaths AS \"Muertes\"\nFROM weekday_bias\nWHERE %(min_bulletin_date)s - INTERVAL '22' DAY <= bulletin_date\nAND bulletin_date <= %(max_bulletin_date)s\"\"\"\n df = util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n return pd.melt(df, ['bulletin_date', 'datum_date']).dropna()\n\n def filter_data(self, df, bulletin_date):\n # We exclude the current bulletin_date because this chart's\n # main use is to compare the current bulletin's data to trends\n # established **before** it.\n cutoff_date = bulletin_date - datetime.timedelta(days=1)\n since_date = pd.to_datetime(cutoff_date - datetime.timedelta(days=21))\n until_date = pd.to_datetime(cutoff_date)\n return df.loc[(since_date < df['bulletin_date'])\n & (df['bulletin_date'] <= until_date)]\n\n\nclass Municipal(AbstractChart):\n def make_chart(self, df, bulletin_date):\n WIDTH = 525\n return alt.Chart(df).transform_calculate(\n new_cases_1m='1e6 * datum.new_cases / datum.pop2020'\n ).transform_impute(\n impute='new_cases_1m',\n groupby=['bulletin_date', 'municipality'],\n key='sample_date',\n value=0\n ).transform_window(\n groupby=['bulletin_date', 'municipality'],\n sort=[{'field': 'sample_date'}],\n frame=[-6, 0],\n mean_cases='mean(new_cases)',\n mean_cases_1m='mean(new_cases_1m)'\n ).transform_window(\n groupby=['bulletin_date', 'municipality'],\n sort=[{'field': 'sample_date'}],\n frame=[-20, 0],\n mean_cases_1m_21day='sum(new_cases_1m)'\n ).transform_window(\n groupby=['bulletin_date', 'municipality'],\n sort=[{'field': 'sample_date'}],\n frame=[None, None],\n order_value='last_value(mean_cases_1m_21day)'\n ).transform_filter(\n alt.datum.sample_date >= util.altair_date_expr(bulletin_date - datetime.timedelta(days=84))\n ).mark_rect().encode(\n x=alt.X('sample_date:T', timeUnit='yearmonthdate', title='Fecha de muestra',\n axis=alt.Axis(format='%-d/%-m', labelFontSize=10,\n labelBound=True, labelAlign='right', labelOffset=4)),\n y=alt.Y('municipality:N', title=None,\n axis=alt.Axis(tickBand='extent', labelFontSize=10),\n sort=alt.Sort(op='sum', field='order_value', 
order='descending')),\n color=alt.Color('mean_cases_1m:Q', title='Casos diarios por millón',\n scale=alt.Scale(scheme='spectral', reverse=True, type='symlog', constant=25),\n legend=alt.Legend(orient='top', gradientLength=WIDTH,\n labelOverlap='greedy', labelSeparation=5,\n values=[10, 25, 50, 100, 250, 500, 1000])),\n tooltip=[alt.Tooltip('bulletin_date:T', title='Datos hasta'),\n alt.Tooltip('sample_date:T', title='Fecha de muestra'),\n alt.Tooltip('municipality:N', title='Municipio'),\n alt.Tooltip('pop2020:Q', format=',d', title='Population'),\n alt.Tooltip('new_cases:Q', format=',d', title='Casos crudos'),\n alt.Tooltip('mean_cases:Q', format=',.1f', title='Casos diarios (prom. 7)'),\n alt.Tooltip('mean_cases_1m:Q', format=',d', title='Casos por millón (prom. 7)')]\n ).properties(\n width=WIDTH\n ).facet(\n columns=1,\n row=alt.Row('region:N', title=None,\n header=alt.Header(orient='right'),\n # TODO: compute and use a regional population weighted mean for sorting\n sort=alt.Sort(op='mean', field='order_value', order='descending'))\n ).resolve_scale(\n x='independent', y='independent'\n )\n\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\nSELECT\n bulletin_date,\n sample_date,\n region,\n municipality,\n pop2020,\n new_cases\nFROM cases_municipal_agg\nWHERE %(min_bulletin_date)s - INTERVAL '97' DAY <= sample_date\nAND sample_date <= %(max_bulletin_date)s\"\"\"\n return util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n\n\nclass MunicipalMap(AbstractChart):\n WIDTH = 600\n\n def make_chart(self, df, bulletin_date):\n new_cases = self.make_cases_chart(df)\n growth = self.make_trend_chart(df)\n return alt.vconcat(new_cases, growth).configure_view(\n strokeWidth=0\n ).configure_concat(\n spacing=40\n ).resolve_scale(\n color='independent'\n )\n\n def make_cases_chart(self, df):\n return self.make_subchart(\n df,\n alt.Color('daily_cases_100k', type='quantitative',\n scale=alt.Scale(type='sqrt', scheme='redgrey', reverse=True,\n clamp=True, domainMid=0,\n # WORKAROUND: Set the domain manually to forcibly\n # include zero or else we run into\n # https://github.com/vega/vega-lite/issues/6544\n #\n # Also, we union with 40 so that the scale always\n # goes at least that high, but if values exceed\n # that then we let the data determine the top\n # of the domain.\n domain=alt.DomainUnionWith(unionWith=[0, 40])),\n legend=alt.Legend(orient='top', titleLimit=400, titleOrient='top',\n title='Casos diarios (por 100k de población, promedio 7 días)',\n offset=-15, labelSeparation=10,\n format=',~r', gradientLength=self.WIDTH)))\n\n def make_trend_chart(self, df):\n return self.make_subchart(\n df,\n alt.Color('trend:Q', type='quantitative', sort='descending',\n scale=alt.Scale(type='symlog', scheme='redgrey',\n domainMid=0.0, clamp=True,\n domain=alt.DomainUnionWith(unionWith=[-1.0, 10.0])),\n legend=alt.Legend(orient='top', titleLimit=400, titleOrient='top',\n title='Cambio (7 días más recientes vs. 
7 anteriores)',\n offset=-15, labelSeparation=10,\n format='+,.0%', gradientLength=self.WIDTH)))\n\n\n def make_subchart(self, df, color):\n return alt.Chart(df).transform_lookup(\n lookup='municipality',\n from_=alt.LookupData(self.geography(), 'properties.NAME', ['type', 'geometry'])\n ).transform_calculate(\n daily_cases=alt.datum.new_7day_cases / 7.0,\n daily_cases_100k=((alt.datum.new_7day_cases * 1e5) / alt.datum.pop2020) / 7.0,\n trend='(datum.new_7day_cases / if(datum.previous_7day_cases == 0, 1, datum.previous_7day_cases)) - 1.0'\n ).mark_geoshape(stroke='black', strokeWidth=0.25).encode(\n color=color,\n tooltip=[alt.Tooltip(field='bulletin_date', type='temporal', title='Fecha de boletín'),\n alt.Tooltip(field='municipality', type='nominal', title='Municipio'),\n alt.Tooltip(field='pop2020', type='quantitative', format=',d', title='Población'),\n alt.Tooltip(field='daily_cases', type='quantitative', format=',.1f',\n title='Casos (prom. 7 días)'),\n alt.Tooltip(field='daily_cases_100k', type='quantitative', format=',.1f',\n title='Casos/100k (prom. 7 días)'),\n alt.Tooltip(field='trend', type='quantitative', format='+,.0%', title='Cambio')]\n ).properties(\n width=self.WIDTH,\n height=250\n )\n\n\n def geography(self):\n return alt.InlineData(values=util.get_geojson_resource('municipalities.topojson'),\n format=alt.TopoDataFormat(type='topojson', feature='municipalities'))\n\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\nSELECT\n bulletin_date,\n municipality,\n pop2020,\n new_7day_cases,\n previous_7day_cases\nFROM municipal_map\nWHERE %(min_bulletin_date)s <= bulletin_date\nAND bulletin_date <= %(max_bulletin_date)s\"\"\"\n return util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n\n def filter_data(self, df, bulletin_date):\n return df.loc[df['bulletin_date'] == pd.to_datetime(bulletin_date)]\n\n\nclass LatenessTiers(AbstractChart):\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\nSELECT\n bulletin_date,\n tier,\n tier_order,\n count\nFROM lateness_tiers\nWHERE bulletin_date <= %(max_bulletin_date)s\"\"\"\n return util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n\n def filter_data(self, df, bulletin_date):\n return df.loc[df['bulletin_date'] <= pd.to_datetime(bulletin_date)]\n\n def make_chart(self, df, bulletin_date):\n max_y = df.groupby(['bulletin_date'])['count'].sum()\\\n .rolling(7).mean().max()\n\n base = alt.Chart(df).transform_joinaggregate(\n groupby=['bulletin_date'],\n total='sum(count)'\n ).transform_window(\n groupby=['tier'],\n sort=[{'field': 'bulletin_date'}],\n frame=[-6, 0],\n mean_count='mean(count)',\n mean_total='mean(total)'\n ).transform_calculate(\n percent=alt.datum.count / alt.datum.total,\n mean_percent=alt.datum.mean_count / alt.datum.mean_total\n ).mark_area(opacity=0.85).encode(\n color=alt.Color('tier:N', title='Renglón (días)',\n legend=alt.Legend(orient='top', titleOrient='left', offset=5),\n scale=alt.Scale(scheme='redyellowgreen', reverse=True)),\n order=alt.Order('tier_order:O'),\n tooltip=[alt.Tooltip('bulletin_date:T', title='Fecha de boletín'),\n alt.Tooltip('tier:N', title='Renglón'),\n alt.Tooltip('count:Q', format=\",d\", title='Casos en renglón (crudo)'),\n alt.Tooltip('total:Q', format=\",d\", title='Casos total (crudo)'),\n alt.Tooltip('mean_count:Q', format=\".1f\", title='Casos en renglón (promedio 7)'),\n alt.Tooltip('mean_total:Q', 
format=\".1f\", title='Casos total (promedio 7)'),\n alt.Tooltip('mean_percent:Q', format=\".1%\", title='% de total (promedio 7)')]\n )\n\n absolute = base.encode(\n x=alt.X('bulletin_date:T', title=None, axis=alt.Axis(ticks=False, labels=False)),\n y=alt.Y('mean_count:Q', title='Casos confirmados (promedio 7 días)',\n scale=alt.Scale(domain=[0, max_y]),\n axis=alt.Axis(labelExpr=\"if(datum.value > 0, datum.label, '')\")),\n ).properties(\n width=575, height=275\n )\n\n normalized = base.encode(\n x=alt.X('bulletin_date:T', timeUnit='yearmonthdate', title='Fecha de boletín',\n axis=alt.Axis(\n labelExpr=\"[timeFormat(datum.value, '%b'),\"\n \" timeFormat(datum.value, '%m') == '01'\"\n \" ? timeFormat(datum.value, '%Y')\"\n \" : '']\")),\n y=alt.Y('mean_count:Q', stack='normalize', title='% renglón',\n axis=alt.Axis(format='%', labelExpr=\"if(datum.value < 1.0, datum.label, '')\"))\n ).properties(\n width=575, height=75\n )\n\n return alt.vconcat(absolute, normalized, spacing=5)\n\n\nclass RecentGenomicSurveillance(AbstractChart):\n def fetch_data(self, athena, bulletin_dates):\n query = \"\"\"\n SELECT\n bulletin_date,\n since,\n until,\n category AS variant,\n category_order,\n count\n FROM recent_genomic_surveillance\n WHERE bulletin_date <= %(max_bulletin_date)s\"\"\"\n return util.execute_pandas(athena, query, {\n 'min_bulletin_date': min(bulletin_dates),\n 'max_bulletin_date': max(bulletin_dates)\n })\n\n def filter_data(self, df, bulletin_date):\n return df.loc[df['bulletin_date'] == pd.to_datetime(bulletin_date)]\n\n def make_chart(self, df, bulletin_date):\n sinces = df['since'].unique()[1:]\\\n .strftime('%Y-%m-%dT00:00:00').tolist()\n\n base = alt.Chart(df).transform_joinaggregate(\n max_since='max(since)'\n ).transform_calculate(\n opacity=\"if(datum.since == datum.max_since, 0.4, 0.8)\"\n ).encode(\n # `scale=None` means that the data encodes the opacity values directly\n opacity=alt.Opacity('opacity:Q', scale=None, legend=None)\n )\n\n percentages = base.transform_calculate(\n variant=\"if(datum.variant == null, 'Otra', datum.variant)\",\n category_order=\"if(datum.category_order == null, -1, datum.category_order)\"\n ).transform_joinaggregate(\n weekly_count='sum(count)',\n groupby=['since']\n ).transform_calculate(\n fraction='datum.count / datum.weekly_count'\n ).transform_stack(\n stack='count',\n offset='normalize',\n as_=['y', 'y2'],\n groupby=['since'],\n sort=[alt.SortField('category_order')]\n ).mark_rect(stroke='white', strokeWidth=0.5, strokeDash=[2]).encode(\n # Weekly bar charts in Vega-Lite turn out to be a bit hellish, because it only supports\n # Sunday-based weeks whereas Trino only does Monday-based. 
So we do ranged rectangles.\n x=alt.X('since:T', title='Fecha de muestra', axis=None),\n x2=alt.X2('until:T'),\n y=alt.Y('y:Q', title='Porcentaje', axis=alt.Axis(format='%')),\n y2=alt.Y2('y2:Q'),\n color=alt.Color('variant:N', title='Variante',\n legend=alt.Legend(orient='top', columns=6, symbolOpacity=0.9)),\n tooltip=[\n alt.Tooltip('since:T', title='Muestras desde'),\n alt.Tooltip('until:T', title='Muestras hasta'),\n alt.Tooltip('variant:N', title='Variante'),\n alt.Tooltip('count:Q', format=\",d\", title='Secuencias'),\n alt.Tooltip('fraction:Q', format='.2p', title=\"Porcentaje\")\n ]\n ).properties(\n width=575, height=250\n )\n\n volumes = base.transform_aggregate(\n groupby=['bulletin_date', 'since', 'until'],\n sum_count='sum(count)',\n opacity='min(opacity)'\n ).mark_rect(color='gray', stroke='white', strokeWidth=0.5, strokeDash=[2]).encode(\n x=alt.X('since:T', title='Fecha de muestra',\n axis=alt.Axis(format='%d/%m', labelAngle=90, values=sinces)),\n x2=alt.X2('until:T'),\n y=alt.Y('sum_count:Q', title='Volumen'),\n tooltip = [\n alt.Tooltip('since:T', title='Desde'),\n alt.Tooltip('until:T', title='Hasta'),\n alt.Tooltip('sum_count:Q', format=\",d\", title='Volumen'),\n ]\n ).properties(\n width=575, height=100\n )\n\n return alt.vconcat(percentages, volumes)","repo_name":"sacundim/covid-19-puerto-rico","sub_path":"website/src/covid_19_puerto_rico/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":27117,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"21425918836","text":"import pygame\n\nfrom simulozza.data_file import data_file\n\n\nclass HealthIcon(pygame.sprite.Sprite):\n image_good = pygame.image.load(data_file('hud_heartFull.png'))\n image_bad = pygame.image.load(data_file('hud_heartEmpty.png'))\n\n def __init__(self, num, *groups):\n super().__init__(*groups)\n self.num = num\n self.image = self.image_good\n w = self.image.get_size()[0]\n self.rect = pygame.rect.Rect((num * w, 0), self.image.get_size())\n\n def update(self, dt, game):\n if game.player.lives > self.num:\n self.image = self.image_good\n else:\n self.image = self.image_bad\n","repo_name":"SimuLozza/simulozza","sub_path":"simulozza/health.py","file_name":"health.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7621809865","text":"import sys\n\nsys.path.append(\"..\")\n\nimport z3\nimport src.utils.utils as utils\n\n\ndef RTAS2018(DATA,\n TOPO,\n NUM_FLOW,\n INS=-1,\n OUTPUT=\"./\",\n workers=1,\n NUM_WINDOW=5):\n try:\n run_time = 0\n net, net_attr, link_in, link_out, link_to_index, index_to_link, es_set, sw_set, rate = utils.read_network(\n TOPO, utils.t_slot)\n task_attr, LCM = utils.read_task(DATA, utils.t_slot, net, rate)\n\n z3.set_param('parallel.enable', True)\n z3.set_param('parallel.threads.max', workers)\n\n s = z3.Solver()\n\n net_var = {}\n for link in net_attr:\n net_var.setdefault(link, {})\n net_attr[link]['W'] = NUM_WINDOW\n net_var[link]['phi'] = z3.Array(link + '_' + 'phi', z3.IntSort(),\n z3.IntSort())\n net_var[link]['tau'] = z3.Array(link + '_' + 'tau', z3.IntSort(),\n z3.IntSort())\n net_var[link]['k'] = z3.Array(link + '_' + 'k', z3.IntSort(),\n z3.IntSort())\n\n task_var = {}\n\n for i in task_attr:\n task_var.setdefault(i, {})\n route = task_attr[i]['s_path']\n for _i, a in enumerate(route[:-1]):\n link = str((a, route[_i + 1]))\n task_var[i].setdefault(link, {})\n task_var[i][link] = []\n for j 
in range(int(LCM / task_attr[i]['period'])):\n task_var[i][link].append(\n z3.Int('w_' + str(i) + '_' + str(link) + '_' + str(j)))\n\n for link in net_var:\n for k in range(net_attr[link]['W']):\n net_var[link]['tau'] = z3.Store(net_var[link]['tau'], k,\n net_var[link]['phi'][k])\n\n for i in task_var:\n for link in task_var[i]:\n for j in task_var[i][link]:\n net_var[link]['tau'] = z3.Store(\n net_var[link]['tau'], j,\n net_var[link]['tau'][j] + task_attr[i]['t_trans'])\n\n for link in net_var:\n s.add(net_var[link]['phi'][0] >= 0, net_var[link]['tau'][-1] < LCM)\n\n for link in net_var:\n for k in range(net_attr[link]['W']):\n s.add(net_var[link]['k'][k] >= 0,\n net_var[link]['k'][k] < net_attr[link]['q_num'])\n\n for i in task_var:\n for link in task_var[i]:\n for j in range(int(LCM / task_attr[i]['period'])):\n s.add(\n net_var[link]['phi'][task_var[i][link][j]] >=\n j * task_attr[i]['period'],\n net_var[link]['tau'][task_var[i][link][j]] <\n (j + 1) * task_attr[i]['period'])\n\n for link in net_var:\n for i in range(net_attr[link]['W'] - 1):\n s.add(net_var[link]['tau'][i] <= net_var[link]['phi'][i + 1])\n\n for i in task_var:\n for link in task_var[i]:\n for j in task_var[i][link]:\n s.add(0 <= j, j < net_attr[link]['W'])\n\n for i in task_var:\n hops = list(task_var[i].keys())\n for k, link in enumerate(hops[:-1]):\n for j in range(int(LCM / task_attr[i]['period'])):\n s.add(net_var[link]['tau'][task_var[i][link][j]] +\n net_attr[link]['t_proc'] + utils.delta <= net_var[\n hops[k + 1]]['phi'][task_var[i][hops[k + 1]][j]])\n\n for i, j in [(i, j) for i in task_var for j in task_var if i < j]:\n path_i = list(task_var[i].keys())\n path_j = list(task_var[j].keys())\n for x_a, y_a, a_b in [(path_i[_x - 1], path_j[_y - 1], i_a_b)\n for _x, i_a_b in enumerate(path_i)\n for _y, j_a_b in enumerate(path_j)\n if i_a_b == j_a_b]:\n for k, l in [(k, l)\n for k in range(int(LCM / task_attr[i]['period']))\n for l in range(int(LCM / task_attr[j]['period']))\n ]:\n s.add(\n z3.Or(\n net_var[a_b]['tau'][task_var[i][a_b][k]] +\n net_attr[y_a]['t_proc'] + utils.delta <\n net_var[y_a]['phi'][task_var[j][y_a][l]],\n net_var[a_b]['tau'][task_var[j][a_b][l]] +\n net_attr[x_a]['t_proc'] + utils.delta <\n net_var[x_a]['phi'][task_var[i][x_a][k]],\n net_var[a_b]['k'][task_var[i][a_b][k]] !=\n net_var[a_b]['k'][task_var[j][a_b][l]],\n task_var[i][a_b][k] == task_var[j][a_b][l]))\n\n for i in task_var:\n _hop_s = list(task_var[i].keys())[0]\n _hop_e = list(task_var[i].keys())[-1]\n for j in range(int(LCM / task_attr[i]['period'])):\n s.add(net_var[_hop_e]['tau'][task_var[i][_hop_e][j]] -\n net_var[_hop_s]['phi'][task_var[i][_hop_s][j]] <=\n task_attr[i]['deadline'] - utils.delta)\n\n if utils.check_time(utils.t_limit):\n return utils.rprint(INS, NUM_FLOW, \"unknown\", 0)\n\n s.set(\"timeout\", int(utils.t_limit - utils.time_log()) * 1000)\n res = s.check()\n info = s.statistics()\n run_time = info.time\n # run_memory = info.max_memory\n\n if res == z3.unsat:\n return utils.rprint(INS, NUM_FLOW, \"infeasible\", run_time)\n elif res == z3.unknown:\n return utils.rprint(INS, NUM_FLOW, \"unknown\", run_time)\n result = s.model()\n\n ## GCL\n GCL = []\n for link in net_var:\n for i in range(net_attr[link]['W']):\n start = result.eval(net_var[link]['phi'][i]).as_long()\n end = result.eval(net_var[link]['tau'][i]).as_long()\n queue = result.eval(net_var[link]['k'][i]).as_long()\n if end > start:\n GCL.append([\n eval(link), queue, start * utils.t_slot,\n end * utils.t_slot, LCM * utils.t_slot\n ])\n\n ## Offset\n 
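# The loops below read the solved schedule back out of the Z3 model: each flow\n        # instance's release offset is taken from the start time (phi) of its first-hop\n        # window and reported as (period - phi) * utils.t_slot, complementing the GCL\n        # entries extracted above.\n        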
OFFSET = []\n for i in task_var:\n link = list(task_var[i].keys())[0]\n for ins_id, ins_window in enumerate(task_var[i][link]):\n offset = result.eval(\n net_var[link]['phi'][ins_window]).as_long()\n OFFSET.append([\n i, ins_id, (task_attr[i]['period'] - offset) * utils.t_slot\n ])\n\n ROUTE = []\n for i in task_attr:\n route = task_attr[i]['s_path']\n for h, v in enumerate(route[:-1]):\n ROUTE.append([i, (v, route[h + 1])])\n\n QUEUE = []\n for i in task_var:\n for link in task_var[i]:\n for ins_id, ins_window in enumerate(task_var[i][link]):\n QUEUE.append([\n i, ins_id, link,\n result.eval(net_var[link]['k'][ins_window]).as_long()\n ])\n\n DELAY = []\n for i in task_var:\n link_start = list(task_var[i].keys())[0]\n link_end = list(task_var[i].keys())[-1]\n for ins_id in range(int(LCM / task_attr[i]['period'])):\n start_window = task_var[i][link_start][ins_id]\n start = result.eval(\n net_var[link_start]['phi'][start_window]).as_long()\n end_window = task_var[i][link_end][ins_id]\n end = result.eval(\n net_var[link_end]['tau'][end_window]).as_long()\n delay = (end - start) * utils.t_slot\n DELAY.append([i, ins_id, delay])\n\n utils.write_result(utils.myname(), DATA, GCL, OFFSET, ROUTE, QUEUE,\n DELAY, OUTPUT)\n\n return utils.rprint(INS, NUM_FLOW, \"sat\", run_time)\n except KeyboardInterrupt:\n return utils.rprint(INS, NUM_FLOW, \"unknown\", run_time)\n\n\nif __name__ == \"__main__\":\n DATA = \"../../data/utilization/utilization_%s_%s.csv\" % (5, 1)\n TOPO = \"../../data/utilization/utilization_topology.csv\"\n RTAS2018(DATA, TOPO, 10)","repo_name":"ChuanyuXue/tsnkit","sub_path":".raw_models/RTAS2018/RTAS2018.py","file_name":"RTAS2018.py","file_ext":"py","file_size_in_byte":8496,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"39785628826","text":"def check_len_palindromic_from_middle(s, left, right):\n if left > right:\n return 0\n while left >= 0 and right < len(s) and s[left] == s[right]:\n left -= 1\n right += 1\n return right - left - 1 # length of the palindrome substring\n # ( R - 1 ) - ( L + 1 ) + 1 (ex: case \"racecar\" will return R = 7, L = 0)\n # R - 1 - L - 1 + 1\n # R - L - 2 + 1\n # R - L - 1\n\n\ndef longest_palindrome(s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n n = len(s)\n start = 0\n end = 0\n if n <= 1:\n return s\n for i in range(0, n):\n # case that length of palindrome is odd, we check on the same character, ex \"abcba\"\n odd = check_len_palindromic_from_middle(s, i, i)\n # case that length of palindrome is even, we check on 2 characters from the middle, ex \"cbbd\"\n even = check_len_palindromic_from_middle(s, i, i + 1)\n palindrome_len = max(odd, even)\n if palindrome_len > end - start:\n start = i - (palindrome_len - 1) // 2\n end = i + palindrome_len // 2\n palindrome = s[start:(end + 1)]\n return palindrome\n\n\nprint(longest_palindrome(\"cbbd\"))\n","repo_name":"huannguyen2008/leet_code_practicing","sub_path":"Blind 75 LeetCode Questions Python/String/[medium]longest_palindromic_substring.py","file_name":"[medium]longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21143277848","text":"import numpy as np\n\n\ndef weight(yIn, target, threshold, learningRate, nodesValue, weights):\n if yIn < -threshold:\n yIn = -1\n\n elif yIn > threshold:\n yIn = 1\n\n else:\n yIn = 0\n\n if yIn != target:\n weights[-1] += (learningRate * target)\n for i in 
range(len(nodesValue)):\n weights[i] += target * nodesValue[i]\n\n return weights\n\n\ndef data(inputTable, target, threshold=0.2, learningRate=1):\n print(\"*---------------------------------*\")\n trainingSetLenght = len(inputTable)\n inputNodeLenght = len(inputTable[0])\n print(\"NodesVal:\", trainingSetLenght)\n print(\"sample training:\", inputNodeLenght)\n weights = np.zeros(len(inputTable[0]) + 1)\n print(\"*---------------------------------*\")\n print(\"initW\", weights)\n print(\"*---------------------------------*\")\n\n\n repeat = True\n while repeat:\n\n logicW = [[], [], [], []]\n for i in range(trainingSetLenght):\n\n yIn = weights[-1]\n for j in range(inputNodeLenght):\n yIn += (inputTable[i][j] * weights[j])\n\n a = weight(yIn, target[i], threshold, learningRate, inputTable[i], weights)\n logicW[i].extend(a)\n print(\"Weights: \", logicW)\n print(\"*---------------------------------*\")\n\n count = 0\n for k in range(len(logicW)):\n if round(np.linalg.norm(logicW[0])**2) == round(np.linalg.norm(logicW[k])**2):\n count += 1\n\n if count == trainingSetLenght:\n repeat = False\n\n\n\ndata(\n [\n [1, 1], [1, -1], [-1, 1], [-1, -1]\n ],\n\n [\n 1, -1, -1, -1\n ]\n)\n\n","repo_name":"maryamZare98/neural_network","sub_path":"newPerceptron.py","file_name":"newPerceptron.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18107782325","text":"\"\"\"Tests of fulfillment tasks.\"\"\"\n# pylint: disable=no-value-for-parameter\nfrom unittest import TestCase, mock\nfrom urllib.parse import urljoin\n\nimport ddt\nimport responses\nfrom celery.exceptions import Ignore\nfrom edx_rest_api_client import exceptions\nfrom requests.exceptions import HTTPError\n\n# Ensures that a Celery app is initialized when tests are run.\nfrom ecommerce_worker import celery_app # pylint: disable=unused-import\nfrom ecommerce_worker.fulfillment.v1.tasks import fulfill_order\nfrom ecommerce_worker.utils import get_configuration\n\n\n@ddt.ddt\nclass OrderFulfillmentTaskTests(TestCase):\n \"\"\"\n Tests of the order fulfillment task.\n \"\"\"\n ORDER_NUMBER = 'FAKE-123456'\n API_URL = urljoin(get_configuration('ECOMMERCE_API_ROOT') + '/', f'orders/{ORDER_NUMBER}/fulfill/')\n OAUTH_ACCESS_TOKEN_URL = urljoin(\n get_configuration('BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL') + '/', 'access_token/'\n )\n ACCESS_TOKEN = 'FAKE-access-token'\n\n def setUp(self):\n super().setUp()\n self.mock_access_token_api()\n\n def mock_access_token_api(self, status=200):\n \"\"\"\n Mock POST requests to retrieve an access token for this site's service user.\n \"\"\"\n responses.add(\n responses.POST,\n self.OAUTH_ACCESS_TOKEN_URL,\n status=status,\n json={\n 'access_token': self.ACCESS_TOKEN\n }\n )\n\n @ddt.data(\n 'ECOMMERCE_API_ROOT',\n 'MAX_FULFILLMENT_RETRIES',\n 'BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL',\n 'BACKEND_SERVICE_EDX_OAUTH2_KEY',\n 'BACKEND_SERVICE_EDX_OAUTH2_SECRET'\n )\n def test_requires_configuration(self, setting):\n \"\"\"\n Verify that the task refuses to run without the configuration it requires.\n \"\"\"\n with mock.patch('ecommerce_worker.configuration.test.' 
+ setting, None):\n with self.assertRaises(RuntimeError):\n fulfill_order(self.ORDER_NUMBER)\n\n @responses.activate\n def test_fulfillment_success(self):\n \"\"\"\n Verify that the task exits without an error when fulfillment succeeds.\n \"\"\"\n responses.add(responses.PUT, self.API_URL, status=200, body=\"{}\")\n\n result = fulfill_order.delay(self.ORDER_NUMBER).get()\n self.assertIsNone(result)\n\n # Validate the value of the HTTP Authorization header.\n last_request = responses.calls[-1].request\n token = last_request.headers.get('Authorization').split()[1]\n self.assertEqual(token, self.ACCESS_TOKEN)\n\n @responses.activate\n def test_fulfillment_not_possible(self):\n \"\"\"\n Verify that the task exits without an error when fulfillment is not possible.\n \"\"\"\n responses.add(responses.PUT, self.API_URL, status=406, body=\"{}\")\n\n with self.assertRaises(Ignore):\n fulfill_order(self.ORDER_NUMBER)\n\n @responses.activate\n def test_fulfillment_connection_error(self):\n \"\"\"\n Verify that the task raises an exception when fulfillment fails because of connection error.\n \"\"\"\n responses.add(responses.PUT, self.API_URL, status=404, body=\"{}\")\n\n with self.assertRaises(HTTPError):\n fulfill_order(self.ORDER_NUMBER)\n\n @responses.activate\n def test_fulfillment_connection_error_retry_success(self):\n \"\"\"\n Verify that the task is capable of successfully retrying after connection error.\n \"\"\"\n responses.add(\n responses.Response(responses.PUT, self.API_URL, status=404, body=\"{}\"),\n )\n responses.add(\n responses.Response(responses.PUT, self.API_URL, status=200, body=\"{}\"),\n )\n\n result = fulfill_order.delay(self.ORDER_NUMBER).get()\n self.assertIsNone(result)\n\n @responses.activate\n def test_fulfillment_failure(self):\n \"\"\"\n Verify that the task raises an exception when fulfillment fails.\n \"\"\"\n responses.add(responses.PUT, self.API_URL, status=500, body=\"{}\")\n\n with self.assertRaises(HTTPError):\n fulfill_order.delay(self.ORDER_NUMBER).get()\n\n @responses.activate\n def test_fulfillment_timeout(self):\n \"\"\"\n Verify that the task raises an exception when fulfillment times out.\n \"\"\"\n responses.add(responses.PUT, self.API_URL, status=404, body=exceptions.Timeout())\n\n with self.assertRaises(exceptions.Timeout):\n fulfill_order.delay(self.ORDER_NUMBER).get()\n\n @responses.activate\n def test_fulfillment_retry_success(self):\n \"\"\"\n Verify that the task is capable of successfully retrying after fulfillment failure.\n \"\"\"\n responses.add(\n responses.Response(responses.PUT, self.API_URL, status=500, body=\"{}\"),\n )\n responses.add(\n responses.Response(responses.PUT, self.API_URL, status=200, body=\"{}\"),\n )\n\n result = fulfill_order.delay(self.ORDER_NUMBER).get()\n self.assertIsNone(result)\n\n @ddt.data(\n [True, 'True'],\n [False, 'False']\n )\n @ddt.unpack\n @responses.activate\n def test_email_opt_in_parameter_sent(self, email_opt_in_bool, email_opt_in_str):\n \"\"\"\n Verify that the task correctly adds the email_opt_in parameter to the request.\n \"\"\"\n email_opt_in_api_url = self.API_URL + '?email_opt_in=' + email_opt_in_str\n responses.add(responses.PUT, email_opt_in_api_url, status=200, body=\"{}\")\n\n result = fulfill_order.delay(self.ORDER_NUMBER, email_opt_in=email_opt_in_bool).get()\n self.assertIsNone(result)\n\n last_request = responses.calls[-1].request\n self.assertIn('?email_opt_in=' + email_opt_in_str, 
last_request.path_url)\n","repo_name":"openedx/ecommerce-worker","sub_path":"ecommerce_worker/fulfillment/v1/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"43267567523","text":"from Data_Structure.list_node import ListNode\n\nclass Palindrome:\n    def isPalindrome(self, head: ListNode) -> bool:\n        if not head:\n            return True\n        if not head.next:\n            return True\n\n        cur: ListNode = head\n        stack_1: List = [-1]\n        stack_2: List = []\n        while cur:\n            if cur.val != stack_1[-1]:\n                if cur.next and cur.next.val == stack_1[-1]:\n                    stack_2.append(cur.val)\n                    cur = cur.next\n                    continue\n                while stack_2:\n                    stack_1.append(stack_2.pop())\n                stack_1.append(cur.val)\n                cur = cur.next\n                continue\n            else:\n                temp_val = stack_1.pop()\n                for i in range(2):\n                    stack_2.append(temp_val)\n                cur = cur.next\n                continue\n\n        if len(stack_1) == 1 and stack_1[-1] == -1:\n            return True\n        else:\n            return False\n\n\n# # This is a sample Python script.\n\n# # Press ⌃R to execute it or replace it with your code.\n# # Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n# from String.palindrome import Palindrome\n# from Data_Structure.list_node import ListNode\n\n# if __name__ == '__main__':\n\n#     head: ListNode = ListNode(val=1)\n#     head.next = ListNode(val=0)\n#     head.next.next = ListNode(val=1)\n\n#     solution = Palindrome()\n#     res = solution.isPalindrome(head)\n#     print(res)\n","repo_name":"latree/leetcode","sub_path":"String/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71335696834","text":"\n# Basic concept structure\n\nclass Node():\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n# Create each node\nn1 = Node(1)\nn2 = Node(2)\nn3 = Node(3)\nn4 = Node(4)\n\n# Link the nodes\nn1.next = n2\nn2.next = n3\nn3.next = n4\n\n\n\nclass Linkdlist:\n    def __init__(self,data):\n        self.head = Node(data) # head becomes a Node object\n\n    def append(self, data):\n        pointer = self.head # start from head\n        while pointer.next is not None:\n            pointer = pointer.next # advance to the next node while not None\n        pointer.next = Node(data) # reached the end, append there\n\n    def print_(self):\n        pointer = self.head\n        while pointer.next is not None:\n            print(pointer.data)\n            pointer = pointer.next\n        print(pointer.data) # the last node never enters the while loop above, so print it here\n\n    def get_data(self, index):\n        pointer = self.head\n        while index> 0:\n            pointer = pointer.next\n            index -=1\n        return pointer.data\n\n    def add_(self,index, data):\n        add_node = Node(data)\n        pre_index = index-1\n        pointer = self.head\n        while pre_index>0:\n            pointer = pointer.next\n            pre_index -=1\n        after_node = pointer.next # the original next node, saved before relinking\n        pointer.next = add_node # link in the new node\n        add_node.next = after_node # set the new node's next\n\n    def delete_(self, index):\n        pointer = self.head\n        pre_index = index-1 # the node just before the index to delete\n        while pre_index>0:\n            pointer = pointer.next\n            pre_index -= 1\n        after_node = pointer.next.next # skip two links (the middle one is the node being deleted)\n        pointer.next = after_node # connect next to that node\n\n####### Link the nodes\na = Linkdlist(1)\na.append(2)\na.append(3)\na.append(4)\na.append(5)\na.append(6)\na.print_()\n\n\n###### Value of the second node\na.get_data(1)\n\n# Insert a new node at the second position\na.add_(1,'k')\na.print_()\n\n# Delete the second node
\na.delete_(1)\na.print_()\n\n\n\n\n\n","repo_name":"kokoko12334/TIL2","sub_path":"Algorithm/Linkedlist.py","file_name":"Linkedlist.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31687437567","text":"class Solution(object):\n    @staticmethod\n    def get_min(data):\n        if data is None:\n            return None\n        if len(data)==0:\n            return None\n        if len(data)==1:\n            return data[0]\n        l=0\n        h=len(data)-1\n        mid=0\n        while data[l]>=data[h]:\n            if h-l==1:\n                mid=h\n                break\n            mid=(l+h)//2\n            if data[l]==data[h] and data[l]==data[mid]:\n                return Solution.min_in_order(data,l,h)\n            if data[mid]<=data[h]:\n                h=mid\n            elif data[mid]>=data[l]:\n                l=mid\n        return data[mid]\n    @staticmethod\n    def min_in_order(data,l,h):\n        result=data[l]\n        for i in range(l,h+1):\n            if data[i]f:\n\tw=min(a,d)\n\tprof+=w*e\n\ta=a-w\n\td=d-w\n\tw=min(b,c,d)\n\tprof+=w*f\n\tb=b-w\n\tc=c-w\n\td=d-w\n\tprint(prof)\nelse:\n\tw=min(b,c,d)\n\tprof+=w*f\n\tb=b-w\n\tc=c-w\n\td=d-w\n\tw=min(a,d)\n\tprof+=w*e\n\ta=a-w\n\td=d-w\n\tprint(prof)\n","repo_name":"adityachaudhary147/py-codes","sub_path":"1271A.py","file_name":"1271A.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39614214024","text":"import logging\nfrom datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import JsonResponse, Http404\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DeleteView, FormView\n\nfrom zds.member.decorator import LoggedWithReadWriteHability\nfrom zds.tutorialv2.forms import ContainerForm, ExtractForm, MoveElementForm\nfrom zds.tutorialv2.mixins import (\n    SingleContentFormViewMixin,\n    FormWithPreview,\n    SingleContentViewMixin,\n    SingleContentPostMixin,\n)\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.tutorialv2.utils import (\n    search_container_or_404,\n    search_extract_or_404,\n    try_adopt_new_child,\n    TooDeepContainerError,\n)\nfrom zds.utils.misc import is_ajax\n\nlogger = logging.getLogger(__name__)\n\n\nclass CreateContainer(LoggedWithReadWriteHability, SingleContentFormViewMixin, FormWithPreview):\n    template_name = \"tutorialv2/create/container.html\"\n    form_class = ContainerForm\n    content = None\n    authorized_for_staff = True  # former behaviour\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        context[\"container\"] = search_container_or_404(self.versioned_object, self.kwargs)\n        context[\"gallery\"] = self.object.gallery\n        return context\n\n    def render_to_response(self, context, **response_kwargs):\n        parent = context[\"container\"]\n        if not parent.can_add_container():\n            messages.error(self.request, _(\"Vous ne pouvez plus ajouter de conteneur à « {} ».\").format(parent.title))\n            return redirect(parent.get_absolute_url())\n\n        return super().render_to_response(context, **response_kwargs)\n\n    def form_valid(self, form):\n        parent = search_container_or_404(self.versioned_object, self.kwargs)\n\n        sha = parent.repo_add_container(\n            form.cleaned_data[\"title\"],\n            form.cleaned_data[\"introduction\"],\n            form.cleaned_data[\"conclusion\"],\n            form.cleaned_data[\"msg_commit\"],\n        )\n\n        # then save:\n        self.object.sha_draft = sha\n        self.object.update_date = 
datetime.now()\n self.object.save()\n\n self.success_url = parent.children[-1].get_absolute_url()\n\n return super().form_valid(form)\n\n\nclass EditContainer(LoggedWithReadWriteHability, SingleContentFormViewMixin, FormWithPreview):\n template_name = \"tutorialv2/edit/container.html\"\n form_class = ContainerForm\n content = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n if \"preview\" not in self.request.POST:\n container = search_container_or_404(self.versioned_object, self.kwargs)\n context[\"container\"] = container\n context[\"gallery\"] = self.object.gallery\n\n return context\n\n def get_initial(self):\n \"\"\"rewrite function to pre-populate form\"\"\"\n initial = super().get_initial()\n container = search_container_or_404(self.versioned_object, self.kwargs)\n\n initial[\"title\"] = container.title\n initial[\"introduction\"] = container.get_introduction()\n initial[\"conclusion\"] = container.get_conclusion()\n initial[\"container\"] = container\n\n initial[\"last_hash\"] = container.compute_hash()\n\n return initial\n\n def form_valid(self, form, *args, **kwargs):\n container = search_container_or_404(self.versioned_object, self.kwargs)\n\n # check if content has changed:\n current_hash = container.compute_hash()\n if current_hash != form.cleaned_data[\"last_hash\"]:\n data = form.data.copy()\n data[\"last_hash\"] = current_hash\n data[\"introduction\"] = container.get_introduction()\n data[\"conclusion\"] = container.get_conclusion()\n form.data = data\n messages.error(self.request, _(\"Une nouvelle version a été postée avant que vous ne validiez.\"))\n return self.form_invalid(form)\n\n sha = container.repo_update(\n form.cleaned_data[\"title\"],\n form.cleaned_data[\"introduction\"],\n form.cleaned_data[\"conclusion\"],\n form.cleaned_data[\"msg_commit\"],\n update_slug=self.object.public_version is None,\n )\n\n # then save\n self.object.sha_draft = sha\n self.object.update_date = datetime.now()\n self.object.save()\n\n self.success_url = container.get_absolute_url()\n\n return super().form_valid(form)\n\n\nclass CreateExtract(LoggedWithReadWriteHability, SingleContentFormViewMixin, FormWithPreview):\n template_name = \"tutorialv2/create/extract.html\"\n form_class = ExtractForm\n content = None\n authorized_for_staff = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"container\"] = search_container_or_404(self.versioned_object, self.kwargs)\n context[\"gallery\"] = self.object.gallery\n\n return context\n\n def render_to_response(self, context, **response_kwargs):\n parent = context[\"container\"]\n if not parent.can_add_extract():\n messages.error(self.request, _(\"Vous ne pouvez plus ajouter de section à « {} ».\").format(parent.title))\n return redirect(parent.get_absolute_url())\n\n return super().render_to_response(context, **response_kwargs)\n\n def form_valid(self, form):\n parent = search_container_or_404(self.versioned_object, self.kwargs)\n\n sha = parent.repo_add_extract(\n form.cleaned_data[\"title\"], form.cleaned_data[\"text\"], form.cleaned_data[\"msg_commit\"]\n )\n\n # then save\n self.object.sha_draft = sha\n self.object.update_date = datetime.now()\n self.object.save()\n\n self.success_url = parent.children[-1].get_absolute_url()\n\n return super().form_valid(form)\n\n\nclass EditExtract(LoggedWithReadWriteHability, SingleContentFormViewMixin, FormWithPreview):\n template_name = \"tutorialv2/edit/extract.html\"\n form_class = ExtractForm\n content = 
None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"gallery\"] = self.object.gallery\n\n extract = search_extract_or_404(self.versioned_object, self.kwargs)\n context[\"extract\"] = extract\n\n return context\n\n def get_initial(self):\n \"\"\"rewrite function to pre-populate form\"\"\"\n initial = super().get_initial()\n extract = search_extract_or_404(self.versioned_object, self.kwargs)\n\n initial[\"title\"] = extract.title\n initial[\"text\"] = extract.get_text()\n initial[\"extract\"] = extract\n\n initial[\"last_hash\"] = extract.compute_hash()\n\n return initial\n\n def form_valid(self, form):\n extract = search_extract_or_404(self.versioned_object, self.kwargs)\n\n # check if content has changed:\n current_hash = extract.compute_hash()\n if current_hash != form.cleaned_data[\"last_hash\"]:\n data = form.data.copy()\n data[\"last_hash\"] = current_hash\n data[\"text\"] = extract.get_text()\n form.data = data\n messages.error(self.request, _(\"Une nouvelle version a été postée avant que vous ne validiez.\"))\n return self.form_invalid(form)\n\n sha = extract.repo_update(\n form.cleaned_data[\"title\"], form.cleaned_data[\"text\"], form.cleaned_data[\"msg_commit\"]\n )\n\n # then save\n self.object.update(sha_draft=sha, update_date=datetime.now())\n\n self.success_url = extract.get_absolute_url()\n if is_ajax(self.request):\n return JsonResponse(\n {\"result\": \"ok\", \"last_hash\": extract.compute_hash(), \"new_url\": extract.get_edit_url()}\n )\n return super().form_valid(form)\n\n\nclass DeleteContainerOrExtract(LoggedWithReadWriteHability, SingleContentViewMixin, DeleteView):\n model = PublishableContent\n template_name = None\n http_method_names = [\"delete\", \"post\"]\n object = None\n versioned_object = None\n\n def delete(self, request, *args, **kwargs):\n \"\"\"delete any object, either Extract or Container\"\"\"\n self.object = self.get_object()\n self.versioned_object = self.get_versioned_object()\n parent = search_container_or_404(self.versioned_object, self.kwargs)\n\n # find something to delete and delete it\n to_delete = None\n if \"object_slug\" in self.kwargs:\n try:\n to_delete = parent.children_dict[self.kwargs[\"object_slug\"]]\n except KeyError:\n raise Http404(\"Impossible de récupérer le contenu pour le supprimer.\")\n\n sha = to_delete.repo_delete()\n\n # then save\n self.object.update(sha_draft=sha, update_date=datetime.now())\n\n return redirect(parent.get_absolute_url())\n\n\nclass MoveChild(LoginRequiredMixin, SingleContentPostMixin, FormView):\n model = PublishableContent\n form_class = MoveElementForm\n versioned = False\n\n def get(self, request, *args, **kwargs):\n raise PermissionDenied\n\n def form_valid(self, form):\n content = self.get_object()\n versioned = content.load_version()\n base_container_slug = form.data[\"container_slug\"]\n child_slug = form.data[\"child_slug\"]\n\n if not base_container_slug:\n raise Http404(\"Le slug du container de base est vide.\")\n\n if not child_slug:\n raise Http404(\"Le slug du container enfant est vide.\")\n\n if base_container_slug == versioned.slug:\n parent = versioned\n else:\n search_params = {}\n\n if \"first_level_slug\" in form.data and form.data[\"first_level_slug\"]:\n search_params[\"parent_container_slug\"] = form.data[\"first_level_slug\"]\n search_params[\"container_slug\"] = base_container_slug\n else:\n search_params[\"container_slug\"] = base_container_slug\n parent = search_container_or_404(versioned, search_params)\n\n try:\n child = 
parent.children_dict[child_slug]\n if form.data[\"moving_method\"] == MoveElementForm.MOVE_UP:\n parent.move_child_up(child_slug)\n logger.debug(f\"{child_slug} was moved up in tutorial id:{content.pk}\")\n elif form.data[\"moving_method\"] == MoveElementForm.MOVE_DOWN:\n parent.move_child_down(child_slug)\n logger.debug(f\"{child_slug} was moved down in tutorial id:{content.pk}\")\n elif form.data[\"moving_method\"][0 : len(MoveElementForm.MOVE_AFTER)] == MoveElementForm.MOVE_AFTER:\n target = form.data[\"moving_method\"][len(MoveElementForm.MOVE_AFTER) + 1 :]\n if not parent.has_child_with_path(target):\n if \"/\" not in target:\n target_parent = versioned\n else:\n target_parent = search_container_or_404(versioned, \"/\".join(target.split(\"/\")[:-1]))\n\n if target.split(\"/\")[-1] not in target_parent.children_dict:\n raise Http404(\"La cible n'est pas un enfant du parent.\")\n child = target_parent.children_dict[target.split(\"/\")[-1]]\n try_adopt_new_child(target_parent, parent.children_dict[child_slug])\n # now, I will fix a bug that happens when the slug changes\n # this one cost me so much of my hair\n # and makes me think copy/past are killing kitty cat.\n child_slug = target_parent.children[-1].slug\n parent = target_parent\n parent.move_child_after(child_slug, target.split(\"/\")[-1])\n logger.debug(f\"{child_slug} was moved after {target} in tutorial id:{content.pk}\")\n elif form.data[\"moving_method\"][0 : len(MoveElementForm.MOVE_BEFORE)] == MoveElementForm.MOVE_BEFORE:\n target = form.data[\"moving_method\"][len(MoveElementForm.MOVE_BEFORE) + 1 :]\n if not parent.has_child_with_path(target):\n if \"/\" not in target:\n target_parent = versioned\n else:\n target_parent = search_container_or_404(versioned, \"/\".join(target.split(\"/\")[:-1]))\n\n if target.split(\"/\")[-1] not in target_parent.children_dict:\n raise Http404(\"La cible n'est pas un enfant du parent.\")\n child = target_parent.children_dict[target.split(\"/\")[-1]]\n try_adopt_new_child(target_parent, parent.children_dict[child_slug])\n # now, I will fix a bug that happens when the slug changes\n # this one cost me so much of my hair\n child_slug = target_parent.children[-1].slug\n parent = target_parent\n parent.move_child_before(child_slug, target.split(\"/\")[-1])\n logger.debug(f\"{child_slug} was moved before {target} in tutorial id:{content.pk}\")\n versioned.slug = content.slug # we force not to change slug\n versioned.dump_json()\n parent.repo_update(\n parent.title,\n parent.get_introduction(),\n parent.get_conclusion(),\n _(\"Déplacement de \") + child_slug,\n update_slug=False,\n )\n content.sha_draft = versioned.sha_draft\n content.save()\n messages.info(self.request, _(\"L'élément a bien été déplacé.\"))\n except TooDeepContainerError:\n messages.error(\n self.request,\n _(\"Ce conteneur contient déjà trop d'enfants pour être\" \" inclus dans un autre conteneur.\"),\n )\n except KeyError:\n messages.warning(\n self.request,\n _(\n \"Vous n'avez pas complètement rempli le formulaire,\"\n \"ou bien il est impossible de déplacer cet élément.\"\n ),\n )\n except ValueError as e:\n raise Http404(\"L'arbre spécifié n'est pas valide.\" + str(e))\n except IndexError:\n messages.warning(self.request, _(\"L'élément se situe déjà à la place souhaitée.\"))\n logger.debug(f\"L'élément {child_slug} se situe déjà à la place souhaitée\")\n except TypeError:\n messages.error(self.request, _(\"L'élément ne peut pas être déplacé à cet endroit.\"))\n if base_container_slug == versioned.slug:\n return 
redirect(reverse(\"content:view\", args=[content.pk, content.slug]))\n else:\n return redirect(child.get_absolute_url())\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/tutorialv2/views/containers_extracts.py","file_name":"containers_extracts.py","file_ext":"py","file_size_in_byte":14997,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"} +{"seq_id":"17136721913","text":"from dataclasses import dataclass\nfrom typing import Dict, Optional, Union\n\nfrom string import Template\n\nfrom promg import Query\nfrom promg.data_managers.semantic_header import ConstructedNodes, ConstructedRelation\n\n\nclass InferDFInteractionsQueryLibrary:\n @staticmethod\n def delete_parallel_directly_follows_derived(entity: Union[ConstructedNodes, ConstructedRelation],\n original_entity: Union[ConstructedNodes, ConstructedRelation],\n event_label: str = 'Event'):\n query_str = '''\n CALL apoc.periodic.iterate(\n 'MATCH (e1:$event_label) -[df:$df_entity]-> (e2:$event_label)\n WHERE (e1) -[:$df_original_entity]-> (e2)\n RETURN df',\n 'WITH df DELETE df\n ',\n {batchSize: $batch_size})\n '''\n\n return Query(query_str=query_str,\n template_string_parameters={\n \"df_entity\": entity.get_df_label(),\n \"df_original_entity\": original_entity.get_df_label(),\n \"event_label\": event_label\n })\n","repo_name":"Ava-S/ekg_bpi_challenges","sub_path":"custom_modules/custom_queries/df_interactions.py","file_name":"df_interactions.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23377890375","text":"import copy\nimport warnings\n\nimport numpy as np\nimport torch\nfrom torchvision.ops.boxes import box_iou, nms\n\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.utils.logger import setup_logger\n\nsetup_logger()\n\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\nclass Detectron2:\n def __init__(self, cfg_path, weights_path):\n self.cfg = get_cfg()\n\n if cfg_path is None:\n print(\"Getting default config path for detectron\")\n self.cfg.merge_from_file(\n \"detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"\n )\n else:\n self.cfg.merge_from_file(cfg_path)\n self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.65 # Set min conf threshold\n self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 # NMS Threshold\n\n if weights_path is None:\n print(\"Getting default weights path for detectron \")\n self.cfg.MODEL.WEIGHTS = \"detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl\"\n else:\n self.cfg.MODEL.WEIGHTS = weights_path\n self.cfg.MODEL.DEVICE = \"cuda\"\n self.predictor = DefaultPredictor(self.cfg)\n self.batch_ensemble_thresh = 0.3\n\n def bbox(self, img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n return cmin, rmin, cmax, rmax\n\n def detect(self, im):\n outputs = self.predictor(im)\n boxes = outputs[\"instances\"].pred_boxes.tensor.cpu().numpy()\n classes = outputs[\"instances\"].pred_classes.cpu().numpy()\n scores = outputs[\"instances\"].scores.cpu().numpy()\n\n bbox_xcycwh, cls_conf, cls_ids = [], [], []\n\n for (box, _class, score) in zip(boxes, classes, scores):\n\n if _class >= 0:\n x0, y0, x1, y1 = box\n bbox_xcycwh.append([(x1 + x0) / 2, (y1 + y0) / 2, (x1 - x0), (y1 - y0)])\n cls_conf.append(score)\n cls_ids.append(_class)\n\n 
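# The loop above converts each detectron2 corner box (x0, y0, x1, y1) into center\n        # format (xc, yc, w, h); e.g. a hypothetical box (0, 0, 10, 20) maps to (5.0, 10.0, 10, 20).\n        # The boxes are returned below as float64 arrays together with per-box confidence\n        # scores and class ids.\n        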
return (\n np.array(bbox_xcycwh, dtype=np.float64),\n np.array(cls_conf),\n np.array(cls_ids),\n )\n\n def detect_batch(self, imgs, apply_batch_ensemble=False):\n batch_outputs = self.predictor.predict_batch_images(imgs)\n\n predictions = []\n for outputs in batch_outputs:\n boxes = outputs[\"instances\"].pred_boxes.tensor\n classes = outputs[\"instances\"].pred_classes\n scores = outputs[\"instances\"].scores\n foreground_boxes = torch.where(\n classes >= 0,\n torch.ones(classes.size()).bool().cuda(),\n torch.zeros(classes.size()).bool().cuda(),\n )\n boxes = boxes[foreground_boxes]\n classes = classes[foreground_boxes]\n scores = scores[foreground_boxes]\n\n # predictions.append([f_boxes,scores[foreground_boxes], classes[foreground_boxes]])\n predictions.append([boxes, scores, classes])\n if apply_batch_ensemble:\n threshold = 0.3\n # all_box_status = [np.ones(len(f_p[0]), dtype=bool) for f_p in predictions]\n num_imgs = len(predictions)\n if not num_imgs < 2:\n final_predictions = predictions[0]\n for idx in range(1, num_imgs):\n box1, sc1, conf1 = final_predictions\n box2, sc2, conf2 = predictions[idx]\n\n b_ious = box_iou(box1, box2)\n mask = torch.where(\n b_ious >= threshold,\n torch.ones(b_ious.size()).bool().cuda(),\n torch.zeros(b_ious.size()).bool().cuda(),\n )\n\n b1_mask = mask.any(axis=1)\n b2_mask = mask.any(axis=0)\n b1_boxs = box1[b1_mask]\n b2_boxs = box2[b2_mask]\n final_bbox = torch.cat([b1_boxs, b2_boxs], axis=0)\n\n b1_sc = sc1[b1_mask]\n b2_sc = sc2[b2_mask]\n final_scores = torch.cat([b1_sc, b2_sc], axis=0)\n\n b1_conf = conf1[b1_mask]\n b2_conf = conf2[b2_mask]\n final_conf = torch.cat([b1_conf, b2_conf], axis=0)\n\n final_predictions = [final_bbox, final_scores, final_conf]\n boxs, scores, confs = final_predictions\n b_mask = nms(boxs, scores, iou_threshold=0.5)\n boxs = boxs[b_mask]\n scores = scores[b_mask]\n confs = confs[b_mask]\n predictions = [[boxs, scores, confs]]\n final_preds = []\n for frame_predictions in predictions:\n # From xyxy to xcycwh\n f_boxes, scores, confs = frame_predictions\n f_boxes[:, 0] = (f_boxes[:, 0] + f_boxes[:, 2]) / 2\n f_boxes[:, 1] = (f_boxes[:, 1] + f_boxes[:, 3]) / 2\n f_boxes[:, 2] = (f_boxes[:, 2] - f_boxes[:, 0]) * 2\n f_boxes[:, 3] = (f_boxes[:, 3] - f_boxes[:, 1]) * 2\n f_boxes = f_boxes.cpu().numpy()\n scores = scores.cpu().numpy()\n confs = confs.cpu().numpy()\n final_preds.append([f_boxes, scores, confs])\n return final_preds\n\n\nif __name__ == \"__main__\":\n import cv2\n import time\n import os\n from argparse import ArgumentParser\n\n p = ArgumentParser()\n p.add_argument(\"--vpath\", default=\"../sample_videos/demo_2_40s.mp4\")\n p.add_argument(\"--imp\", default=None)\n p.add_argument(\n \"-wp\",\n \"--weight_path\",\n default=\"/nfs/gpu14_datasets/surveillance_weights/visdrone_t1/model_0111599.pth\",\n )\n p.add_argument(\n \"-cp\",\n \"--cfg_path\",\n default=\"/nfs/gpu14_datasets/surveillance_weights/visdrone_t1/test.yaml\",\n )\n args = p.parse_args()\n v_path = args.vpath\n\n w_p = args.weight_path\n cfg_p = args.cfg_path\n d = Detectron2(cfg_p, w_p)\n\n save_folder = \"/mnt/nfshome1/FRACTAL/vikash.challa/BMC/iff/people_counter/frames\"\n # save_folder = os.path.join(s_r, os.path.basename(v_path).replace(\".mp4\", \"_\") )\n os.system(\"mkdir -p {}\".format(save_folder))\n\n print(v_path)\n cap = cv2.VideoCapture(v_path)\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n try:\n im_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n im_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = 20\n except Exception as 
e:\n im_width = 1280\n im_height = 786\n fps = 20\n video_output = cv2.VideoWriter(\n \"/data/temp/sample.mp4\", fourcc, fps, (im_width * 2, im_height)\n )\n assert cap.isOpened()\n frame_count = 100000\n\n def get_3_frames():\n ret_frames = []\n for i in range(3):\n f, im = cap.read()\n if f:\n ret_frames.append(im)\n return ret_frames\n\n from util import bbox_cxywh_xywh, draw_bboxes_xywh\n\n count = 0\n while True:\n ims = get_3_frames()\n frame_count += len(ims)\n if len(ims) < 1:\n break\n b_outs_e = d.detect_batch(ims, apply_batch_ensemble=True)\n b_outs = d.detect_batch(ims, apply_batch_ensemble=False)\n\n s_ims = ims\n e_ims = [copy.deepcopy(i) for i in ims]\n\n for idx, each_im_ouputs in enumerate(b_outs):\n bbox_xcycwh, cls_conf, cls_ids = each_im_ouputs\n persons_count = 0\n\n bbox_xywh = []\n if bbox_xcycwh is not None and bbox_xcycwh != []:\n mask = cls_ids == 0\n bbox_xcycwh = bbox_xcycwh[mask]\n bbox_xcycwh[:, 3:] *= 1.2\n cls_conf = cls_conf[mask]\n persons_count = len(bbox_xcycwh)\n bbox_xywh = bbox_cxywh_xywh(bbox_xcycwh)\n if persons_count > 0:\n im = s_ims[idx]\n im = draw_bboxes_xywh(im, [bbox_xywh], None)\n s_ims[idx] = im\n\n eam_im_ouput = b_outs_e[0]\n bbox_xcycwh, cls_conf, cls_ids = eam_im_ouput\n persons_count = 0\n\n bbox_xywh = []\n if bbox_xcycwh is not None and bbox_xcycwh != []:\n mask = cls_ids == 0\n bbox_xcycwh = bbox_xcycwh[mask]\n bbox_xcycwh[:, 3:] *= 1.2\n cls_conf = cls_conf[mask]\n persons_count = len(bbox_xcycwh)\n bbox_xywh = bbox_cxywh_xywh(bbox_xcycwh)\n if persons_count > 0:\n im = e_ims[-1]\n im = draw_bboxes_xywh(im, [bbox_xywh], None)\n e_ims[-1] = im\n\n for i in range(len(e_ims)):\n im = np.concatenate([e_ims[-1], s_ims[i]], axis=1)\n video_output.write(im)\n print(count)\n count += 3\n video_output.release()\n","repo_name":"Asianatix/people_counter","sub_path":"detectron2_detection.py","file_name":"detectron2_detection.py","file_ext":"py","file_size_in_byte":8905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14571282421","text":"from random import randint\n\nron_words = [\n\t'Bayou', 'Exks', 'Ziso', 'Zeco','Thibaut','Bobbo', 'Simon', 'Voetsek',\n\t'Lollo', 'Lucia', 'Haibo', 'Kiki', 'Trevs', 'Danny', 'Makelele'\n]\n\ndef ronnyboy(word):\n\trandom_pos = randint(0, len(ron_words) -1)\n\treturn f'{word} {ron_words[random_pos]}'\n\n\nparagraphs = int(input('How paragraphs of ron ipsum: '))\n\nwith open('ipsum.txt') as ipsum_original:\n\titems = ipsum_original.read().split()\n\n\tfor n in range(paragraphs):\n\t\tron_text = list(map(ronnyboy, items))\n\t\twith open('ron_ipsum.txt', 'a') as ipsum_ron:\n\t\t\tipsum_ron.write(' '.join(ron_text) + '\\n\\n')","repo_name":"ronnynijimbere/basic-intro-python","sub_path":"programms/ipsum_gen.py","file_name":"ipsum_gen.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74333296193","text":"__author__ = \"fitrah.wahyudi.imam@gmail.com\"\n\nimport json\nfrom datetime import datetime\nimport os\nimport sys\nimport logging\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom _cConfig import _ConfigParser, _Common\nfrom _dDAO import _DAO\nfrom _tTools import _Helper\nfrom _nNetwork import _NetworkAccess\nfrom _nNetwork import _SFTPAccess\nfrom _dDevice import _QPROX, _EDC\nfrom time import sleep\n\n\nclass SettlementSignalHandler(QObject):\n __qualname__ = 'SettlementSignalHandler'\n SIGNAL_MANDIRI_SETTLEMENT = pyqtSignal(str)\n\n\nST_SIGNDLER 
= SettlementSignalHandler()\nLOGGER = logging.getLogger()\nBACKEND_URL = _ConfigParser.get_value('TERMINAL', 'backend^server')\nTID = _ConfigParser.get_value('TERMINAL', 'tid')\nSALT = '|KIOSK'\n# Hardcoded Setting for SMT -----------------------\nSMT_URL = 'https://smt.mdd.co.id:10000/c2c-api/'\nSMT_MID = 'eb03307c9f8e26c02595be6bbf682c0c'\nSMT_TOKEN = 'a34837228839a9af1d5d5d968ad1b885'\n# -------------------------------------------------\n_Common.SMT_CONFIG['url'] = SMT_URL\n_Common.SMT_CONFIG['mid'] = SMT_MID\n_Common.SMT_CONFIG['token'] = SMT_TOKEN\n_Common.SMT_CONFIG['full_url'] = SMT_URL + 'settlement/submit'\n\nHEADER = {\n 'Content-Type': 'application/json',\n}\nFILE_PATH = os.path.join(sys.path[0], '_rRemoteFiles')\n\nBID = _Common.BID\nGLOBAL_SETTLEMENT = []\n\n\ndef store_local_settlement(__param):\n try:\n param = {\n \"sid\": _Helper.get_uuid(),\n \"tid\": TID,\n \"bid\": BID[__param['bank']],\n \"filename\": __param['filename'],\n \"status\": 'TOPUP_PREPAID|OPEN',\n \"amount\": __param['amount'],\n \"row\": __param['row']\n }\n _DAO.insert_settlement(param=param)\n return param['sid']\n except Exception as e:\n LOGGER.warning(str(e))\n return None\n\n\ndef push_settlement_data(__param):\n global GLOBAL_SETTLEMENT\n \"\"\"\n \"bid\": \"1\",\n \"amount\": 999000,\n \"row\": 99,\n \"filename\": \"FILETEST123456789098765432100000006.txt\",\n \"settlement_created_at\": \"2018-11-26 11:00:00\"\n \"\"\"\n __url = SMT_URL + 'settlement/submit'\n if __param is None:\n LOGGER.warning(('push_settlement_data :', 'Missing __param'))\n return False\n __sid = store_local_settlement(__param)\n if __sid is None:\n LOGGER.warning(('push_settlement_data :', '__sid is None'))\n return False\n try:\n __param['mid'] = SMT_MID\n __param['token'] = SMT_TOKEN\n __param['tid'] = 'MDD-VM'+TID\n status, response = _NetworkAccess.post_to_url(url=__url, param=__param)\n # LOGGER.debug(('push_settlement_data :', str(status), str(response)))\n if status == 200 and response['response']['code'] == 200:\n _DAO.update_settlement({'sid': __sid, 'status': 'TOPUP_PREPAID|CLOSED'})\n for settle in GLOBAL_SETTLEMENT:\n settle['key'] = settle['rid']\n _DAO.mark_sync(param=settle, _table='TopUpRecords', _key='rid', _syncFlag=9)\n GLOBAL_SETTLEMENT = []\n return True\n else:\n return False\n except Exception as e:\n LOGGER.warning(('push_settlement_data :', e))\n return False\n\n\ndef upload_settlement_file(filename, local_path, remote_path=None):\n return _SFTPAccess.send_file(filename, local_path=local_path, remote_path=remote_path)\n\n\ndef get_response_settlement(filename, remote_path):\n return _SFTPAccess.get_file(filename, remote_path=remote_path)\n\n\nMANDIRI_LAST_TIMESTAMP = ''\nMANDIRI_LAST_FILENAME = ''\n\n\ndef create_settlement_file(bank='BNI', mode='TOPUP', output_path=None, force=False):\n global GLOBAL_SETTLEMENT, MANDIRI_LAST_TIMESTAMP, MANDIRI_LAST_FILENAME\n if bank == 'BNI' and mode == 'TOPUP':\n try:\n LOGGER.info(('Create Settlement File', bank, mode))\n if output_path is None:\n output_path = FILE_PATH\n settlements = _DAO.get_query_from('TopUpRecords', ' syncFlag=1 AND reportKA=\"N/A\" ')\n GLOBAL_SETTLEMENT = settlements\n if len(settlements) == 0:\n LOGGER.warning(('No Data For Settlement', str(settlements)))\n return False\n _filename = 'TOPMDD_'+_Common.MID_BNI + _Common.TID_BNI + datetime.now().strftime('%Y%m%d%H%M%S')+'.TXT'\n LOGGER.info(('Settlement Filename', bank, mode, _filename))\n _filecontent = ''\n _filecontent2 = ''\n _all_amount = 0\n _header = 'H01' + _Common.MID_BNI + 
_Common.TID_BNI + '|'\n _filecontent += _header\n _trailer = 'T' + str(len(settlements)).zfill(6) + '00000000'\n for settle in settlements:\n remarks = json.loads(settle['remarks'])\n _all_amount += int(remarks['value']) #Must Be Denom\n _filecontent += ('D' + settle['reportSAM']) + '|'\n # settle['key'] = settle['rid']\n # _DAO.mark_sync(param=settle, _table='TopUpRecords', _key='rid', _syncFlag=9)\n # Copy File Content Here to Update with the new CRC32\n _filecontent2 = _filecontent\n _filecontent += _trailer\n _file_created = os.path.join(output_path, _filename)\n with open(_file_created, 'w+') as f:\n __all_lines1 = _filecontent.split('|')\n for line in __all_lines1:\n if line != __all_lines1[-1]:\n f.write(line+'\\n')\n else:\n f.write(line)\n f.close()\n _crc = _Helper.file2crc32(_file_created)\n if _crc is False:\n LOGGER.warning(('Settlement Filename Failed in CRC', _filename))\n return False\n _filecontent2 += ('T' + str(len(settlements)).zfill(6) + _crc)\n with open(_file_created, 'w+') as f:\n __all_lines2 = _filecontent2.split('|')\n for line in __all_lines2:\n if line != __all_lines2[-1]:\n f.write(line+'\\n')\n else:\n f.write(line)\n f.close()\n _result = {\n 'path_file': _file_created,\n 'filename': _filename,\n 'row': len(settlements),\n 'amount': str(_all_amount),\n 'bank': bank,\n 'bid': BID[bank],\n 'settlement_created_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n # Insert Into DB\n # smid VARCHAR(100) PRIMARY KEY NOT NULL,\n # fileName TEXT,\n # fileContent TEXT,\n # status INT,\n # remarks TEXT,\n _DAO.insert_sam_record({\n 'smid': _Helper.get_uuid(),\n 'fileName': _filename,\n 'fileContent': _filecontent2,\n 'status': 1,\n 'remarks': json.dumps(_result)\n })\n # To Prevent Re-Create The Same File When Failed Push To SMT\n for settle in GLOBAL_SETTLEMENT:\n settle['key'] = settle['rid']\n _DAO.mark_sync(param=settle, _table='TopUpRecords', _key='rid', _syncFlag=9)\n return _result\n except Exception as e:\n LOGGER.warning((bank, mode, str(e)))\n return False\n elif bank == 'MANDIRI' and mode == 'TOPUP':\n try:\n # LOGGER.info(('Create Settlement File', bank, mode))\n if output_path is None:\n output_path = FILE_PATH\n settlements = _DAO.get_query_from('TopUpRecords', ' syncFlag=1 AND reportKA <> \"N/A\" ')\n GLOBAL_SETTLEMENT = settlements\n if len(settlements) == 0 and force is False:\n LOGGER.warning(('No Data For Settlement', bank, mode, str(settlements)))\n return False\n __shift = '0002'\n __seq = '02'\n __timestamp = datetime.now().strftime('%d%m%Y%H%M')\n MANDIRI_LAST_TIMESTAMP = __timestamp\n __raw = _Common.MID_MAN + __shift + _Common.TID_MAN + __seq + (__timestamp * 2) + 'XXXX' + '.txt'\n __ds = _Helper.get_ds(__raw, 4, True)\n _filename = _Common.MID_MAN + __shift + _Common.TID_MAN + __seq + (__timestamp * 2) + __ds + '.txt'\n MANDIRI_LAST_FILENAME = _filename\n LOGGER.info(('Create Settlement Filename', bank, mode, _filename))\n _filecontent = ''\n _all_amount = 0\n x = 0\n for settle in settlements:\n x += 1\n remarks = json.loads(settle['remarks'])\n _all_amount += (int(remarks['value'])-int(remarks['admin_fee']))\n _filecontent += _Helper.reverse_hexdec(settle['reportSAM']) + __shift + str(x).zfill(6) + chr(3) + '|'\n _header = 'PREPAID' + str(len(settlements) + 2).zfill(8) + str(_all_amount).zfill(12) + __shift + \\\n _Common.MID_MAN + datetime.now().strftime('%d%m%Y') + chr(3) + '|'\n _filecontent = _header + _filecontent\n _trailer = _Common.MID_MAN + str(len(settlements)).zfill(8)\n _filecontent += _trailer\n _file_created = 
os.path.join(output_path, _filename)\n with open(_file_created, 'w+') as f:\n __all_lines = _filecontent.split('|')\n for line in __all_lines:\n if line != __all_lines[-1]:\n f.write(line+'\\n')\n else:\n f.write(line)\n f.close()\n _file_created_ok = os.path.join(output_path, _filename.replace('.txt', '.ok'))\n with open(_file_created_ok, 'w+') as f_ok:\n f_ok.write('')\n f_ok.close()\n _result = {\n 'path_file': _file_created,\n 'filename': _filename,\n 'row': len(settlements),\n 'amount': str(_all_amount),\n 'bank': bank,\n 'bid': BID[bank],\n 'settlement_created_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n _DAO.insert_sam_record({\n 'smid': _Helper.get_uuid(),\n 'fileName': _filename,\n 'fileContent': _filecontent,\n 'status': 1,\n 'remarks': json.dumps(_result)\n })\n for settle in GLOBAL_SETTLEMENT:\n settle['key'] = settle['rid']\n _DAO.mark_sync(param=settle, _table='TopUpRecords', _key='rid', _syncFlag=3)\n return _result\n except Exception as e:\n LOGGER.warning((bank, mode, str(e)))\n return False\n elif bank == 'MANDIRI' and mode == 'KA':\n try:\n # LOGGER.info(('Create Settlement File', bank, mode))\n if output_path is None:\n output_path = FILE_PATH\n settlements = _DAO.get_query_from('TopUpRecords', ' syncFlag=3 AND reportKA <> \"N/A\" ')\n # GLOBAL_SETTLEMENT = settlements\n if len(settlements) == 0 and force is False:\n LOGGER.warning(('No Data For Settlement', bank, mode, str(settlements)))\n return False\n __shift = '0002'\n # __seq = '02'\n # __timestamp = MANDIRI_LAST_TIMESTAMP\n # __ds = __timestamp[-4:]\n # _filename = 'KA' + _Common.MID_MAN + __shift + _Common.TID_MAN + __seq + (__timestamp * 2) + __ds + '.TXT'\n _filename = 'KA' + MANDIRI_LAST_FILENAME\n LOGGER.info(('Create Settlement Filename', bank, mode, _filename))\n _filecontent = ''\n _all_amount = 0\n x = 0\n for settle in settlements:\n x += 1\n remarks = json.loads(settle['remarks'])\n _all_amount += int(remarks['value'])\n _filecontent += settle['reportKA'] + __shift + str(x).zfill(6) + chr(3) + '|'\n _header = 'ADMINCARD' + str(len(settlements) + 2).zfill(8) + str(_all_amount).zfill(12) + __shift + \\\n _Common.MID_MAN + datetime.now().strftime('%d%m%Y') + chr(3) + '|'\n _filecontent = _header + _filecontent\n _trailer = _Common.MID_MAN + str(len(settlements)).zfill(8)\n _filecontent += _trailer\n _file_created = os.path.join(output_path, _filename)\n with open(_file_created, 'w+') as f:\n __all_lines = _filecontent.split('|')\n for line in __all_lines:\n if line != __all_lines[-1]:\n f.write(line+'\\n')\n else:\n f.write(line)\n f.close()\n _file_created_ok = os.path.join(output_path, _filename.replace('.txt', '.ok'))\n with open(_file_created_ok, 'w+') as f_ok:\n f_ok.write('')\n f_ok.close()\n _result = {\n 'path_file': _file_created,\n 'filename': _filename,\n 'row': len(settlements),\n 'amount': str(_all_amount),\n 'bank': bank,\n 'bid': BID[bank],\n 'settlement_created_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n _DAO.insert_sam_record({\n 'smid': _Helper.get_uuid(),\n 'fileName': _filename,\n 'fileContent': _filecontent,\n 'status': 1,\n 'remarks': json.dumps(_result)\n })\n for settle in settlements:\n settle['key'] = settle['rid']\n _DAO.mark_sync(param=settle, _table='TopUpRecords', _key='rid', _syncFlag=9)\n return _result\n except Exception as e:\n LOGGER.warning((bank, mode, str(e)))\n return False\n else:\n LOGGER.warning(('Unknown bank/mode', bank, mode))\n return False\n\n# {\n# 'path_file': _file_created,\n# 'filename': _filename,\n# 'row': len(settlements),\n# 
'amount': str(_all_amount),\n# 'bank': bank,\n# 'bid': BID[bank],\n# 'settlement_created_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n# }\n\ndef start_do_bni_topup_settlement():\n bank = 'BNI'\n _Helper.get_pool().apply_async(do_settlement_for, (bank,))\n\n\ndef start_do_mandiri_topup_settlement():\n if int(_Common.MANDIRI_ACTIVE_WALLET) <= int(_Common.MINIMUM_AMOUNT):\n bank = 'MANDIRI'\n _Common.MANDIRI_ACTIVE_WALLET = 0\n _Helper.get_pool().apply_async(do_settlement_for, (bank,))\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|TRIGGERED')\n else:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|NO_REQUIRED')\n\n\ndef start_reset_mandiri_settlement():\n bank = 'MANDIRI'\n _Common.MANDIRI_ACTIVE_WALLET = 0\n force = True\n _Helper.get_pool().apply_async(do_settlement_for, (bank, force,))\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|TRIGGERED')\n\n\ndef start_dummy_mandiri_topup_settlement():\n bank = 'MANDIRI'\n force = True\n _Helper.get_pool().apply_async(do_settlement_for, (bank, force,))\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|TRIGGERED')\n\n\ndef do_settlement_for(bank='BNI', force=False):\n if bank == 'BNI':\n _SFTPAccess.HOST_BID = 2\n if _Helper.is_online(source='bni_settlement') is False:\n return\n # if _SFTPAccess.SFTP is not None:\n # _SFTPAccess.close_sftp()\n # _SFTPAccess.init_sftp()\n # if _SFTPAccess.SFTP is None:\n # LOGGER.warning(('do_settlement_for', bank, 'failed cannot init SFTP'))\n # return\n _param = create_settlement_file(bank=bank)\n if _param is False:\n return\n _push = upload_settlement_file(_param['filename'], _param['path_file'])\n if _push is False:\n return\n return push_settlement_data(_param)\n elif bank == 'MANDIRI':\n _SFTPAccess.HOST_BID = 1\n if _Helper.is_online(source='mandiri_settlement') is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_NO_INTERNET_CONNECTION')\n return\n # _QPROX.auth_ka(_slot=_Common.get_active_sam(bank='MANDIRI', reverse=False), initial=False)\n # if _SFTPAccess.SFTP is not None:\n # _SFTPAccess.close_sftp()\n # _SFTPAccess.init_sftp()\n # if _SFTPAccess.SFTP is None:\n # LOGGER.warning(('do_settlement_for', bank, 'failed cannot init SFTP'))\n # return\n _param_sett = create_settlement_file(bank=bank, mode='TOPUP', force=force)\n if _param_sett is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_CREATE_FILE_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|CREATE_FILE_SETTLEMENT')\n _file_ok = _param_sett['filename'].replace('.TXT', '.OK')\n _push_file_sett = upload_settlement_file(filename=[_param_sett['filename'], _file_ok],\n local_path=_param_sett['path_file'],\n remote_path=_Common.SFTP_MANDIRI['path']+'/Sett_Macin_DEV')\n if _push_file_sett is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_UPLOAD_FILE_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|UPLOAD_FILE_SETTLEMENT')\n _param_ka = create_settlement_file(bank=bank, mode='KA', force=force)\n if _param_ka is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_CREATE_FILE_KA_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|CREATE_FILE_KA_SETTLEMENT')\n _param_ka_ok = _param_ka['filename'].replace('.TXT', '.OK')\n _push_file_kalog = upload_settlement_file(filename=[_param_ka['filename'], _param_ka_ok],\n local_path=_param_ka['path_file'],\n 
remote_path=_Common.SFTP_MANDIRI['path']+'/Kalog_Macin_DEV')\n if _push_file_kalog is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_UPLOAD_FILE_KA_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|UPLOAD_FILE_KA_SETTLEMENT')\n _rq1 = _QPROX.create_online_info()\n if _rq1 is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_GENERATE_RQ1_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|GENERATE_RQ1_SETTLEMENT')\n _file_rq1 = mandiri_create_rq1(content=_rq1)\n if _file_rq1 is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_CREATE_FILE_RQ1_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|CREATE_FILE_RQ1_SETTLEMENT')\n _push_rq1 = upload_settlement_file(filename=_file_rq1['filename'],\n local_path=_file_rq1['path_file'],\n remote_path=_Common.SFTP_MANDIRI['path']+'/UpdateRequestIn_DEV')\n if _push_rq1 is False:\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|FAILED_UPLOAD_FILE_RQ1_SETTLEMENT')\n return\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|UPLOAD_FILE_RQ1_SETTLEMENT')\n sleep(1)\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|WAITING_RSP_UPDATE')\n _QPROX.do_update_limit_mandiri(_file_rq1['rsp'])\n # _QPROX.auth_ka(_slot=_Common.get_active_sam(bank='MANDIRI', reverse=False), initial=False)\n # Move To QPROX Module\n else:\n return\n\n\ndef mandiri_create_rq1(content):\n try:\n _filename = MANDIRI_LAST_FILENAME.replace('.txt', '.RQ1')\n _file_rq1 = os.path.join(FILE_PATH, _filename)\n with open(_file_rq1, 'w+') as f:\n f.write(content)\n f.close()\n output = {\n 'rq1': content,\n 'filename': _filename,\n 'path_file': _file_rq1,\n 'rsp': MANDIRI_LAST_FILENAME.replace('.txt', '.RSP')\n }\n LOGGER.debug(str(output))\n return output\n except Exception as e:\n LOGGER.warning(str(e))\n return False\n\n\ndef start_validate_update_balance():\n _Helper.get_pool().apply_async(validate_update_balance)\n\n\ndef validate_update_balance():\n while True:\n daily_settle_time = _ConfigParser.get_set_value('QPROX', 'mandiri^daily^settle^time', '02:00')\n sync_time = int(_ConfigParser.get_set_value('QPROX', 'mandiri^daily^sync^time', '3600'))\n current_time = _Helper.now() / 1000\n LOGGER.debug(('MANDIRI_SAM_UPDATE_BALANCE', 'SYNC_TIME', sync_time, 'DAILY_SETTLEMENT', daily_settle_time))\n if _Common.LAST_UPDATE > 0:\n last_update_with_tolerance = (_Common.LAST_UPDATE/1000) + 84600\n if current_time >= last_update_with_tolerance:\n LOGGER.info(('DETECTED_EXPIRED_LIMIT_UPDATE', last_update_with_tolerance, current_time))\n _Common.MANDIRI_ACTIVE_WALLET = 0\n do_settlement_for(bank='MANDIRI', force=True)\n ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|TRIGGERED')\n if _Helper.whoami() not in _Common.ALLOWED_SYNC_TASK:\n LOGGER.debug(('[BREAKING-LOOP] ', _Helper.whoami()))\n break\n next_run_time = current_time + sync_time\n LOGGER.debug(('MANDIRI_SAM_UPDATE_BALANCE NEXT RUN', _Helper.convert_epoch(t=next_run_time)))\n sleep(sync_time)\n\n\nMANDIRI_UPDATE_SCHEDULE_RUNNING = False\n\n\ndef start_trigger_mandiri_sam_update():\n if not _QPROX.INIT_MANDIRI:\n LOGGER.warning(('FAILED MANDIRI_SAM_UPDATE_BALANCE', 'INIT_MANDIRI', _QPROX.INIT_MANDIRI))\n print(\"pyt: Failed MANDIRI_SAM_UPDATE_BALANCE, Mandiri SAM Not Init Yet\")\n return\n sleep(_Helper.get_random_num(.7, 2.9))\n if not MANDIRI_UPDATE_SCHEDULE_RUNNING:\n 
_Helper.get_pool().apply_async(trigger_mandiri_sam_update)\r\n    else:\r\n        print(\"pyt: Failed MANDIRI_SAM_UPDATE_BALANCE, Already Triggered Previously\")\r\n\r\n\r\ndef trigger_mandiri_sam_update():\r\n    global MANDIRI_UPDATE_SCHEDULE_RUNNING\r\n\r\n    # When this function is triggered, it forces an update of the SAM balance and\r\n    # temporarily ignores the last update timestamp\r\n    daily_settle_time = _ConfigParser.get_set_value('QPROX', 'mandiri^daily^settle^time', '02:00')\r\n    current_time = _Helper.now() / 1000\r\n    last_update = 0\r\n    if _Common.LAST_UPDATE > 0:\r\n        last_update = _Common.LAST_UPDATE/1000\r\n    last_update_with_tolerance = last_update + 84600  # same tolerance (in seconds) as validate_update_balance\r\n    current_limit = 5000000\r\n    if _Common.MANDIRI_ACTIVE_WALLET > current_limit:\r\n        current_limit = 10000000\r\n    if _Common.MANDIRI_ACTIVE_WALLET < current_limit or current_time >= last_update_with_tolerance:\r\n        MANDIRI_UPDATE_SCHEDULE_RUNNING = True\r\n        LOGGER.info(('TRIGGERED_BY_TIME_SETUP', _Helper.time_string('%H:%M'), daily_settle_time))\r\n        _Common.MANDIRI_ACTIVE_WALLET = 0\r\n        do_settlement_for(bank='MANDIRI', force=True)\r\n        ST_SIGNDLER.SIGNAL_MANDIRI_SETTLEMENT.emit('MANDIRI_SETTLEMENT|TRIGGERED')\r\n        MANDIRI_UPDATE_SCHEDULE_RUNNING = False\r\n    else:\r\n        LOGGER.warning(('FAILED_START_TIME_TRIGGER', _Helper.time_string('%H:%M'), daily_settle_time))\r\n        LOGGER.warning(('LAST_UPDATE_BALANCE', _Helper.convert_epoch(last_update)))\r\n        LOGGER.warning(('CURRENT_SAM_BALANCE', _Common.MANDIRI_ACTIVE_WALLET, current_limit))\r\n\r\n\r\ndef start_trigger_edc_settlement():\r\n    sleep(_Helper.get_random_num(.7, 2.9))\r\n    if not _Common.EDC_SETTLEMENT_RUNNING:\r\n        _Common.EDC_SETTLEMENT_RUNNING = True\r\n        _Helper.get_pool().apply_async(trigger_edc_settlement)\r\n    else:\r\n        print(\"pyt: Failed EDC_SETTLEMENT_SCHEDULE, Already Triggered Previously\")\r\n\r\n\r\ndef trigger_edc_settlement():\r\n    daily_settle_time = _ConfigParser.get_set_value('EDC', 'daily^settle^time', '23:00')\r\n    LOGGER.info(('TRIGGERED_BY_TIME_SETUP', 'EDC_SETTLEMENT_SCHEDULE', _Helper.time_string('%H:%M'), daily_settle_time))\r\n    _EDC.define_edc_settlement()\r\n    _Common.EDC_SETTLEMENT_RUNNING = False\r\n\r\n\r\n","repo_name":"ciwanridwan/Unified-Vm","sub_path":"_sService/_SettlementService.py","file_name":"_SettlementService.py","file_ext":"py","file_size_in_byte":24477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23791674376","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#from gluon import *\n\nclass Notes:\n    ''' A library of construction notations where keys are mapped to a\n    construction statement.\n    Use: to word construction documents.\n    '''\n    def __init__(self):\n        self.notes='Use: to word construction documents.'\n\n    def connote(self):\n        ''' returns a dictionary mapping abbreviated keywords to a string\n        representing a construction statement.\n        '''\n        cnote={'excavation':{'exc':'to excavate for ','topsoil':'topsoil ',\n                'soil':'solid earth ','marl':'solid marl ',\n                'stone':'solid stone ','loosestone':'loose stone ',\n                'softclay':'soft clay ','sand':'loose sand ',\n                'dispose':'cart away and dispose of excavated material ',\n                'backfill':'To backfill with suitable fill and compact '\n                },\n          \n          \n          'concrete':{'mix':'mix place and vibrate ', 'conc':'concrete to ',\n                'r121':'1:2:1 ', 'r122':'1:2:2 ', 'r123':'1:2:3 ',\n                'r124':'1:2:4 ','r131':'1:3:1 ', 'r132':'1:3:2 ',\n                'r133':'1:3:3 ','r134':'1:3:4 ','r135':'1:3:5 ','r136':'1:3:6 ' \n                },\n          \n          'rebar':{'cut':'cut bend and install ','fab':'cut and fabricate ',\n             'lnk':'links to ', 'str':'stirrups ', 'brs':'bars to ','rbr':'re-bar ',\n            
'm5':'6mm ', 'm10':'10mm ', 'm12':'12mm ','m16':'16mm ', 'm20':'20mm ',\n 'm25':'25mm ',\n '1/4':'1/4 inch ','3/8':'1/2 inch ','5/8':'5/8 inch ',\n '3/4':'3/4 inch ','1':'1 inch '\n },\n\n 'structural':{'fdn':'foundation ','stf':'stiffeners ', 'clm':'columns ',\n 'lnt':'lintels ','bms':'beams ','blt':'beltcourse ',\n 'slb':'slab ', 'wll':'block walls ', 'blk':'block cores '},\n \n 'wall':{'erect':'to erect ', 'wll':'block wall ', \n 'acore':'fill alternate cores ', 'core':'fill all cores ',\n 'drw':'dry wall ','cstone':'cut stone wall ',\n 'dstone':'dressed stone wall ','rstone':'rubble stone wall ',\n 'ply':'plyboard wall ','brd':'boarded wall '\n \n },\n\n 'form':{'make':'make formwork ','erect':'erect and remove formwork to ',\n 'mprop':'install and remove metal shores to ',\n 'wprop':'install and remove wooden shores to '\n },\n\n 'roof':{'install':'install roofing ','erect':'erect roof ',\n 'decra':'decra metal ','metro':'metro romano ',\n 'fiber':'fiber glass shingle ','zinc':'zinc sheet ',\n 'alsheet':'aluminum sheeting ','stseam':'standing seam '\n },\n\n 'floor':{'install':'install flooring ','lay':'level and lay ',\n 'cer':'ceramic tiles ','porc':'porcelain tiles ',\n 'prk':'pakae tile ','wood':'wooden flooring ',\n 'thset':'thinset mortar ',\n 'grt':'grouted in matching grout ',\n 'plsh':'polish '\n }\n }\n return cnote\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"2pees/FastEstimateII","sub_path":"main/Bq/cnotes.py","file_name":"cnotes.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"653768493","text":"import sys\nfrom typing import DefaultDict\n\ndiff = {\n 'D': (0, 1),\n 'U': (0, -1),\n 'L': (-1, 0),\n 'R': (1, 0),\n}\n\ndef solve(lines, length):\n rope = [(0, 0)] * length\n tailpositions = {(0, 0)}\n\n for direction, steps in lines:\n for _ in range(steps):\n head0, head1 = rope[0]\n diff0, diff1 = diff[direction]\n rope[0] = head0+diff0, head1+diff1\n\n for knot in range(length-1):\n head0, head1 = rope[knot]\n tail0, tail1 = rope[knot+1]\n\n if tail0 == head0 and tail1 < head1 - 1:\n tail1 = head1 - 1\n elif tail0 == head0 and tail1 > head1 + 1:\n tail1 = head1 + 1\n elif tail1 == head1 and tail0 < head0 - 1:\n tail0 = head0 - 1\n elif tail1 == head1 and tail0 > head0 + 1:\n tail0 = head0 + 1\n elif tail0 != head0 and tail1 != head1:\n diff_lr = head0 - tail0\n diff_ud = head1 - tail1\n\n if abs(diff_lr) > 1 or abs(diff_ud) > 1:\n tail0 += 1 if diff_lr > 0 else -1\n tail1 += 1 if diff_ud > 0 else -1\n\n rope[knot+1] = tail0, tail1\n\n tailpositions.add(rope[-1])\n\n return len(tailpositions)\n\n\ndef main():\n lines = []\n for line in sys.stdin:\n direction, steps = line.split(' ')\n lines.append((direction, int(steps)))\n\n print('part_one', solve(lines, 2))\n\n print('part_two', solve(lines, 10))\n\n\nmain()\n","repo_name":"akkerman/advent_of_code","sub_path":"2022/09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5205555156","text":"from ibapi.wrapper import EWrapper\r\nfrom ibapi.client import EClient\r\nfrom ibapi.contract import Contract\r\nfrom ibapi.order import Order\r\nimport datetime as dt\r\nimport time\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nfrom mailer import send_email\r\nimport sys\r\nfrom get_init_info import get_scalers, sleep_time\r\n\r\n\r\nwatchlist = {'PRCP' : 7.59, 'INMB' : 18, 'IMV' : 
7}\r\n\r\nclass TestApp(EClient, EWrapper):\r\n\r\n def __init__(self):\r\n\r\n EClient.__init__(self, self)\r\n\r\n self.current_price = {}\r\n self.scaled_price = {}\r\n self.init_ID = None\r\n self.trading_ready = False\r\n self.donetrading = []\r\n\r\n self.triggers = {}\r\n self.positions = {}\r\n self.current_price = {}\r\n\r\n\r\n for ticker in watchlist.keys():\r\n self.triggers[ticker] = watchlist[ticker]\r\n self.positions[ticker] = 0\r\n self.current_price[ticker] = 0\r\n #\r\n # self.triggers = {'WIMI' : 9.21,\r\n # 'WAFU' : 8.50}\r\n #\r\n #\r\n #\r\n # self.positions = {'WIMI':0,\r\n # 'WAFU': 0}\r\n #\r\n #\r\n # self.current_price = {'WIMI':0,\r\n # 'WAFU' : 0}\r\n\r\n self.email_count = 0\r\n\r\n self.stock_info = {'current price' : self.current_price,\r\n 'trigger' : self.triggers,\r\n 'position' : self.positions,\r\n 'trade time': {},\r\n 'take profit': {},\r\n 'stop loss' : {},\r\n 'avg cost': {},\r\n 'unreal pnl' : {},\r\n 'real pnl' : {}\r\n }\r\n\r\n def liquidate(self):\r\n\r\n for ticker in watchlist:\r\n position = self.stock_info['position'][f'{ticker}']\r\n\r\n if position != 0:\r\n\r\n self.execute_order(self.init_ID, ticker, 'SELL', str(position), 'MKT')\r\n self.init_ID += 1\r\n print(f'{position} shares of {ticker} have been sold')\r\n\r\n def error(self, Id, errorCode:int, errorString:str):\r\n print(f'There is an error with {Id} errorcode {errorCode} that says {errorString}')\r\n\r\n\r\n def updatePortfolio(self, contract: Contract, position: float,\r\n marketPrice: float, marketValue: float,\r\n averageCost: float, unrealizedPNL: float,\r\n realizedPNL: float, accountName: str):\r\n # Doesn't work yet\r\n # if not self.avgcost_for_data or not len(self.avgcost_for_data):\r\n # self.avgcost_for_data = []\r\n #\r\n # else:\r\n # self.avgcost_for_data.append([contract.symbol, averageCost])\r\n printing = False\r\n\r\n if printing:\r\n print(\r\n f'UPDATING PORTFOLIO INFORMATION\\n Symbol: {contract.symbol}|SecType: {contract.secType}|Exchange: {contract.exchange}'\r\n f'|Position: {position}|Market Price: {marketPrice}|Market Value: {marketValue}|Average Cost: {averageCost}|Unrealized PnL: {unrealizedPNL}'\r\n f'|Realized PnL: {realizedPNL}|Account: {accountName}')\r\n\r\n self.stock_info['position'][f'{contract.symbol}'] = position\r\n\r\n\r\n self.stock_info['unreal pnl'][f'{contract.symbol}'] = unrealizedPNL\r\n self.stock_info['real pnl'][f'{contract.symbol}'] = realizedPNL\r\n #\r\n # if sum(self.stock_info['real pnl'].values()) < -1000:\r\n # self.liquidate()\r\n # time.sleep(2)\r\n #\r\n # sys.exit('stopped out')\r\n\r\n\r\n\r\n def init_mkt_data(self, tickers, printing = False):\r\n #self.current_price = np.ones(len(tickers)).tolist()\r\n req_id = 1\r\n self.reqMarketDataType(4)\r\n self.printing = printing\r\n self.IDtoTicker = {}\r\n\r\n\r\n for ticker in tickers:\r\n self.IDtoTicker[f'{req_id}'] = ticker\r\n self.reqMktData(req_id, self.create_contract(ticker), '', False, False, [])\r\n req_id += 1\r\n\r\n\r\n def tickPrice(self, reqId, tickType, price, attrib):\r\n\r\n #\r\n # send_email('marcusdambrosio@gmail.com', 'marcusdambrosio@gmail.com',\r\n # 'TRADING STARTED',\r\n # subject='TRADING STARTED\\n')\r\n\r\n super().tickPrice(reqId, tickType, price, attrib)\r\n ticker = self.IDtoTicker[f'{reqId}']\r\n # self.stock_info['current price'][f'{ticker}'] = price\r\n\r\n #LOGIC\r\n\r\n if dt.datetime.today().strftime('%H:%M:%S')[:-1] == '14:59:3':\r\n self.liquidate()\r\n sys.exit('MKT CLOSED')\r\n\r\n\r\n self.logic(ticker = ticker, price = price, 
trigger = self.stock_info['trigger'][ticker])\r\n\r\n\r\n    def logic(self, ticker, price, trigger):\r\n\r\n        print(ticker,price,trigger)\r\n\r\n        if price >= trigger and self.stock_info['position'][ticker] == 0:\r\n\r\n            quantity = 10000 / price\r\n            self.execute_order(self.init_ID, ticker, 'BUY', str(quantity), 'MKT', bracket = True, stop_val = price - .1, prof_val = price + .15)\r\n            self.stock_info['position'][ticker] = quantity\r\n            self.stock_info['trade time'][ticker] = int(dt.datetime.today().strftime('%S'))\r\n\r\n        elif self.stock_info['position'][ticker] != 0:\r\n            quantity = 10000 / price\r\n            dT = int(dt.datetime.today().strftime('%S')) - self.stock_info['trade time'][ticker]\r\n            if dT > 20:\r\n                if dT > 40:\r\n                    dT = 60 - dT\r\n\r\n            else:\r\n                self.execute_order(self.init_ID, ticker, 'SELL', str(quantity), 'MKT')\r\n\r\n\r\n\r\n    def create_contract(self, symbol, sec_type='STK', exchange='SMART', curr='USD'):\r\n        if not symbol:\r\n            raise ValueError('No ticker specified.')\r\n\r\n        contract = Contract()\r\n        contract.symbol = symbol\r\n        contract.secType = sec_type\r\n        contract.exchange = exchange\r\n        contract.currency = curr\r\n        return contract\r\n\r\n    def create_order(self, symbol, side, quantity, ord_type, price= 0,\r\n                     bracket=False, stop_val=None, prof_val=None):\r\n\r\n        # Order config\r\n\r\n        parent_order = Order()\r\n\r\n        parent_order.action = side\r\n        parent_order.totalQuantity = quantity\r\n        parent_order.orderType = ord_type\r\n\r\n        if ord_type == 'LMT':\r\n            parent_order.lmtPrice = price\r\n\r\n        if bracket:\r\n            parent_order.transmit = True\r\n\r\n            stop_loss = Order()\r\n            stop_loss.action = 'SELL' if side == 'BUY' else 'BUY'\r\n            stop_loss.totalQuantity = quantity\r\n            stop_loss.orderType = 'STP'\r\n            stop_loss.auxPrice = stop_val  # STP orders take the stop trigger price in auxPrice, not lmtPrice\r\n            #stop_loss.parentId = parent_order.orderId\r\n            stop_loss.transmit = True\r\n\r\n            take_prof = Order()\r\n            take_prof.action = 'SELL' if side == 'BUY' else 'BUY'\r\n            take_prof.totalQuantity = quantity\r\n            take_prof.orderType = 'LMT'\r\n            take_prof.lmtPrice = prof_val\r\n            #take_prof.parentId = parent_order.orderId\r\n            take_prof.transmit = True\r\n\r\n            print(f'Bracket order for {symbol} created successfully.')\r\n            return [parent_order, stop_loss, take_prof]\r\n\r\n        else:\r\n            parent_order.transmit = True\r\n            print(f'{ord_type} for {symbol} created successfully.')\r\n            return parent_order\r\n\r\n    def store_send_data(self, symbol, side, quantity, ord_type, stop_val, prof_val):\r\n\r\n        current_date = dt.datetime.today().strftime('%m-%d-%y')\r\n        current_time = dt.datetime.now().strftime('%H:%M:%S')\r\n\r\n        #doesn't work yet\r\n        #avg_cost = [c for c in self.avgcost_for_data if c[0] == symbol][-1]\r\n        avg_cost = 5\r\n        new_data = {'Time': current_time,\r\n                    'Symbol': symbol,\r\n                    'Side': side,\r\n                    'Quantity': quantity,\r\n                    'Order Type': ord_type,\r\n                    'Average Cost': avg_cost,\r\n                    'Stop Loss' : stop_val,\r\n                    'Take Profit' : prof_val}\r\n\r\n        if f'{current_date}.csv' not in os.listdir('data_storage'):\r\n            new_data = pd.DataFrame(new_data, index = np.arange(len(new_data)))\r\n            new_data.to_csv(f'data_storage/{current_date}.csv')\r\n            print('DATA ADDED SUCCESSFULLY')\r\n\r\n        else:\r\n            old_data = pd.read_csv(f'data_storage/{current_date}.csv')\r\n            new_data = old_data.append(new_data, ignore_index = True)\r\n            new_data.to_csv(f'data_storage/{current_date}.csv')\r\n            print('DATA ADDED SUCCESSFULLY')\r\n\r\n\r\n    def execute_order(self, nextOrderID, symbol, side, quantity, ord_type, price='0',\r\n                      bracket=False, stop_val=None, prof_val=None):\r\n\r\n        if not symbol:\r\n            raise 
ValueError('No ticker specified')\r\n\r\n if not side:\r\n raise ValueError('Buy or Sell?')\r\n\r\n if not quantity:\r\n raise ValueError('No quantity specified')\r\n\r\n if not ord_type:\r\n raise ValueError('No order type specified')\r\n\r\n if ord_type == 'LMT' and price == '0':\r\n raise ValueError('Limit value not specified')\r\n\r\n '''\r\n GET MKT PRICE FOR DATA STORAGE REASONS\r\n else:\r\n mkt_price = ...\r\n '''\r\n self.nextID = nextOrderID\r\n\r\n theContract = self.create_contract(symbol)\r\n theOrder = self.create_order(symbol, side, quantity, ord_type, price = price,\r\n bracket = bracket, stop_val = stop_val, prof_val= prof_val)\r\n\r\n if bracket:\r\n for i in theOrder:\r\n self.placeOrder(self.nextID, theContract, i)\r\n self.nextID += 1\r\n\r\n print(f'Bracketed {side} order for {quantity} shares of {symbol} was placed.')\r\n print(f'Stop loss at {stop_val} and take profit at {prof_val}')\r\n\r\n else:\r\n self.placeOrder(self.nextID, theContract, theOrder)\r\n #self.nextID += 1\r\n print(f'ORDER ID: {self.nextID} \\n'\r\n f'{side} order for {quantity} shares of {symbol} was placed.')\r\n\r\n self.store_send_data(symbol, side, quantity, ord_type, stop_val=stop_val, prof_val=prof_val)\r\n\r\n the_time = dt.datetime.now().strftime('%H:%M:%S')\r\n\r\n # time.sleep(2)\r\n # send_email('marcusdambrosio@gmail.com', 'marcusdambrosio@gmail.com', 'ORDER PLACED')\r\n # # f'At {the_time} a {ord_type} order for {quantity} shares of {symbol} was placed.\\n'\r\n # # f'The average cost was {self.current_price[symbol]} for a total purchase of {self.current_price[symbol] * quantity}-.\\n'\r\n # # f'The stop is set at ${stop_val} and profit will be taken at ${prof_val}.',\r\n # # subject = 'ORDER PLACED\\n')\r\n # self.email_count += 1\r\n \r\n if self.email_count > 15:\r\n sys.exit('TERMINATE')\r\n\r\n\r\n\r\n\r\n def nextValidId(self, orderId):\r\n print(f'The next valid order ID is {orderId}')\r\n self.init_ID = orderId\r\n self.reqAccountUpdates(True, '')\r\n self.init_mkt_data(watchlist, printing = False)\r\n\r\n\r\n\r\napp = TestApp()\r\nstart = True\r\n\r\n\r\nwhile __name__ == '__main__':\r\n\r\n\r\n if dt.datetime.today().strftime('%H:%M') == '08:30':\r\n start = True\r\n\r\n if start:\r\n\r\n print('Startup successful.')\r\n app.connect('127.0.0.1', 7497, 1117)\r\n\r\n if app.isConnected():\r\n print('Connected')\r\n\r\n app.run()\r\n\r\n\r\n","repo_name":"marcusdambrosio/InteractiveBrokers_app","sub_path":"MomoTrader.py","file_name":"MomoTrader.py","file_ext":"py","file_size_in_byte":11591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17186110309","text":"import sys\nfrom floris.floris import Floris\nfrom pycallgraph import PyCallGraph\nfrom pycallgraph import Config\nfrom pycallgraph import GlobbingFilter\nfrom pycallgraph.output import GraphvizOutput\n\nconfig = Config(groups=True)\nconfig.trace_filter = GlobbingFilter(exclude=[\n 'pycallgraph.*',\n 'json*',\n 'codecs*',\n '_bootlocale*',\n '_weakrefset*'\n])\n\ngraphviz = GraphvizOutput()\ngraphviz.output_file = 'initialization.png'\nwith PyCallGraph(config=config, output=graphviz):\n if len(sys.argv) > 1:\n floris = Floris(sys.argv[1])\n else:\n floris = Floris(\"example_input.json\")\n\ngraphviz = GraphvizOutput()\ngraphviz.output_file = 'calculate_wake.png'\nwith PyCallGraph(config=config, output=graphviz):\n 
floris.farm.flow_field.calculate_wake()\n","repo_name":"WISDEM/FLORIS","sub_path":"docs/call_graph_script.py","file_name":"call_graph_script.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"71735295873","text":"r\"\"\"Analyze Traffic Images\n\nThis executable is used to annotate traffic images to highlight vehicle types and to produce stats\nand graphs for the amount of time bicycle lanes and bus stops are blocked by vehicles:\n\n\nExample usage:\n ./analyzeimages \\\n -path_images ./data/rawimages/\n -path_labels_map data/car_label_map.pbtxt\n -save_directory data/processedimages/\n\"\"\"\n\nimport sys\n\nfrom matplotlib.ticker import FormatStrFormatter, FuncFormatter\n\nsys.path.append('./models-master/research/')\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport time\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport csv\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import defaultdict\nfrom io import StringIO\n# from matplotlib import pyplot as plt\nimport matplotlib.path as mpltPath\n\nfrom PIL import Image\nimport scipy.misc\n\n\ndef processimages(path_images_dir, path_labels_map,save_directory):\n pathcpkt = 'data/output_inference_graph.pb/frozen_inference_graph.pb'\n csv_file = 'data/csvfile.csv'\n num_classes = 6\n\n detection_graph = tf.Graph()\n\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(pathcpkt, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n label_map = label_map_util.load_labelmap(path_labels_map)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes,\n use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n f = open(csv_file, 'w')\n #f.write(\n # 'timestamp,number cars in bike lane, number trucks in bike lane, '\n # 'number cars in bus stop, number trucks in bus stop\\n')\n\n def load_image_into_numpy_array(imageconvert):\n (im_width, im_height) = imageconvert.size\n try:\n return np.array(imageconvert.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n except ValueError:\n return np.array([])\n\n with detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n polygon_right_lane = [(178, 122), (188, 240), (231, 240), (187, 125)]\n polygon_left_lane = [(108, 143), (0, 215), (0, 233), (123, 142), (108, 97)]\n polygon_bus_lane = [(200, 155), (230, 240), (292, 240), (225, 157)]\n\n pathrightlane = mpltPath.Path(polygon_right_lane)\n pathleftlane = 
mpltPath.Path(polygon_left_lane)\n pathbuslane = mpltPath.Path(polygon_bus_lane)\n for testpath in os.listdir(path_images_dir):\n\n start_time = time.time()\n timestamp = testpath.split(\".jpg\")[0]\n\n try:\n image = Image.open(path_images_dir + '/' + testpath)\n image_np = load_image_into_numpy_array(image)\n except IOError:\n print(\"Issue opening \"+testpath)\n continue\n\n if image_np.size == 0:\n print(\"Skipping image \"+testpath)\n continue\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n min_score_thresh=0.4,\n use_normalized_coordinates=True,\n line_thickness=2)\n scores = np.squeeze(scores)\n boxes = np.squeeze(boxes)\n num_cars_in_bikelane, num_cars_in_bus_stop, num_trucks_in_bike_lane, num_trucks_in_bus_stop = 0, 0, 0, 0\n for i in range(boxes.shape[0]):\n if scores[i] > .4:\n box = tuple(boxes[i].tolist())\n\n ymin, xmin, ymax, xmax = box\n\n center_x = (((xmax * 352) - (xmin * 352)) / 2) + (xmin * 352)\n center_y = (((ymax * 240) - (ymin * 240)) / 2) + (ymin * 240)\n classes = np.squeeze(classes).astype(np.int32)\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n\n if class_name == 'car':\n points = [(center_x, center_y)]\n if pathrightlane.contains_points(points) or pathleftlane.contains_points(points):\n num_cars_in_bikelane += 1\n elif pathbuslane.contains_points(points):\n num_cars_in_bus_stop += 1\n\n elif class_name == 'truck' or class_name == 'police' or class_name == 'ups':\n points = [(center_x, center_y)]\n if pathrightlane.contains_points(points) or pathleftlane.contains_points(points):\n num_trucks_in_bike_lane += 1\n elif pathbuslane.contains_points(points):\n num_trucks_in_bus_stop += 1\n\n # write to a csv file whenever there is a vehicle, how many and of what type with timestamp\n f.write(timestamp + ',' + str(num_cars_in_bikelane) + ',' + str(num_trucks_in_bike_lane) + ',' + str(\n num_cars_in_bus_stop) + ',' + str(num_trucks_in_bus_stop) + '\\n')\n print(\"Process Time \" + str(time.time() - start_time))\n scipy.misc.imsave(save_directory + testpath, image_np)\n\n f.close()\n return csv_file\n\n\ndef initialize_datastore():\n blankarray = [0] * 24\n alldata = [[list(blankarray), list(blankarray), list(blankarray)],\n [list(blankarray), list(blankarray), list(blankarray)]]\n # alldata [ [cars_blocking_bikelane[24],trucks_blocking_bikelane[24],eitherblockingbikelane[24]\n # [cars_blocking_buslane[24],trucks_blocking_buslane[24],eitherblockingbuslane[24]]\n\n weekdaydata = [[list(blankarray), list(blankarray), list(blankarray)],\n [list(blankarray), list(blankarray), list(blankarray)]]\n # same as alldata above but for weekdays, weekenddata same but for weekends\n weekenddata = [[list(blankarray), list(blankarray), list(blankarray)],\n [list(blankarray), list(blankarray), list(blankarray)]]\n\n return [alldata, weekdaydata, weekenddata]\n\n\ndef weekday(datevalue):\n if datevalue.weekday() < 5:\n return True\n else:\n return False\n\n\ndef incrementarray(array, blockagearray, delta_time):\n 
timestamp_string = (blockagearray[0].split(\".jpg\"))[0]\n    datetime_object = datetime.strptime(timestamp_string, '%Y-%m-%d %H:%M:%S.%f')\n\n    hour = datetime_object.hour\n    num_cars_in_bike_lane = int(blockagearray[1])\n    num_trucks_in_bike_lane = int(blockagearray[2])\n    num_cars_in_bus_stop = int(blockagearray[3])\n    num_truck_in_bus_stop = int(blockagearray[4])\n\n    if num_cars_in_bike_lane > 0:\n        array[0][0][hour] += delta_time\n    if num_trucks_in_bike_lane > 0:\n        array[0][1][hour] += delta_time\n    if num_cars_in_bike_lane > 0 or num_trucks_in_bike_lane > 0:\n        array[0][2][hour] += delta_time\n    if num_cars_in_bus_stop > 0:\n        array[1][0][hour] += delta_time\n    if num_truck_in_bus_stop > 0:\n        array[1][1][hour] += delta_time\n    if num_cars_in_bus_stop > 0 or num_truck_in_bus_stop > 0:\n        array[1][2][hour] += delta_time\n\n\ndef incrementarrays(dataarrays, blockagearray, delta_time):\n    alldata = dataarrays[0]\n    weekdaydata = dataarrays[1]\n    weekenddata = dataarrays[2]\n\n    datetime_object = datetime.strptime((blockagearray[0].split(\".jpg\"))[0], '%Y-%m-%d %H:%M:%S.%f')\n\n    incrementarray(alldata, blockagearray, delta_time)\n    if weekday(datetime_object):\n        incrementarray(weekdaydata, blockagearray, delta_time)\n    else:\n        incrementarray(weekenddata, blockagearray, delta_time)\n\n    return [alldata, weekdaydata, weekenddata]\n\n\ndef buildsaveplot(list_to_graph, title):\n    label = ['', '', '', '', '', '6 am', '',\n             '', '', '', '', '12 noon', '', '', '', '', '', '6 pm', '',\n             '',\n             '', '', '', 'Midnight']\n    index = np.arange(len(label))\n    plt.bar(index, list_to_graph)\n    plt.xticks(index, label, fontsize=10, rotation=30)\n    plt.title(title)\n    plt.plot()\n\n    plt.ylim([0, 100.0])\n    ax = plt.gca()\n    ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f%%'))\n    plt.savefig(\"output/\"+title.replace(\" \", \"\") + \".png\", bbox_inches='tight')\n    plt.close()\n\n\ndef analyzeresults(csv_file):\n    total_time_secs, total_time_bike_lane_blocked_secs, total_time_bus_stop_blocked_secs = 0, 0, 0\n\n    weekdaytotalseconds = [1] * 24 # where we are going to store how many seconds worth of images there are\n    weekendtotalseconds = [1] * 24 # for each hour this is necessary because we may be missing images\n\n    previous_timestamp = 0\n    dataarrays = initialize_datastore()\n\n    data = csv.reader(open(csv_file, 'r'))\n    data = sorted(data, key=lambda rowparse: datetime.strptime((rowparse[0].split(\".jpg\"))[0], '%Y-%m-%d %H:%M:%S.%f'))\n\n    for row in data:\n        datetime_object = datetime.strptime((row[0].split(\".jpg\"))[0], '%Y-%m-%d %H:%M:%S.%f')\n        timestamp = float(datetime_object.strftime('%s'))\n        hour = datetime_object.hour\n\n        if previous_timestamp != 0:\n            delta_time = timestamp - previous_timestamp\n            if delta_time > 30:\n                print(\"DELTA TIME LARGE\")\n                delta_time = 30\n\n            total_time_secs += delta_time\n            if weekday(datetime_object):\n                weekdaytotalseconds[hour] += delta_time # necessary because there may be time stamps missing in images\n            else:\n                weekendtotalseconds[hour] += delta_time\n\n            dataarrays = incrementarrays(dataarrays, row, delta_time)\n        previous_timestamp = timestamp\n\n    weekendpercentageblocked = [[0] * 24, [0] * 24] # bike lane first array and bus lane second\n    weekdaypercentageblocked = [[0] * 24, [0] * 24]\n\n    for hour in range(0, 24):\n        total_time_bike_lane_blocked_secs += dataarrays[0][0][2][hour]\n        total_time_bus_stop_blocked_secs += dataarrays[0][1][2][hour]\n        weekdaypercentageblocked[0][hour] = 100 * (dataarrays[1][0][2][hour] / weekdaytotalseconds[hour])\n        weekendpercentageblocked[0][hour] = 100 * 
(dataarrays[2][0][2][hour] / weekendtotalseconds[hour])\n        weekdaypercentageblocked[1][hour] = 100 * (dataarrays[1][1][2][hour] / weekdaytotalseconds[hour])\n        weekendpercentageblocked[1][hour] = 100 * (dataarrays[2][1][2][hour] / weekendtotalseconds[hour])\n\n    total_time_seven2seven, blockedbikelaneseven2seven, blockedbuslaneseven2seven = 0, 0, 0\n    for x in range(7, 19):\n        total_time_seven2seven += weekdaytotalseconds[x]\n        blockedbikelaneseven2seven += dataarrays[1][0][2][x]\n        blockedbuslaneseven2seven += dataarrays[1][1][2][x]\n\n    print(\"RESULTS \\n Total Time \" + str(total_time_secs) + \" blocked bike lane time \" + str(\n        total_time_bike_lane_blocked_secs) + \" blocked truck lane time \" + str(total_time_bus_stop_blocked_secs))\n    print(\"Bike lane blocked \" + str(100 * (total_time_bike_lane_blocked_secs / total_time_secs)) + \"% of the time\")\n    print(\"Bus lane blocked \" + str(100 * (total_time_bus_stop_blocked_secs / total_time_secs)) + \"% of the time\")\n    print(\"Bike lane blocked \" + str(\n        100 * (blockedbikelaneseven2seven / total_time_seven2seven)) + \"% of the time during weekdays from 7 am to 7 pm\")\n    print(\"Bus lane blocked \" + str(\n        100 * (blockedbuslaneseven2seven / total_time_seven2seven)) + \"% of the time during weekdays from 7 am to 7 pm\")\n\n    buildsaveplot(weekdaypercentageblocked[0], 'Weekday Bike Lane Percentage Blocked by Hour')\n    buildsaveplot(weekdaypercentageblocked[1], 'Weekday Bus Stop Percentage Blocked by Hour')\n    buildsaveplot(weekendpercentageblocked[0], 'Weekend Bike Lane Percentage Blocked by Hour')\n    buildsaveplot(weekendpercentageblocked[1], 'Weekend Bus Stop Percentage Blocked by Hour')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Analyze traffic images to determine rate of blocking bike '\n                    'and bus lanes', formatter_class=RawTextHelpFormatter)\n    parser.add_argument('-path_images', help='the folder with all the downloaded images in it')\n    parser.add_argument('-path_labels_map', help='the file with the integer to label map')\n    parser.add_argument('-save_directory', help='the directory you want to save the annotated images to')\n    args = parser.parse_args()\n    #csv_file = processimages(args.path_images,args.path_labels_map,args.save_directory)\n    #analyzeresults(csv_file)\n    analyzeresults('data/analysis10days.csv')","repo_name":"Bellspringsteen/OurCamera","sub_path":"analyzeimages.py","file_name":"analyzeimages.py","file_ext":"py","file_size_in_byte":14533,"program_lang":"python","lang":"en","doc_type":"code","stars":373,"dataset":"github-code","pt":"61"} +{"seq_id":"39575577422","text":"from django.shortcuts import render\nfrom django import views\nfrom django.http import request\nfrom django.views.generic import CreateView,TemplateView\nfrom django.urls import reverse_lazy\nimport requests\nfrom .forms import CustomUserCreationForm\nfrom . 
import git\n\n\nclass SignUpView(CreateView):\n    form_class=CustomUserCreationForm\n    template_name='signup.html'\n    success_url=reverse_lazy('login')\n\n\n    \ndef github_repos(request):\n    user={}\n    repos=[]\n    if request.user.is_authenticated:\n\n        if 'username' in request.GET:\n            username=request.GET['username']\n            user=git.user_profile(username)\n            repos=git.user_repos(username)\n            print(user)\n    return render(request,'home.html',{\n        'user':user,\n        'repos':repos,\n    })\n\n\ndef github_repo_commits(request):\n    commits=[]\n    username1=request.GET['username1']\n    repo=request.GET['repo']\n\n    print(repo)\n    commits=git.user_commit_history(username1,repo)\n    return render(request,'commits.html',{'commits':commits})\n\n\n    ","repo_name":"Nirav-1999/Git_api","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12995052654","text":"from setuptools import setup\nfrom setuptools import find_packages\n\nwith open('README.md', 'r') as readMe:\n    long_description = readMe.read()\n\nsetup(\n\n    name = 'self_concepts',\n    version = '1.0',\n    author = 'Grady Booch',\n    author_email = 'egrady@booch.com',\n    description = \"Self's foundational abstractions\",\n    long_description = long_description,\n    long_description_content_type = 'text/markdown',\n    keywords = 'self agi neuro-symbolic',\n    url = 'https://github.com/booch-self/self-concepts',\n    packages = find_packages(where='source'),\n    classifiers = [\n        'Programming Language :: Python :: 3.0',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent'\n    ],\n    python_requires = ' >= 3.0'\n\n)\n","repo_name":"booch-self/self-concepts","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"17024583943","text":"\"\"\"t2j converts TOML input to JSON output and vice versa.\"\"\"\n\nimport argparse\nimport collections.abc\nimport json\nimport sys\nfrom contextlib import ExitStack\n\nimport tomli\nimport tomli_w\n\n__version__ = '0.1.0'\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Converts TOML to JSON (or JSON to TOML).\")\n    parser.add_argument('--toml-to-json', '-T', dest='direction',\n                        action='store_const', const='t2j')\n    parser.add_argument('--json-to-toml', '-J', dest='direction',\n                        action='store_const', const='j2t')\n    parser.add_argument('filename', nargs='?')\n    parser.set_defaults(direction=parser.prog)\n    args = parser.parse_args()\n    with ExitStack() as close_stack:\n        if args.filename is None:\n            input_file = sys.stdin\n        else:\n            input_file = close_stack.enter_context(open(args.filename))\n        output_file = sys.stdout\n        if args.direction == 't2j':\n            toml_object = tomli.load(input_file.buffer)\n            json.dump(toml_object, output_file)\n        elif args.direction == 'j2t':\n            json_object = json.load(input_file)\n            if not isinstance(json_object, collections.abc.Mapping):\n                parser.error(\"input is not a JSON object\")\n            tomli_w.dump(json_object, output_file.buffer)\n        else:\n            parser.error(f\"invalid conversion direction {args.direction!r}\")\n","repo_name":"astralblue/t2j","sub_path":"t2j.py","file_name":"t2j.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37244961558","text":"#Uses python3\n\nimport sys\nimport heapq as hp\n\n\ndef distance(graph, cost, s, 
t, infinity=float(\"inf\")):\n    dists, prev, verts = [], [], []\n    for i in range(len(graph)):\n        dists.append(infinity)\n        prev.append(None)\n        verts.append(i)\n    dists[s] = 0\n    processed = set()\n    queue = list(zip(dists, verts))\n    hp.heapify(queue)\n    while queue:\n        d, u = hp.heappop(queue)\n        if u not in processed:\n            processed.add(u)\n            for node in graph[u]:\n                w = cost[(u, node)]\n                if dists[node] > d + w:\n                    dists[node] = d + w\n                    prev[node] = u\n                    hp.heappush(queue, (dists[node], node))\n    if dists[t] == infinity:\n        return -1\n    return dists[t]\n\n\nif __name__ == '__main__':\n    input = sys.stdin.read()\n    data = list(map(int, input.split()))\n    n, m = data[0:2]\n    data = data[2:]\n    edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))\n    data = data[3 * m:]\n    graph = {i: [] for i in range(n)}\n    cost = {}\n    for ((a, b), w) in edges:\n        graph[a - 1].append(b - 1)\n        cost[(a-1, b-1)] = w\n    s, t = data[0] - 1, data[1] - 1\n    print(distance(graph, cost, s, t))\n","repo_name":"SherMM/coursera-ucsd-algorithms-datastructures","sub_path":"Graphs/Week4/dijkstra/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9161705811","text":"#! /usr/bin/env python3\n\nimport astropy\nfrom astropy.table import Table, Column\nfrom astropy.io.votable import from_table, writeto\nimport numpy as np\nimport glob\nimport argparse\nimport sys\n\n__author__ = \"Paul Hancock\"\n__date__ = \"2019-11-21\"\n\ndef get_epoch_catalogues(epochs_file):\n    \"\"\"\n    Read a file which contains a list of the catalogues to be read\n\n    parameters\n    ----------\n    epochs_file : str\n        A file which has a list of catalogues, one per line.\n\n    returns\n    -------\n    files : list\n        A list of filenames\n    \"\"\"\n    files = list(map(str.strip, open(epochs_file).readlines()))\n    return files\n\n\ndef join_catalogues(reference, epochs):\n    \"\"\"\n    Take a reference catalogue, strip all but the uuid/ra/dec columns and then\n    join the flux/err_flux data from each of the epoch catalogues\n    From each epoch we extract the peak_flux and err_peak_flux columns and rename\n    them by appending _N where N is the epoch number\n\n    parameters\n    ----------\n    reference : str\n        Filename for the reference catalogue\n\n    epochs : list\n        A list of the individual epoch file names\n\n    returns\n    -------\n    table : `astropy.table.Table`\n        A joined table\n    \"\"\"\n    # Read the reference catalogue and retain only the uuid and ra/dec columns\n    # rename the ra/dec columns\n    print(\"Using reference catalogue {0}\".format(reference))\n    ref = Table.read(reference)['uuid', 'ra','dec']\n    ref.rename_column('ra', 'ref_ra')\n    ref.rename_column('dec', 'ref_dec')\n    ref.sort(keys='uuid')\n\n    # make the empty columns\n    new_cols =[]\n    data = np.zeros(len(ref), dtype=np.float32)*np.nan\n    str_data = np.full(len(ref),Table.read(epochs[0])['image'][0])\n    str_data[:] = 'None'\n\n    for i in range(len(epochs)):\n        for colname in ['peak_flux_{0}','err_peak_flux_{0}', 'local_rms_{0}', 'background_{0}']:\n            new_cols.append(Column(data=data.copy(), name=colname.format(i)))\n        for colname in ['image_{0}', 'epoch_{0}']:\n            if 'image' in colname:\n                new_cols.append(Column(data=str_data.copy(), name=colname.format(i)))\n            else:\n                new_cols.append(Column(data=str_data.copy(), name=colname.format(i), dtype='S19'))\n    \n    print(\"ref table is {0} rows\".format(len(ref)))\n    \n    # if we add all the columns at once the ordering is borked!\n    for col in new_cols:\n        ref.add_column(col)\n    
\n    # now put the data into the new big table\n    for i,f in enumerate(epochs):\n        print(\"Joining epoch {0} catalogue {1}\".format(i,f))\n        new_cols = Table.read(f)['uuid', 'peak_flux', 'err_peak_flux', 'local_rms', 'background',\n                                 'image', 'epoch']\n        new_cols.sort(keys='uuid')\n        # compute the order/presence\n        ordering = np.argwhere(np.in1d(ref['uuid'], new_cols['uuid'], assume_unique=True))[:,0]\n        ref['peak_flux_{0}'.format(i)][ordering] = new_cols['peak_flux']\n        ref['err_peak_flux_{0}'.format(i)][ordering] = new_cols['err_peak_flux']\n        ref['local_rms_{0}'.format(i)][ordering] = new_cols['local_rms']\n        ref['background_{0}'.format(i)][ordering] = new_cols['background']\n        ref['image_{0}'.format(i)][ordering] = new_cols['image']\n        ref['epoch_{0}'.format(i)][ordering] = new_cols['epoch']\n\n    return ref\n\n\ndef write_table(tab, filename):\n    \"\"\"\n    Write a VOTable using a binary format.\n\n    parameters\n    ----------\n    tab : `astropy.table.Table`\n        The table to be written\n\n    filename : str\n        The output filename. Should end with .xml or .vot\n    \"\"\"\n    vot = from_table(tab)\n    vot.set_all_tables_format('binary')\n    vot.to_xml(filename)\n    print(\"Wrote {0}\".format(filename))\n    return\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    group1 = parser.add_argument_group(\"Combine catalogues into a flux table\")\n    group1.add_argument(\"--refcat\", dest='ref', type=str, default=None,\n                        help=\"The reference catalogue\")\n    group1.add_argument(\"--epochs\", dest='epochs', type=str, default=None,\n                        help=\"A file containing the list of epoch catalogues.\")\n    group1.add_argument(\"--out\", dest='outfile', type=str, default='flux_table.vot',\n                        help=\"The output table name. Default = flux_table.vot\")\n    results = parser.parse_args()\n\n    if None in (results.ref, results.epochs, results.outfile):\n        parser.print_help()\n        sys.exit()\n\n    files = get_epoch_catalogues(results.epochs)\n    table = join_catalogues(results.ref, files)\n    write_table(table, results.outfile)\n","repo_name":"PaulHancock/Robbie","sub_path":"scripts/join_catalogues.py","file_name":"join_catalogues.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22483247618","text":"import os\nfrom ibis.model.control import Control\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass Fork(Control):\n\n    \"\"\"Fork control class.\"\"\"\n\n    def __init__(self, **params):\n        self.params = params\n        self.params['action_type'] = self.params['action_type'].lower()\n        if self.params['action_type'] == 'fork':\n            try:\n                super(self.__class__,\n                      self).__init__(self.params['action_type'],\n                                     self.params['name'],\n                                     self.params['cfg_mgr'])\n                self.logger.info(\"Successfully forked\")\n            except KeyError as e:\n                self.logger.error('Fork KeyError - reason \"%s\"' % str(e))\n        else:\n            self.logger.error('Invalid expecting fork control type')\n\n    def get_to_nodes(self):\n        \"\"\"Return a list of node names.\n\n        :return List[String]\n        \"\"\"\n        return self.params.get('to_nodes', [])\n","repo_name":"Cigna/ibis","sub_path":"ibis/model/fork.py","file_name":"fork.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"} +{"seq_id":"73245552193","text":"from exceptions import *\nimport utils\n\n\n_global_var_map = dict()\n\n\ndef get_var(name_: str):\n    check = utils.check_var_name(name_)\n    if not check:\n        raise RunTimeException(\"Runtime error, var name error.\")\n    return 
_global_var_map[name_]\n\n\ndef set_var(name_: str, value_):\n check = utils.check_var_name(name_)\n if not check:\n raise RunTimeException(\"Runtime error, var name error.\")\n _global_var_map[name_] = value_\n","repo_name":"Majjcom/autoRunner","sub_path":"global_var.py","file_name":"global_var.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2065216872","text":"\r\nint_var = 2\r\n\r\nfloat_var = 2.34\r\n\r\ndict_var = dict(test1=\"tset\", test2=1.23, test3=\"YOO NEW DICT?\")\r\n\r\nbool_var = False\r\n\r\nlist_var = [5, 4, 3, 2, 1]\r\n\r\ntuple_var = (1, 2, 3, 4, 5)","repo_name":"Decee1/py_file_variable_merger","sub_path":"config2.py","file_name":"config2.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72530996993","text":"#!/usr/bin/env python3\n##-- imports\nfrom __future__ import annotations\n\nimport abc\nimport logging as logmod\nfrom copy import deepcopy\nfrom dataclasses import InitVar, dataclass, field\nfrom os.path import splitext\nfrom re import Pattern\nfrom typing import (TYPE_CHECKING, Any, Callable, ClassVar, Final, Generic,\n Iterable, Iterator, Mapping, Match, MutableMapping,\n Protocol, Sequence, Tuple, TypeAlias, TypeGuard, TypeVar,\n cast, final, overload, runtime_checkable)\nfrom uuid import UUID, uuid1\nfrom weakref import ref\n\nfrom acab_config.utils.log_formatter import SimpleLogColour\n\n##-- end imports\n\nlogging = logmod.getLogger(__name__)\nSC = SimpleLogColour\n\ndef print_header(header):\n print(\"\\n--------------------\")\n print(f\"{SC.green(header)}: \")\n print(\"--------------------\")\n\ndef print_class_colour(heading, cls):\n print(f\"{heading}: {SC.yellow(cls.__module__)}.{SC.green(cls.__class__.__name__)}\")\n if bool(cls.__doc__):\n print(SC.blue(cls.__doc__))\n\n print()\n bases = \"\\n\\t\".join(f\"{SC.yellow(x.__module__)}.{SC.green(x.__qualname__)}\" for x in cls.__class__.__bases__)\n print(f\"Instance of:\\n\\t{bases}\")\n\ndef print_module_colour(mod):\n result = []\n if bool(len(mod.dsl_fragments)):\n result.append(SC.green(f\"{len(mod.dsl_fragments)} DSL\"))\n else:\n result.append(SC.red(f\"{len(mod.dsl_fragments)} DSL\"))\n if bool(len(mod.semantics)):\n result.append(SC.green(f\"{len(mod.semantics)} Sem\"))\n else:\n result.append(SC.red(f\"{len(mod.semantics)} Sem\"))\n if bool(len(mod.operators)):\n result.append(SC.green(f\"{len(mod.operators)} Op\"))\n else:\n result.append(SC.red(f\"{len(mod.operators)} Op\"))\n if bool(len(mod.printers)):\n result.append(SC.green(f\"{len(mod.operators)} Pr\"))\n else:\n result.append(SC.red(f\"{len(mod.operators)} Pr\"))\n\n source = splitext(mod.source)\n print(f\"\\t({' | '.join(result)} : {source[0]}{SC.blue(source[1])})\")\n\ndef print_handler_system(system):\n print(\"\\nComponents: (no. 
of handlers : signal : flags)\")\n sorted_specs = sorted([(len(y), y, x) for x,y in system.handler_specs.items()], reverse=True, key=lambda x: x[0])\n for spec_len, spec, signal in sorted_specs:\n spec_len = SC.green(f\"{spec_len:<4}\") if bool(spec_len) else SC.red(f\"{spec_len:<4}\")\n spec_flags = [x.name for x in spec.flags]\n flag_str = f\" : {', '.join(spec_flags)}\" if bool(spec_flags) else \"\"\n print(f\"\\t{spec_len} : {SC.blue(signal):<40}{flag_str}\")\n\n print(\"\\nHandlers not attached to a Signal: \")\n for signal in sorted((x.signal for x in system.loose_handlers)):\n print(f\"\\t{SC.blue(signal)}\")\n\ndef two_part_split(the_string):\n pair = splitext(the_string)\n return f\"{pair[0]}{SC.blue(pair[1])}\"\n","repo_name":"jgrey4296/acab","sub_path":"acab/modules/repl/commands/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"9978610452","text":"import django.utils.timezone\nimport model_utils.fields\nfrom common.db import models as common_models\nfrom django.db import migrations, models\nfrom jsonfield import JSONField\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Player',\n fields=[\n ('id', common_models.PositiveBigAutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name='ID')\n ),\n ('key', models.CharField(max_length=255, unique=True)),\n ('created_at', model_utils.fields.AutoCreatedField(\n db_index=True,\n default=django.utils.timezone.now,\n editable=False\n )),\n ('updated_at', model_utils.fields.AutoLastModifiedField(\n db_index=True,\n default=django.utils.timezone.now,\n editable=False\n )),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', common_models.PositiveBigAutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name='ID'\n )),\n ('value', models.CharField(max_length=255)),\n ('answers', JSONField(null=True)),\n ('creator', models.ForeignKey(\n 'app.Player',\n models.CASCADE,\n '%(class)s_creator_is_player_related',\n null=True\n )),\n ('for_player', models.ForeignKey(\n 'app.Player',\n models.CASCADE,\n '%(class)s_for_player_is_player_related',\n null=True\n )),\n ('created_at', model_utils.fields.AutoCreatedField(\n db_index=True,\n default=django.utils.timezone.now,\n editable=False\n )),\n ('updated_at', model_utils.fields.AutoLastModifiedField(\n db_index=True,\n default=django.utils.timezone.now,\n editable=False\n )),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n","repo_name":"nmfzone/tebak-asik-line-bot","sub_path":"app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24364324262","text":"import torch\nimport pandas as pd\nimport cv2\nfrom prepare_dataset import *\n\n\n# In[138]:\n\n\n\nmodel_type1 = \"DPT_Large\" # MiDaS v3 - Large (highest accuracy, slowest inference speed)\nmodel_type2 = \"DPT_Hybrid\" # MiDaS v3 - Hybrid (medium accuracy, medium inference speed)\n#model_type = \"MiDaS_small\" # MiDaS v2.1 - Small (lowest accuracy, highest inference speed)\n\nmidas1 = torch.hub.load(\"intel-isl/MiDaS\", model_type1)\nmidas2 = torch.hub.load(\"intel-isl/MiDaS\", model_type2)\n\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() 
else torch.device(\"cpu\")\nmidas1.to(device)\nmidas1.eval()\nmidas2.to(device)\nmidas2.eval()\n\nmidas_transforms = torch.hub.load(\"intel-isl/MiDaS\", \"transforms\")\n\n# if model_type == \"DPT_Large\" or model_type == \"DPT_Hybrid\":\ntransform = midas_transforms.dpt_transform\n# else:\n# transform = midas_transforms.small_transform\n\n\ndata = pd.read_csv('/hdd/data/bmai_clean/full_cambodge_data.csv')\npaths = [\"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/01010619354/01010619354_S_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/03031470303/03031470303_S_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/02010101303/02010101303-F_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/02010203324/02010203324-s_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/01010204363/01010204363_S_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/03031151302/03031151302.S_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/02021135314/02021135314-S_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/03020635319/03020635320-S_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/03031581322/03031581322FJP_384.JPG\",\n \"/hdd/data/bmai_clean/cambodge/KH_September2018/KH_/02021556369/2021556369-F_384.JPG\"]\nimgs= [cv2.imread(filename) for filename in paths]\n\nwith torch.no_grad():\n for i in range(len(imgs)):\n img = imgs[i].copy()\n input_batch = transform(img).to(device)\n prediction1 = midas1(input_batch)\n prediction2 = midas2(input_batch)\n \n prediction1 = torch.nn.functional.interpolate(\n prediction1.unsqueeze(1),\n size=img.shape[:2],\n mode=\"bicubic\",\n align_corners=False,\n ).squeeze()\n \n prediction2 = torch.nn.functional.interpolate(\n prediction2.unsqueeze(1),\n size=img.shape[:2],\n mode=\"bicubic\",\n align_corners=False,\n ).squeeze()\n\n output1 = prediction1.cpu().numpy()\n output2 = prediction2.cpu().numpy()\n \n cv2.imwrite(f'midas_{model_type1}_{i}.JPG',output1)\n cv2.imwrite(f'midas_{model_type2}_{i}.JPG',output2)\n \n","repo_name":"benhaj/bmai_ahmed","sub_path":"Midas_depths_predictions.py","file_name":"Midas_depths_predictions.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39245191629","text":"from exif import Image\n\nwith open(\"img4.jpg\", \"rb\") as palm_1_file:\n palm_1_image = Image(palm_1_file)\n#with open(\"img3.jpg\", \"rb\") as palm_2_file:\n# palm_2_image = Image(palm_2_file)\n\n\nimages = [ palm_1_image]\n\nfor index, image in enumerate(images):\n if image.has_exif:\n status = f\"contains EXIF (version {image.exif_version}) information.\"\n else:\n status = \"does not contain any EXIF information.\"\n print(image._has_exif)\n print(f\"Image {index} {status}\")\n","repo_name":"IUDA194/A-bot","sub_path":"cndgmetadate.py","file_name":"cndgmetadate.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6490974348","text":"#!/usr/bin/python3\nfrom kivy.base import runTouchApp\nfrom kivy.uix.label import Label\nfrom kivy.graphics import Color, Rectangle\nfrom kivy.lang import Builder\nfrom kivy.core.window import Window\n\nclass HighlightLabel(Label):\n def __init__(self, **kwargs):\n super(HighlightLabel, self).__init__(**kwargs)\n Window.bind(mouse_pos=self.on_mouse_pos)\n self._instructions = []\n\n def on_mouse_pos(self, *largs):\n pos 
= self.to_widget(*largs[1])\n        if self.collide_point(*pos):\n            tx, ty = pos\n            tx -= self.center_x - self.texture_size[0] / 2.\n            ty -= self.center_y - self.texture_size[1] / 2.\n            ty = self.texture_size[1] - ty\n            for uid, zones in self.refs.items():\n                for zone in zones:\n                    x, y, w, h = zone\n                    if x <= tx <= w and y <= ty <= h:\n                        self._highlight_ref(uid)\n                        return\n        self._clear_instructions()\n\n    def _highlight_ref(self, name):\n        if self._instructions:\n            return\n        store = self._instructions.append\n        for box in self.refs[name]:\n            box_x = self.center_x - self.texture_size[0] * 0.5 + box[0]\n            box_y = self.center_y + self.texture_size[1] * 0.5 - box[1]\n            box_w = box[2] - box[0]\n            box_h = box[1] - box[3]\n            with self.canvas:\n                store(Color(0, 1, 0, 0.25))\n                store(Rectangle(\n                    pos=(box_x, box_y), size=(box_w, box_h)))\n\n    def _clear_instructions(self):\n        rm = self.canvas.remove\n        for instr in self._instructions:\n            rm(instr)\n        self._instructions = []\n\nKV = '''\nHighlightLabel:\n    markup: True\n    text_size: self.size\n    valign: 'middle'\n    id: lbl\n    text:\n        ('[ref=headline]This is a headline![/ref] here is some'\n        'more text and [ref=overview]here is an overview... '\n        'Obviously this is just a test[/ref].')\n'''\n\nrunTouchApp(Builder.load_string(KV))\n","repo_name":"Exodus111/Projects","sub_path":"Games/Story-RPG/Original/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20915335253","text":"from django.http import HttpResponse, JsonResponse, HttpResponseForbidden\r\nfrom django.contrib.auth import authenticate, login\r\nfrom django.db import IntegrityError\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nimport json\r\nfrom ina_api.models import *\r\nfrom django.views.decorators.http import require_http_methods\r\nfrom rest_framework.decorators import api_view\r\nfrom django.core import serializers\r\nimport mimetypes\r\n\r\n@require_http_methods(['GET'])\r\ndef getProjectFollowedById(request, id):\r\n    try:\r\n        projectFollowedObject = Project_Followed.objects.get(pk=id)\r\n        userObject = projectFollowedObject.user.__repr__()\r\n        projectObject = projectFollowedObject.project.__repr__()\r\n        projectFollowedJson = serializers.serialize('json', [ projectFollowedObject, ])\r\n        return JsonResponse({\"bool\": True, \"msg\": \"ProjectFollowed bestaat\", \"projectFollowed\": projectFollowedJson, \"user\": userObject, \"project\": projectObject}, safe=True)\r\n    except ObjectDoesNotExist:\r\n        return JsonResponse({\"bool\": False, \"msg\": \"ProjectFollowed bestaat niet\"}, safe=True)\r\n\r\n@require_http_methods(['POST'])\r\n@api_view(['POST'])\r\ndef followProjectById(request):\r\n    data = json.loads(request.body.decode('utf8'))\r\n    projectId = data['id']\r\n    userId = data['userId']\r\n    try:\r\n        projectObject = Project.objects.get(pk=projectId)\r\n        userObject = User.objects.get(pk=userId)\r\n        if Project_Followed.objects.filter(project=projectObject, user=userObject).exists():\r\n            return JsonResponse({\"bool\": False, \"msg\": \"Je volgt al dit project\"}, safe=True)\r\n        else:\r\n            projectFollowed = Project_Followed(project=projectObject, user=userObject)\r\n            projectFollowed.save()\r\n            projectObject.follower_count += 1\r\n            projectObject.save()\r\n            followerCount = projectObject.follower_count\r\n            return JsonResponse({\"bool\": True, \"msg\": \"Je volgt nu het project\", \"followerCount\": followerCount}, safe=True)\r\n    except:\r\n        return JsonResponse({\"bool\": False, \"msg\": \"volgen is 
mislukt\"}, safe=True)\r\n\r\n@require_http_methods(['GET'])\r\n@api_view(['GET'])\r\ndef checkIfFollowed(request, userId, projectId):\r\n try:\r\n try:\r\n object = Project_Followed.objects.get(user=User.objects.get(pk=userId), project=Project.objects.get(pk=projectId))\r\n return JsonResponse({\"bool\": True, \"msg\": \"Deze gebruiker volgt dit project wel\", \"followed\": True, \"canNotificate\": object.canNotificate})\r\n except ObjectDoesNotExist:\r\n return JsonResponse({\"bool\": True, \"msg\": \"Deze gebruiker volgt dit project niet\", \"followed\": False})\r\n except:\r\n return JsonResponse({\"bool\": False, \"msg\": \"Er is iets misgegaan\"})\r\n\r\n@require_http_methods(['POST'])\r\n@api_view(['POST'])\r\ndef setCanNotificate(request):\r\n data = json.loads(request.body.decode('utf8'))\r\n try:\r\n projectFollowedObject = Project_Followed.objects.get(user=User.objects.get(pk=data['userId']), project=Project.objects.get(pk=data['projectId']))\r\n projectFollowedObject.canNotificate = data['canNotificate']\r\n projectFollowedObject.save()\r\n return JsonResponse({\"bool\": True, \"msg\": \"Notificatie instellingen aangepast\"})\r\n except Exception as e:\r\n print(e)\r\n return JsonResponse({\"bool\": False, \"msg\": \"Kon notificatie niet instellen\"})\r\n\r\n@require_http_methods(['GET'])\r\n@api_view(['GET'])\r\ndef checkIfProjectFollowed(request, projectId, userId):\r\n try:\r\n if Project_Followed.objects.filter(project=Project.objects.get(pk=projectId), user=User.objects.get(pk=userId)).exists():\r\n return JsonResponse({\"bool\": True, \"msg\": \"Project wordt al gevolgd door deze gebruiker\", \"followed\": True})\r\n else:\r\n return JsonResponse({\"bool\": True, \"msg\": \"Project wordt nog niet gevolgd door deze gebruiker\", \"followed\": False})\r\n except Exception as e:\r\n print(e)\r\n return JsonResponse({\"bool\": False, \"msg\": \"Er ging iets mis\"})\r\n\r\n\r\n@require_http_methods(['POST'])\r\n@api_view(['POST'])\r\ndef unfollowProjectById(request):\r\n data = json.loads(request.body.decode('utf8'))\r\n projectId = data['id']\r\n userId = data['userId']\r\n try:\r\n projectObject = Project.objects.get(pk=projectId)\r\n userObject = User.objects.get(pk=userId)\r\n if Project_Followed.objects.filter(project=projectObject).exists():\r\n Project_Followed.objects.get(project=projectObject, user=userObject).delete()\r\n projectObject.follower_count -= 1\r\n projectObject.save()\r\n follower_count = projectObject.follower_count\r\n return JsonResponse({\"bool\": True, \"msg\": \"Project succesvol ontvolgd\", \"followerCount\": follower_count})\r\n else:\r\n return JsonResponse({\"bool\": True, \"msg\": \"Je volgt het project nog niet\"})\r\n except Exception as e:\r\n print(e)\r\n return JsonResponse({\"bool\": False, \"msg\": \"Het is niet gelukt om te ontvolgen\"})\r\n","repo_name":"Optimum-Software/ina_backend","sub_path":"ina_api/controllers/ProjectFollowedController.py","file_name":"ProjectFollowedController.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408852595","text":"import urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\nimport pprint\nimport requests\n\n\nclass MBTAZIP(dml.Algorithm):\n contributor = 'maulikjs'\n reads = ['maulikjs.MBTAstops']\n writes = ['maulikjs.MBTAstopsZIP']\n\n @staticmethod\n def execute(trial = False):\n '''Retrieve some data sets (not using the API here for the sake of 
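A hedged client-side sketch for the follow endpoint above. The route is hypothetical since the project's urls.py is not shown, and the sketch assumes CSRF is not enforced on this API; note also that the exists() check in followProjectById filters on project only, so it reports "already following" as soon as any user follows the project.

import requests

resp = requests.post(
    'http://localhost:8000/api/project/follow/',  # hypothetical route
    json={'id': 1, 'userId': 2},                  # matches json.loads(request.body)
)
print(resp.json())  # e.g. {"bool": true, "msg": "...", "followerCount": 3}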
simplicity).'''\n        startTime = datetime.datetime.now()\n        distMBTA = []\n        myDict = {}\n        n = 0  # progress counter for the loop below\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate('maulikjs', 'maulikjs')\n        mbta = repo['maulikjs.MBTAstops']\n\n        for key in mbta.find():\n            try:\n                myDict['stop_id'] = key['stop_id']\n                url = 'https://maps.googleapis.com/maps/api/geocode/json?latlng='+str(key['latitude'])+','+str(key['longitude'])+'&result_type=postal_code&key='+dml.auth['services']['Google']['key']\n                resp = requests.get(url).json()\n                zipc = resp['results'][0]['address_components'][0]['long_name']\n                myDict['zip'] = zipc\n            except Exception:  # skip stops whose reverse geocoding fails\n                continue\n\n            n = n + 1\n            if n % 1000 == 0:\n                print(n)\n\n            distMBTA.append(myDict.copy())\n\n        url = 'https://cs-people.bu.edu/maulikjs/data/zip-persqft-zillow.json'\n        response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n        r = json.loads(response)\n        s = json.dumps(r, sort_keys=True, indent=2)\n        repo.dropCollection(\"maulikjs.MBTAstopsZIP\")\n        repo.createCollection(\"maulikjs.MBTAstopsZIP\")\n        repo['maulikjs.MBTAstopsZIP'].insert_many(distMBTA)\n        repo['maulikjs.MBTAstopsZIP'].metadata({'complete':True})\n        print(repo['maulikjs.MBTAstopsZIP'].metadata())\n\n        repo.logout()\n\n        endTime = datetime.datetime.now()\n\n        return {\"start\":startTime, \"end\":endTime}\n\n    @staticmethod\n    def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n        '''\n        Create the provenance document describing everything happening\n        in this script. Each run of the script will generate a new\n        document describing that invocation event.\n        '''\n\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate('maulikjs', 'maulikjs')\n        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/maulikjs#') # The scripts are in <folder>#<filename> format.\n        doc.add_namespace('dat', 'http://datamechanics.io/data/maulikjs#') # The data sets are in <user>#<collection> format.\n        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n        doc.add_namespace('gog', 'https://maps.googleapis.com/maps/api')\n\n        this_script = doc.agent('alg:MBTAZIP', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n        resource = doc.entity('dat:MBTAstops', {'prov:label':'MBTA Stops', prov.model.PROV_TYPE:'ont:DataResource'})\n        resource2 = doc.entity('gog:geocode', {'prov:label':'Google Geocode API', prov.model.PROV_TYPE:'ont:DataResource'})\n        get_prices = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n\n        doc.wasAssociatedWith(get_prices, this_script)\n\n        doc.usage(get_prices, resource, startTime, None,\n                  {prov.model.PROV_TYPE:'ont:Retrieval'})\n\n        doc.usage(get_prices, resource2, startTime, None,\n                  {prov.model.PROV_TYPE:'ont:Retrieval',\n                   'ont:Query':'?latlng=$&result_type=postal_code&key=$'})\n\n        prices = doc.entity('dat:MBTAstopsZIP', {prov.model.PROV_LABEL:'MBTA stops by zipcode', prov.model.PROV_TYPE:'ont:DataSet'})\n        doc.wasAttributedTo(prices, this_script)\n        doc.wasGeneratedBy(prices, get_prices, endTime)\n        doc.wasDerivedFrom(prices, resource, get_prices, get_prices, get_prices)\n        doc.wasDerivedFrom(prices, resource2, get_prices, get_prices, get_prices)\n\n        repo.logout()\n\n        return doc\n\nMBTAZIP.execute()\ndoc = MBTAZIP.provenance()\nprint(doc.get_provn())\nprint(json.dumps(json.loads(doc.serialize()), 
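The per-stop loop above silently skips any stop whose Google geocode call fails. A hedged helper (names are illustrative, not the script's own) that retries transient failures with backoff instead of dropping the stop outright:

import time
import requests

def reverse_geocode_zip(lat, lon, api_key, retries=3):
    url = ('https://maps.googleapis.com/maps/api/geocode/json'
           f'?latlng={lat},{lon}&result_type=postal_code&key={api_key}')
    for attempt in range(retries):
        try:
            results = requests.get(url, timeout=10).json()['results']
            return results[0]['address_components'][0]['long_name']
        except (requests.RequestException, ValueError, KeyError, IndexError):
            time.sleep(2 ** attempt)  # simple exponential backoff
    return None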
indent=4))\n\n# eof\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"maulikjs/MBTAZIP.py","file_name":"MBTAZIP.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18471352025","text":"# ========== SELECTION SORT ===============\nimport random\n\n# Initialize the pseudo-random number generator.\n# The sequence stays the same across several runs.\n# Try changing the number to generate a different sequence.\nrandom.seed(3)\n\nour_list = []\n#-----------------------------------\n# generate 5 random numbers and store them in the list\nfor i in range(5):\n    our_list.append(random.randint(1, 10))\nprint(\"List before sorting:\", our_list)\n# ----------------------------------\n\n# Selection sort implementation begin =======\n\"\"\" Selection sort finds the minimum and moves it to the front. We break the task into a few sub-steps:\n    1. Find the minimum.\n    2. Move the minimum to the front.\n    3. Repeat on the unsorted part of the list.\n    \n    To know where the unsorted part of the list begins, we keep the current index of the smallest number (variable index).\n    In this case the elements end up sorted in descending order. For an ascending order, look for the maximum instead of the minimum.\n    \"\"\"\n\nlist_length = len(our_list)\nsmallest_element = our_list[0] # start at the first element of the list\nindex = 0 # index of the smallest element\n\n# Iterate from 0 to the length of the list\nfor elem in range(list_length):\n    smallest_element = our_list[elem] # we only need to revisit the part that is still unsorted\n    index = elem # reset the index of the smallest number\n    \n    # Finding the minimum\n    # Go from elem to the end of the list, since only the unsorted part needs checking\n    for i in range(elem, list_length):\n        # if we find a smaller number, record it\n        if our_list[i] < smallest_element:\n            smallest_element = our_list[i]\n            index = i # update the index of the smallest number\n    \n    \n    our_list.pop(index) # remove the smallest element from the list\n    our_list.insert(0,smallest_element) # put it at the front of the list\n\n    # after the new minimum is found and moved to the front, the next pass of the outer for loop starts (we move further along the list)\n    # the new minimum is searched for only in the unsorted region, because the inner loop runs from elem to the end of the list, not from the start\n    \nprint(\"List after sorting:\", our_list)\n","repo_name":"hanigue/Bootcamp","sub_path":"selection_sit.py","file_name":"selection_sit.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43451281070","text":"import collections\nfrom collections import abc\n\nmy_dict = {}\n\nprint(isinstance(my_dict, abc.Mapping))\nprint(isinstance(my_dict, abc.MutableMapping))\n\n# dict comprehensions\ndial_codes = [(880, 'Bangladesh'), (55, 'Brazil')]\n\n# dict comprehension by looping and tuple unpacking\ncountry_dial = {country: code for code, country in dial_codes}\n\n# get values, set default if key missing\nprint(country_dial.get(3, {'country': 'USA'}))\n\n# setDefault example\nindex = {}\n\nfor x in range(10):\n    index.setdefault(x, []).append(f'{x} index')\n\nprint(index)\n\n# alternative to setDefault\nkey = 0\nif key not in index:\n    index[key] = []\n    index[key].append(f'{x} index')\n\nprint(index)\n\n# using defaultdict to set default empty list and avoid KeyError\n\nindex = collections.defaultdict(list)\nfor x in range(10):\n    index[x].append(f'{x} index')\n\nprint(index)\n\n# using defaultdict to set default empty tuple and avoid 
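The pop()/insert(0, ...) approach above produces a descending order and pays O(n) list shifts per pass; the textbook in-place variant swaps the minimum into position instead. A short sketch (ascending order):

def selection_sort(values):
    for i in range(len(values)):
        smallest = i
        for j in range(i + 1, len(values)):
            if values[j] < values[smallest]:
                smallest = j
        values[i], values[smallest] = values[smallest], values[i]  # swap into place
    return values

print(selection_sort([4, 1, 9, 7, 3]))  # [1, 3, 4, 7, 9]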
KeyError\n\nindex = collections.defaultdict(tuple)\nfor x in range(10):\n index[x] = (f'{x} index')\n\nprint(len(index))\n","repo_name":"ernestoaparicio/fluent-python-practice","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14767342948","text":"\"\"\"\ntest URL Configuration for juntagrico_billing development\n\"\"\"\nfrom django.urls import include, re_path\nfrom django.contrib import admin\nimport juntagrico\n\nurlpatterns = [\n re_path(r'^admin/', admin.site.urls),\n re_path(r'^impersonate/', include('impersonate.urls')),\n re_path(r'^', include('juntagrico.urls')),\n re_path(r'^', include('juntagrico_billing.urls')),\n re_path(r'^$', juntagrico.views.home),\n]\n","repo_name":"juntagrico/juntagrico-billing","sub_path":"testurls.py","file_name":"testurls.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74124346435","text":"\"\"\"General Exploratory Dashboard Interface.\"\"\"\n# PLANNED: Generalize and move to Dash_Charts\n\nimport re\nimport time\nfrom datetime import datetime\n\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nimport plotly.express as px\nfrom dash.exceptions import PreventUpdate\nfrom dash_charts.components import dropdown_group, opts_dd\nfrom dash_charts.modules_datatable import ModuleFilteredTable\nfrom dash_charts.utils_app_modules import DataCache\nfrom dash_charts.utils_app_with_navigation import AppWithTabs\nfrom dash_charts.utils_callbacks import map_args, map_outputs\nfrom icecream import ic\n\nfrom .app_tabs import InstructionsTab, TabIris, TabTip\nfrom .upload_module import UploadModule\n\n# add popup module with datatable vertically so that all data fits (add column that says \"more\" with button that will\n# open and populate the modal)\n\n# Create Data Module\n# > See dbc form: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/input/\n# (*) [1/2-storage] Create single SQLite DB file to hold upload dataframes\n# > For each upload file, create table with filename and dump user uploaded data (use dataset for SQL management\n# and to check if tables exists already - may need pandas to load CSV/JSON data first before dumping into SQL\n# with dataset)\n# > Create main table that stores meta information on each table - date added, source filename (with full path?),\n# (last accessed?), etc.\n# > Add dcc.Input to select SQL table and table to show raw data (use dash_charts table class)\n\n# ( ) [2/2-storage] Create user-specific SQLite DB file f'{username}.db'. Have user enter username, which could be\n# stored in URL or in dcc.Store(). 
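One nit on the defaultdict(tuple) example just above: `index[x] = (f'{x} index')` assigns a plain string, because parentheses alone do not make a tuple. A sketch of an actual tuple default, using the trailing comma that creates a 1-tuple:

import collections

index = collections.defaultdict(tuple)
for x in range(3):
    index[x] += (f'{x} index',)  # empty-tuple default, then tuple concatenation

print(index)  # defaultdict(<class 'tuple'>, {0: ('0 index',), 1: ('1 index',), 2: ('2 index',)})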
This defines which SQLite database is loaded\n\n# ( ) Edge Cases\n# > Allow user to clear data (either specific tables or all database)\n# > show table of raw data (handle really wide tables - clip columns?)\n# > If uploading a file with the same name - offer to clear already uploaded data or if a new name should be\n# entered (prompt user for input suggesting \"-1\"/\"-2\"/etc)\n\n# ( ) Allow selecting of the datatable from SQL, then update the inputs so that the selected data can be charted in the\n# px charts (like the demo px.Tips data is shown now) - need to get column names, update dataframe in tab, etc.\n\n\n# ( ) Visualization\n# > Create Dash_Charts table to filter by streaming platform, category, and rating\n# > Create custom views. Could be good to see distribution of scores for an anime and where my score falls\n\n# Other notes\n# > PLANNED: Make sure the tab navigation fits within the boundary - currently blocks any rows above/below the tabs if\n# not the only thing in the app\n# > PLANNED: Make the tabs and chart compact as well when the compact argument is set to True\n# > PLANNED: select chart types from type and generate multiple charts on same page?\n# > Need way to store state of charts when switching tabs...\n# > methods for conerting to and from `dcc.Store`: df_to_store() // store_to_df()\n# > Maybe popover will be useful for describing inputs?\n# https://dash-bootstrap-components.opensource.faculty.ai/docs/components/popover/\n# > Maybe auto-delete data after 30 days of inactivity?\n#\n# > ex_px variation - notes:\n# > Along bottom of screen - file select - give keyword name to select from dropdowns for each px app/tab\n# > This way multiple data sets can be loaded in PD DataFrames\n# > In dropdown option of default or loaded data\n# > Data should be tidy, then regular dropdown can be used\n# > Should show table with data below input\n# > Pdoc3 search? {May not work for local docs, but checkout how pdoc does it's pdoc:\n# https://github.com/pdoc3/pdoc/blob/master/doc/pdoc_template/}\n\n\n# PLANNED: HOIST TO DASH_CHARTS\ndef get_triggered_id():\n \"\"\"Use Dash context to get the id of the input element that triggered the callback.\n\n See advanced callbacks: https://dash.plotly.com/advanced-callbacks\n\n Returns:\n str: id of the input that triggered the callback\n\n Raises:\n PreventUpdate: if callback was fired without an input\n\n \"\"\"\n ctx = dash.callback_context\n if not ctx.triggered:\n raise PreventUpdate\n\n prop_id = ctx.triggered[0]['prop_id'] # in format: `id.key` where we only want the `id`\n return re.search(r'(^.+)\\.[^\\.]+$', prop_id).group(1)\n\n\nclass KitsuExplorer(AppWithTabs): # noqa: H601\n \"\"\"Kitsu User Dataset Explorer Plotly/Dash Application.\"\"\"\n\n name = 'KitsuExplorer'\n\n external_stylesheets = TabTip.external_stylesheets\n\n tabs_location = 'right'\n \"\"\"Tab orientation setting. 
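The storage plan sketched in the comments above (load the upload with pandas, dump it into SQLite via the `dataset` package, track each table in a meta table) could look roughly like this; the file, table, and column names are placeholders, not the app's real ones:

import datetime

import dataset
import pandas as pd

db = dataset.connect('sqlite:///uploads.db')

def store_upload(csv_path, table_name):
    df = pd.read_csv(csv_path)
    db[table_name].insert_many(df.to_dict('records'))  # table is created on first insert
    db['_uploads_meta'].insert({'table': table_name, 'source': csv_path,
                                'added': datetime.datetime.now().isoformat()})

# store_upload('my_data.csv', 'my_data'); print(db.tables)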
One of `(left, top, bottom, right)`.\"\"\"\n\n tabs_margin = '110px'\n \"\"\"Adjust this setting based on the width or height of the tabs to prevent the content from overlapping the tabs.\"\"\"\n\n tabs_compact = True\n \"\"\"Boolean setting to toggle between a padded tab layout if False and a minimal compact version if True.\"\"\"\n\n id_modal = 'modal'\n \"\"\"Unique name for the pop-up modal.\"\"\"\n\n id_modal_close = 'modal-close'\n \"\"\"Unique name for the close button of the pop-up modal.\"\"\"\n\n id_wip_button = 'button-wip'\n \"\"\"Placeholder button ID for testing.\"\"\"\n\n mod_table = ModuleFilteredTable('filtered_table')\n \"\"\"Main table module (DataTable).\"\"\"\n\n mod_cache = DataCache('user_session')\n \"\"\"Data cache module for storing session information.\"\"\"\n\n mod_upload = UploadModule('main_upload')\n \"\"\"Upload module for loading data.\"\"\"\n\n def initialization(self):\n \"\"\"Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.\"\"\"\n super().initialization()\n self.register_uniq_ids([self.id_modal, self.id_modal_close, self.id_wip_button])\n\n # Register modules\n self.modules = [self.mod_table, self.mod_cache, self.mod_upload]\n\n def define_nav_elements(self):\n \"\"\"Return list of initialized tabs.\n\n Returns:\n list: each item is an initialized tab (ex `[AppBase(self.app)]` in the order each tab is rendered\n\n \"\"\"\n return [\n TabTip(app=self.app),\n TabIris(app=self.app),\n InstructionsTab(app=self.app),\n ]\n\n def return_layout(self):\n \"\"\"Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n \"\"\"\n return html.Div([\n dbc.Row([dbc.Col([\n html.H3('Kitsu Library Explorer', style={'padding': '10px 0 0 10px'}),\n dbc.Button('Primary', color='primary', className='mr-1', id=self.ids[self.id_wip_button]),\n super().return_layout(),\n ])], style={'margin': 0, 'padding': 0}),\n html.Hr(),\n dbc.Row([dbc.Col([\n self.mod_upload.return_layout(self.ids),\n self.mod_cache.return_layout(self.ids),\n ])], style={'maxWidth': '90%', 'paddingLeft': '5%'}),\n\n dbc.Row([dbc.Col([\n html.H2('Data Interaction'),\n html.P(' FIXME: Needs dropdown to select table_name. Filtered data should be applied to px chart'),\n self.mod_table.return_layout(self.ids, px.data.gapminder()),\n ])], style={'maxWidth': '90%', 'paddingLeft': '5%'}),\n\n dbc.Modal([\n dbc.ModalHeader('Module Header'),\n dbc.ModalBody('Module body text'),\n dbc.ModalFooter(dbc.Button('Close', id=self.ids[self.id_modal_close], className='ml-auto')),\n ], backdrop='static', centered=True, id=self.ids[self.id_modal]),\n ])\n\n def create_callbacks(self):\n \"\"\"Create Dash callbacks.\"\"\"\n super().create_callbacks()\n self.register_modal_handler()\n\n def register_modal_handler(self):\n \"\"\"Handle opening and closing the modal.\"\"\"\n outputs = [(self.id_modal, 'is_open'), (self.mod_cache.get(self.mod_cache.id_cache), 'data')]\n inputs = [(self.id_wip_button, 'n_clicks'), (self.id_modal_close, 'n_clicks')]\n states = [(self.mod_cache.get(self.mod_cache.id_cache), 'data')]\n\n @self.callback(outputs, inputs, states)\n def modal_handler(*raw_args):\n a_in, a_state = map_args(raw_args, inputs, states)\n data = a_state[self.mod_cache.get(self.mod_cache.id_cache)]['data']\n if data is None:\n data = {}\n data['username'] = 'username' # FIXME: Get username from input (pt. 
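A minimal standalone illustration of the get_triggered_id() helper defined above, outside the full app; the component ids here are invented for the demo, and the old-style dash_html_components import matches the rest of this file:

import dash
import dash_html_components as html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    html.Button('Open', id='open'),
    html.Button('Close', id='close'),
    html.Div(id='out'),
])

@app.callback(Output('out', 'children'),
              [Input('open', 'n_clicks'), Input('close', 'n_clicks')])
def which_button(*_):
    # On the initial call get_triggered_id() raises PreventUpdate; afterwards it
    # returns 'open' or 'close' depending on which button fired the callback.
    return f'last clicked: {get_triggered_id()}'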
2)\n\n # Return False (close) only if the close button was clicked\n button_id = get_triggered_id()\n return [button_id != self.ids[self.id_modal_close], data]\n","repo_name":"KyleKing/Kitsu_Lib","sub_path":"kitsu_lib/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"5493046147","text":"from tokenize import String\nimport numpy as np\nfrom typing import Tuple, List\nimport constants\nfrom utils import pizza_calculations\nimport math\nimport random\n\n#constants\nBUFFER = 0.001\n\nclass Player:\n def __init__(self, num_toppings, rng: np.random.Generator) -> None:\n \"\"\"Initialise the player\"\"\"\n self.rng = rng\n self.num_toppings = num_toppings\n self.multiplier=40\t# Pizza radius = 6*multiplier units\n self.xCenter = 12*self.multiplier\t# Center Point x of pizza\n self.yCenter = 10*self.multiplier\t# Center Point y of pizza\n self.calculator = pizza_calculations()\n self.preferencesDict = {}\n self.lineCount = 0\n self.circleCount = 0\n self.radioCount = 0 \n\n def customer_gen(self, num_cust, rng = None):\n\n \"\"\"Function in which we create a distribution of customer preferences\n\n Args:\n num_cust(int) : the total number of customer preferences you need to create\n rng(numpy generator object) : A random seed that you can use to generate your customers. You can choose to not pass this, in that case the seed taken will be self.rng\n\n Returns:\n preferences_total(list) : List of size [num_cust, 2, num_toppings], having all generated customer preferences\n \"\"\"\n\n preferences_total = []\n\n if rng is None:\n # standard norm distribution has mean 0 and variance 1\n mean_vector = np.zeros(self.num_toppings)\n # np.eye = Return a 2-D array with ones on the diagonal and zeros elsewhere.\n covariance_matrix = np.eye(self.num_toppings)\n\n for i in range(num_cust):\n # customer 1\n preferences1 = self.rng.multivariate_normal(mean_vector, covariance_matrix)\n # clip to ensure non-negative values\n preferences1 = np.clip(preferences1, 0, 1)\n # Add a small constant to avoid null arrays after clipping\n preferences1 += 1e-10\n # scale to sum to 12\n preferences1 *= 12 / np.sum(preferences1)\n\n # customer 2\n preferences2 = self.rng.multivariate_normal(mean_vector, covariance_matrix)\n preferences2 = np.clip(preferences2, 0, 1)\n preferences2 += 1e-10\n preferences2 *= 12 / np.sum(preferences2)\n\n preferences = [preferences1, preferences2]\n preferences_total.append(preferences)\n\n else:\n # standard norm distribution has mean 0 and variance 1\n mean_vector = np.zeros(self.num_toppings)\n # np.eye = Return a 2-D array with ones on the diagonal and zeros elsewhere.\n covariance_matrix = np.eye(self.num_toppings)\n\n for i in range(num_cust):\n # customer 1\n preferences1 = rng.multivariate_normal(mean_vector, covariance_matrix)\n # clip to ensure non-negative values\n preferences1 = np.clip(preferences1, 0, 1)\n # Add a small constant to avoid null arrays after clipping\n preferences1 += 1e-10\n # scale to sum to 12\n preferences1 *= 12 / np.sum(preferences1)\n\n # customer 2\n preferences2 = rng.multivariate_normal(mean_vector, covariance_matrix)\n preferences2 = np.clip(preferences2, 0, 1)\n preferences2 += 1e-10\n preferences2 *= 12 / np.sum(preferences2)\n\n preferences = [preferences1, preferences2]\n\n # print(f'PREFERENCES: {preferences}')\n # print(f'PREFERENCES SUM: {np.sum(preferences)}') # should sum to 24\n\n 
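A quick sanity check of the generation recipe above: each preference vector is drawn from a standard normal, clipped to [0, 1], nudged off zero, and rescaled, so a single customer's vector sums to 12 (and a pair to 24). A sketch for 3 toppings:

import numpy as np

rng = np.random.default_rng(0)
prefs = rng.multivariate_normal(np.zeros(3), np.eye(3))
prefs = np.clip(prefs, 0, 1) + 1e-10   # same clip-and-nudge as above
prefs *= 12 / np.sum(prefs)
print(prefs, round(float(np.sum(prefs)), 6))  # sums to 12.0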
preferences_total.append(preferences)\n\n return preferences_total\n def circle_topping_2(self, preferences):\n \"\"\"\n Return 1 pizza of 24 toppings in a circle, split horizontally\n \"\"\"\n radius = BUFFER + 0.375 / np.sin(np.pi / 24)\n theta = np.pi / 24\n angle = 0 # use np.pi/2 to make a vertical half\n pizza = [\n [\n radius * np.cos(angle + (2 * i + 1) * theta),\n radius * np.sin(angle + (2 * i + 1) * theta),\n 1 + i // 12\n ]\n for i in range(24)\n ]\n return pizza\n\n def circle_topping_3_v1(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically.\n\n Inner circle: 1\n Outer circle: 2, 3\n \"\"\"\n # toppings 1 (num toppings/n = 24/4 = 6 per topping)\n inner_indices = [1] * 8 + [2] * 8\n # toppings 2 and 3\n outer_indices = [3] * 8\n\n return self.circle_topping_3(preferences, inner_indices, outer_indices)\n\n def circle_topping_3_v2(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically.\n\n Inner circle: 1\n Outer circle: 2, 3\n \"\"\"\n # toppings 1 (num toppings/n = 24/4 = 6 per topping)\n inner_indices = [1] * 8 + [3] * 8\n # toppings 2 and 3\n outer_indices = [2] * 8\n\n return self.circle_topping_3(preferences, inner_indices, outer_indices)\n\n def circle_topping_3_v3(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically.\n\n Inner circle: 1\n Outer circle: 2, 3\n \"\"\"\n # toppings 1 (num toppings/n = 24/4 = 6 per topping)\n inner_indices = [2] * 8 + [3] * 8\n # toppings 2 and 3\n outer_indices = [1] * 8\n\n return self.circle_topping_3(preferences, inner_indices, outer_indices)\n\n def circle_topping_3(self, preferences, inner_indices, outer_indices):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 6 in an outer arc\n \"\"\"\n\n theta = np.pi / 16\n inner_radius = BUFFER + 0.375 / np.sin(theta)\n outer_radius = BUFFER + 0.375 / np.sin(theta / 2)\n\n outer_angle = np.pi / 2\n\n inner = [\n [\n inner_radius * np.cos((2 * i + 1) * theta),\n inner_radius * np.sin((2 * i + 1) * theta),\n inner_indices[i]\n ]\n for i in range(16)\n ]\n outer = [\n [\n outer_radius * np.cos(outer_angle + (2 * i + 1) * theta),\n outer_radius * np.sin(outer_angle + (2 * i + 1) * theta),\n outer_indices[i]\n ]\n for i in range(8)\n ]\n pizza = inner + outer\n return pizza\n\n def circle_topping_4_v1(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically.\n\n Inner circle: 1, 2\n Outer circle: 3, 4\n \"\"\"\n # toppings 1 and 2 (num toppings/n = 24/4 = 6 per topping)\n inner_indices = [1] * 6 + [2] * 6\n # toppings 3 and 4\n outer_indices = [3] * 6 + [4] * 6\n\n return self.circle_topping_4(preferences, inner_indices, outer_indices)\n\n def circle_topping_4_v2(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically\n\n Inner circle: 3, 4\n Outer circle: 1, 2\n \"\"\"\n inner_indices = [3] * 6 + [4] * 6\n outer_indices = [1] * 6 + [2] * 6\n\n return self.circle_topping_4(preferences, inner_indices, outer_indices)\n\n def circle_topping_4_v3(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically\n\n Inner circle: 1, 3\n 
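Where the `0.375 / np.sin(np.pi / 24)` ring radius above comes from: n toppings of radius r fit on a ring of radius R without overlapping exactly when R·sin(π/n) ≥ r, since half the chord between neighbouring centres must reach r. A quick numeric check:

import numpy as np

n, r, buffer = 24, 0.375, 0.001
R = buffer + r / np.sin(np.pi / n)
centers = [(R * np.cos((2 * i + 1) * np.pi / n),
            R * np.sin((2 * i + 1) * np.pi / n)) for i in range(n)]
gap = np.hypot(centers[1][0] - centers[0][0], centers[1][1] - centers[0][1])
print(gap >= 2 * r)  # True: neighbouring toppings just clear each other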
Outer circle: 2, 4\n \"\"\"\n inner_indices = [1] * 6 + [3] * 6\n outer_indices = [2] * 6 + [4] * 6\n\n return self.circle_topping_4(preferences, inner_indices, outer_indices)\n\n def circle_topping_4_v4(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically\n\n Inner circle: 2, 4\n Outer circle: 1, 3\n \"\"\"\n inner_indices = [2] * 6 + [4] * 6\n outer_indices = [1] * 6 + [3] * 6\n\n return self.circle_topping_4(preferences, inner_indices, outer_indices)\n\n def circle_topping_4_v5(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically\n\n Inner circle: 3, 2\n Outer circle: 4, 1\n \"\"\"\n inner_indices = [3] * 6 + [2] * 6\n outer_indices = [4] * 6 + [1] * 6\n\n return self.circle_topping_4(preferences, inner_indices, outer_indices)\n\n def circle_topping_4_v6(self, preferences):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically\n\n Inner circle: 4, 1\n Outer circle: 3, 2\n \"\"\"\n inner_indices = [4] * 6 + [1] * 6\n outer_indices = [3] * 6 + [2] * 6\n\n return self.circle_topping_4(preferences, inner_indices, outer_indices)\n\n def circle_topping_4(self, preferences, inner_indices, outer_indices):\n \"\"\"\n Return 1 pizza of 12 toppings in an inner circle, split horizontally,\n and 12 in an outer circle, split vertically\n \"\"\"\n\n theta = np.pi / 12\n inner_radius = BUFFER + 0.375 / np.sin(theta)\n outer_radius = BUFFER + 0.375 / np.sin(theta / 2)\n\n outer_angle = np.pi / 2\n\n inner = [\n [\n inner_radius * np.cos((2 * i + 1) * theta),\n inner_radius * np.sin((2 * i + 1) * theta),\n inner_indices[i]\n ]\n for i in range(12)\n ]\n outer = [\n [\n outer_radius * np.cos(outer_angle + (2 * i + 1) * theta),\n outer_radius * np.sin(outer_angle + (2 * i + 1) * theta),\n outer_indices[i]\n ]\n for i in range(12)\n ]\n pizza = inner + outer\n return pizza\n def radio_topping_3(self, preference):\n numbers = [1, 2, 3] #for ids \n random.shuffle(numbers)\n pizza = np.zeros((24, 3))\n #start w vertical line \n y = 1\n id = numbers.pop()\n for i in range(4):\n pizza[i][0] = -.38\n pizza[i][1] = y\n pizza[i][2] = id\n y += .76\n y=1\n for i in range(4,8): #add .76 to x \n pizza[i][0] = .38\n pizza[i][1] = y\n pizza[i][2] = id\n y += .76\n #now do our spiral ones \n y = -1 \n x = 1 \n margin = .76/math.sqrt(2)\n x_margin = .76*math.sqrt(3)/2\n y_margin = .38\n mini_margin = .38/math.sqrt(2)\n x_mini_margin = .19\n y_mini_margin = .38*math.sqrt(3)/2\n #start going right \n id = numbers.pop()\n for i in range(8,12):\n \n y_below = y - y_mini_margin \n \n x_below = x - x_mini_margin\n pizza[i][0] = x_below\n pizza[i][1] = y_below\n pizza[i][2] = id\n x += x_margin\n y -= y_margin\n y = -1 \n x = 1\n for i in range(12,16):\n y_above = y + y_mini_margin\n x_above = x + x_mini_margin\n pizza[i][0] = x_above\n pizza[i][1] = y_above\n pizza[i][2] = id\n x += x_margin\n y -= y_margin \n #now going left \n y = -1 \n x = -1 \n id = numbers.pop()\n for i in range(16,20):\n y_below = y - y_mini_margin \n x_below = x + x_mini_margin\n pizza[i][0] = x_below\n pizza[i][1] = y_below\n pizza[i][2] = id\n x -= x_margin\n y -= y_margin \n y = -1 \n x = -1 \n for i in range(20,24):\n y_above = y + y_mini_margin\n x_above = x - x_mini_margin\n pizza[i][0] = x_above\n pizza[i][1] = y_above\n pizza[i][2] = id\n x -= x_margin\n y -= y_margin \n '''for topping in 
pizza:\n print(\"x position is \" + str(topping[0]))\n print(\"y position is \" + str(topping[1]))\n print(\"id is \" + str(topping[2]))'''\n return pizza\n\n def radio_topping_4(self, preferences):\n #pizza = np.zeros((24, 3))\n numbers = [1, 2, 3,4] #for ids \n random.shuffle(numbers)\n #make these in a line and then going out diagonal \n pizza = np.zeros((24, 3))\n #start in the line up\n y = 1\n id = numbers.pop()\n for i in range(6):\n pizza[i][0] = 0\n pizza[i][1] = y\n pizza[i][2] = id\n y += .76\n #then down \n y = -1\n id = numbers.pop()\n for i in range(6,12):\n pizza[i][0] = 0\n pizza[i][1] = y\n pizza[i][2] = id\n y -= .76\n #then right\n y=0\n x = 1\n id = numbers.pop()\n for i in range(12,18):\n pizza[i][0] = x\n pizza[i][1] = 0\n pizza[i][2] = id\n x += .76\n #then left \n x = -1\n id = numbers.pop()\n for i in range(18,24):\n pizza[i][0] = x\n pizza[i][1] = 0\n pizza[i][2] = id\n x -= .76\n return pizza\n def lines_topping_2(self, preferences):\n # arrange 6 in two lines\n # arrange 4 in clusters\n # honestly for 2 topping the lines may make more sense\n # x_margin = math.sqrt(6**2-4.5**2) #circle geoemetry\n # pizzas = np.zeros((10, 24, 3))\n pizzas = []\n pizza = np.zeros((24, 3))\n x_margin = 1\n # new_y_start_change = (6-math.sqrt(35))/2\n new_y_start_change = .75 * 6\n center_size = .375\n # now lets find the starting point\n x_pos_left = -x_margin - center_size\n x_pos_right = x_margin + center_size\n\n y_start = new_y_start_change\n # loop thru a range of 12 where we place all w x-x_margin\n # have y start at new_y_start_change and go down .75 each time\n # pizza = []\n y = y_start\n # for i in range(24):\n for i in range(12):\n pizza[i][0] = x_pos_left\n pizza[i][1] = y\n pizza[i][2] = 1\n y -= .76 # to move down\n # print(\"x_pos left: \" + str(x_pos_left) + \"and y \" + str(y))\n y = y_start\n for j in range(12, 24):\n pizza[j][0] = x_pos_right\n pizza[j][1] = y\n pizza[j][2] = 2\n y -= .76 # to move down\n # print(\"x_pos right: \" + str(x_pos_right) + \"and y \" + str(y))\n\n '''for topping in pizza:\n print(\"this is x \" + str(topping[0]))\n print(\"this is y \" + str(topping[1]))\n print(\"this is id \" + str(topping[2]))'''\n\n #commenting out if we use random choice of number of each algo per pizza\n # for x in range(10):\n # pizzas.append(pizza)\n\n '''for pizza in pizzas:\n for topping in pizza:\n print(\"this is x \" + str(topping[0]))\n print(\"this is y \" + str(topping[1]))\n print(\"this is id \" + str(topping[2]))'''\n\n print(\"using this function\")\n # return list(pizzas)\n return pizza\n\n # do the same with x+x_margin and toppin id 2\n\n # repeat 10 times to make all 10 pizzas\n\n # return list of pizzas\n\n\n def lines_topping_3(self, preferences):\n numbers = [1, 2, 3]\n random.shuffle(numbers)\n pizzas = []\n pizza = np.zeros((24, 3))\n x_margin = 1.5\n # new_y_start_change = (6-math.sqrt(35))/2\n new_y_start_change = .75 * 4\n center_size = .375\n # now lets find the starting point\n x_pos_left = -x_margin #- center_size\n x_pos_middle = 0#center_size\n x_pos_right = x_margin #+ center_size\n\n y_start = new_y_start_change\n y = y_start\n # for i in range(24):\n id = numbers.pop()\n for i in range(8):\n pizza[i][0] = x_pos_left\n pizza[i][1] = y\n pizza[i][2] = id\n y -= .76 # to move down\n # print(\"x_pos left: \" + str(x_pos_left) + \"and y \" + str(y))\n y = y_start\n id = numbers.pop()\n for j in range(8, 16):\n pizza[j][0] = x_pos_middle\n pizza[j][1] = y\n pizza[j][2] = id\n y -= .76 # to move down\n y = y_start\n id = 
numbers.pop()\n for k in range(16, 24):\n pizza[k][0] = x_pos_right\n pizza[k][1] = y\n pizza[k][2] = id\n y -= .76 # to move down\n # print(\"x_pos right: \" + str(x_pos_right) + \"and y \" + str(y))\n return pizza\n\n def lines_topping_4(self, preferences):\n pizzas = []\n pizza = np.zeros((24, 3))\n numbers = [1, 2, 3, 4]\n random.shuffle(numbers)\n x_margin = 1.5\n # new_y_start_change = (6-math.sqrt(35))/2\n new_y_start_change = .75 * 3\n center_size = .375\n # now lets find the starting point\n x_pos_left1 = -2.25 - center_size\n x_pos_left2 = -.75 - center_size\n x_pos_right1 = .75 + center_size\n x_pos_right2 = 2.25 + center_size\n\n y_start = new_y_start_change\n y = y_start\n # for i in range(24):\n id = numbers.pop()\n for i in range(6):\n pizza[i][0] = x_pos_left1\n pizza[i][1] = y\n pizza[i][2] = id \n y -= .76 # to move down\n # print(\"x_pos left: \" + str(x_pos_left) + \"and y \" + str(y))\n y = y_start\n id = numbers.pop()\n for j in range(6, 12):\n pizza[j][0] = x_pos_left2\n pizza[j][1] = y\n pizza[j][2] = id\n y -= .76 # to move down\n y = y_start\n id = numbers.pop()\n for k in range(12, 18):\n pizza[k][0] = x_pos_right1\n pizza[k][1] = y\n pizza[k][2] = id\n y -= .76 # to move down\n y = y_start\n id = numbers.pop()\n for l in range(18, 24):\n pizza[l][0] = x_pos_right2\n pizza[l][1] = y\n pizza[l][2] = id\n y -= .76 # to move down\n # print(\"x_pos right: \" + str(x_pos_right) + \"and y \" + str(y))\n return pizza\n\n def list_sum_to_total(self, total, num_values):\n if num_values < 1:\n raise ValueError(\"Number of values must be at least 1.\")\n if num_values == 1:\n return [total]\n\n distribution = []\n\n for _ in range(num_values - 1):\n value = random.randint(0, total)\n distribution.append(value)\n total -= value\n\n distribution.append(total)\n random.shuffle(distribution)\n\n return distribution\n\n #def choose_discard(self, cards: list[str], constraints: list[str]):\n def choose_toppings(self, preferences):\n \"\"\"Function in which we choose position of toppings\n\n Args:\n num_toppings(int) : the total number of different topics chosen among 2, 3 and 4\n preferences(list) : List of size 100*2*num_toppings for 100 generated preference pairs(actual amounts) of customers.\n\n Returns:\n pizzas(list) : List of size [10,24,3], where 10 is the pizza id, 24 is the topping id, innermost list of size 3 is [x coordinate of topping center, y coordinate of topping center, topping number of topping(1/2/3/4) (Note that it starts from 1, not 0)]\n \"\"\"\n\n pizzas = []\n radiusRange = 1\n cutRange = 1\n #initialize pizza types\n if self.num_toppings == 2:\n pizzas.append([self.lines_topping_2(preferences), \"line\", 0])\n pizzas.append([self.circle_topping_2(preferences), \"circle\", 0])\n cutRange = 5\n if self.num_toppings == 3:\n pizzas.append([self.lines_topping_3(preferences), \"line\", 0])\n pizzas.append([self.circle_topping_3_v1(preferences), \"circle1\", 0])\n pizzas.append([self.circle_topping_3_v2(preferences), \"circle2\", 0])\n pizzas.append([self.circle_topping_3_v3(preferences), \"circle3\", 0])\n pizzas.append([self.radio_topping_3(preferences), \"radio\", 0])\n cutRange = 4 \n if self.num_toppings == 4:\n pizzas.append([self.lines_topping_4(preferences), \"line\", 0])\n pizzas.append([self.circle_topping_4_v1(preferences), \"circle1\", 0])\n pizzas.append([self.circle_topping_4_v2(preferences), \"circle2\", 0])\n pizzas.append([self.circle_topping_4_v3(preferences), \"circle3\", 0])\n pizzas.append([self.circle_topping_4_v4(preferences), \"circle4\", 
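A note on list_sum_to_total above: drawing each part as randint(0, remaining) skews early draws high, so even after shuffling the split is far from uniform. A common unbiased alternative is a single multinomial draw; a sketch:

import numpy as np

def even_split(total, num_values, rng=None):
    rng = rng or np.random.default_rng()
    return rng.multinomial(total, [1.0 / num_values] * num_values).tolist()

print(even_split(10, 4))  # e.g. [3, 2, 2, 3]; always sums to 10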
0])\n pizzas.append([self.circle_topping_4_v5(preferences), \"circle5\", 0])\n pizzas.append([self.circle_topping_4_v6(preferences), \"circle6\", 0])\n pizzas.append([self.radio_topping_4(preferences), \"radio\", 0])\n cutRange = 3\n\n for preference in preferences: #every preference\n maximumS = -1000\n maximumCut = [self.xCenter, self.yCenter, np.pi/6, pizzas[0]] \n pizza_id = 0\n for pizzaObj in pizzas: #every pizza type\n pizza = pizzaObj[0] \n pizzaType = pizzaObj[1]\n for radius in range(0, 6, radiusRange): #radius lengths\n for x in range(-radius*cutRange, radius*cutRange): #how many cuts along the circumference\n x = x/cutRange\n for ySign in range(-1, 2, 2): #top / bottom of circle\n y = self.circleCoordinates(x, ySign, radius)\n cut = [x, y, np.pi/6, pizzaType]\n \n xCord = (self.xCenter + x*self.multiplier)\n yCord = (self.yCenter - y*self.multiplier)\n obtained_pref, slice_areas_toppings = self.calculator.ratio_calculator(pizza, [xCord, yCord, np.pi/6], self.num_toppings, self.multiplier, self.xCenter, self.yCenter)\n obtained_pref = np.array(obtained_pref)\n random_pref, temp = self.calculator.ratio_calculator(pizza, [self.xCenter, self.yCenter, self.rng.random()*2*np.pi], self.num_toppings, self.multiplier, self.xCenter, self.yCenter)\n random_pref = np.array(random_pref)\n required_pref = np.array(preference)\n uniform_pref = np.ones((2, self.num_toppings))*(12/self.num_toppings)\n b = np.round(np.absolute(required_pref - uniform_pref), 3)\n c = np.round(np.absolute(obtained_pref - required_pref), 3)\n u = np.round(np.absolute(random_pref - uniform_pref), 3)\n s = (b-c).sum()\n if s > maximumS:\n maximumS = s\n maximumCut = cut\n maximumPizza = pizza_id\n pizza_id += 1\n\n #note: the dict is not in use since every preference is uniquely generated in the topping and choose pizza phases.\n # it hard to match preferences from here to preferences in choosing the pizza \n hashKey = int(preference[0][0] * preference[0][1] * preference[1][0] * preference[1][1])\n self.preferencesDict[hashKey] = maximumCut \n\n #store how many pizzas of each type we are building\n pizzaBuilt = pizzas[maximumPizza][2]\n pizzas[maximumPizza][2] = pizzaBuilt + 1\n\n finalPizzas = []\n for pizzaType in pizzas: #add the pizza types to the final list\n finalPizzas += [pizzaType[0]]*(round(pizzaType[2]/10))\n\n while len(finalPizzas) < 10: #add more pizzas if below 10\n finalPizzas += [pizzas[0][0]]\n finalPizzas = finalPizzas[0:10] #make sure to only use 10 pizzas if over 10 pizzas\n \n return finalPizzas\n #-2\n # if self.num_toppings == 2:\n #\n # # we can use the approach distribution here to decide how many of each approach we want to use\n # # We have 2 algos for now\n # num_runs_per_approach = self.list_sum_to_total(10, 2)\n # print(f'num_runs_per_approach: {num_runs_per_approach}')\n # #\n # # pizzas = []\n # # for i in range(num_runs_per_approach[0]):\n # # pizzas.append(self.lines_topping_2(preferences))\n # # for i in range(num_runs_per_approach[1]):\n # # pizzas.append(self.circle_topping_2(preferences))\n # # return pizzas\n #\n # pizzas = [self.lines_topping_2(preferences)] * 5 + [self.circle_topping_2(preferences)] * 5\n #\n # return pizzas\n #\n #\n # elif self.num_toppings == 3:\n #\n # # num_runs_per_approach = self.list_sum_to_total(10, 4)\n # # print(f'num_runs_per_approach: {num_runs_per_approach}')\n # #\n # # pizzas = []\n # # for i in range(10):\n # # pizzas.append(self.radio_topping_3(preferences))\n # '''for i in range(num_runs_per_approach[0]):\n # 
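A quick check of the allocation arithmetic above: each layout wins some number of the 100 generated preferences, gets round(wins/10) copies in finalPizzas, and the list is then padded with the first layout or truncated to exactly 10:

counts = [37, 41, 22]                    # cuts won per layout over 100 preferences
alloc = [round(c / 10) for c in counts]
print(alloc, sum(alloc))                 # [4, 4, 2] 10 -- pad/truncate handles any remainder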
pizzas.append(self.circle_topping_3_v1(preferences))\n # for i in range(num_runs_per_approach[1]):\n # pizzas.append(self.circle_topping_3_v2(preferences))\n # for i in range(num_runs_per_approach[2]):\n # pizzas.append(self.circle_topping_3_v3(preferences))\n # for i in range(num_runs_per_approach[3]):\n # pizzas.append(self.lines_topping_3(preferences))'''\n # # for i in range(10):\n # # pizzas.append(self.lines_topping_3(preferences))\n # # return pizzas\n #\n # # 2 circles of each type (6) + 2 lines + 2 radial lines\n # # pizzas = [self.circle_topping_3_v1(preferences)] * 2 + [self.circle_topping_3_v2(preferences)] * 2 + [self.circle_topping_3_v3(preferences)] * 2 + [self.lines_topping_3(preferences)] * 2 + [self.radio_topping_3(preferences)] * 2\n #\n # # return pizzas\n #\n # return [self.circle_topping_3_v1(preferences)] * 10\n #\n # elif self.num_toppings == 4:\n #\n # \"\"\"once we have multiple approaches, we can randomly choose which one to use\n # THEN, we can loop through them for each value in the distribution to generate 10 pizzas\"\"\"\n #\n # # # For now, we have 2 approaches for 4 toppings. need to manually code for however many approaches we have\n # # num_runs_per_approach = self.list_sum_to_total(10, 9)\n # # print(f'num_runs_per_approach: {num_runs_per_approach}')\n # #\n # # pizzas = []\n # #\n # # for i in range(num_runs_per_approach[0]):\n # # pizzas.append(self.circle_topping_4_v1(preferences))\n # # for i in range(num_runs_per_approach[1]):\n # # pizzas.append(self.circle_topping_4_v2(preferences))\n # #\n # # for i in range(num_runs_per_approach[2]):\n # # pizzas.append(self.lines_topping_4(preferences))\n # # for i in range(num_runs_per_approach[3]):\n # # pizzas.append(self.lines_topping_4(preferences))\n # #\n # # for i in range(num_runs_per_approach[4]):\n # # pizzas.append(self.circle_topping_4_v3(preferences))\n # # for i in range(num_runs_per_approach[5]):\n # # pizzas.append(self.circle_topping_4_v4(preferences))\n # # for i in range(num_runs_per_approach[6]):\n # # pizzas.append(self.circle_topping_4_v5(preferences))\n # # for i in range(num_runs_per_approach[7]):\n # # pizzas.append(self.circle_topping_4_v6(preferences))\n # #\n # # for i in range(num_runs_per_approach[8]):\n # # pizzas.append(self.radio_topping_4(preferences))\n # # return pizzas\n #\n #\n # # 1 circle of each type (6) + 2 lines + 2 radial lines\n # pizzas = [self.circle_topping_4_v1(preferences)] * 1 + [self.circle_topping_4_v2(preferences)] * 1 + [self.circle_topping_4_v3(preferences)] * 1 + [self.circle_topping_4_v4(preferences)] * 1 + [self.circle_topping_4_v5(preferences)] * 1 + [self.circle_topping_4_v6(preferences)] * 1 + [self.lines_topping_4(preferences)] * 2 + [self.radio_topping_4(preferences)] * 2\n #\n # return pizzas\n\n #def play(self, cards: list[str], constraints: list[str], state: list[str], territory: list[int]) -> Tuple[int, str]:\n def choose_and_cut(self, pizzas, remaining_pizza_ids, customer_amounts):\n \"\"\"Function which based n current game state returns the distance and angle, the shot must be played\n\n Args:\n pizzas (list): List of size [10,24,3], where 10 is the pizza id, 24 is the topping id, innermost list of size 3 is [x coordinate of topping, y coordinate of topping, topping number of topping(1/2/3/4)]\n remaining_pizza_ids (list): A list of remaining pizza's ids\n customer_amounts (list): The amounts in which the customer wants their pizza\n\n Returns:\n Tuple[int, center, first cut angle]: Return the pizza id you choose, the center of the cut in 
format [x_coord, y_coord] where both are in inches relative of pizza center of radius 6, the angle of the first cut in radians. \n \"\"\"\n maximumS = -1000\n maximumCut = [self.xCenter, self.yCenter, np.pi/6, remaining_pizza_ids[0]] #default cut\n\n for pizza_id in remaining_pizza_ids:\n pizza = pizzas[pizza_id]\n for radius in range(0, 6): \n for x in range(-radius*5, radius*5):\n x = x/5\n for ySign in range(-1, 2, 2):\n y = self.circleCoordinates(x, ySign, radius)\n cut = [x, y, np.pi/6, pizza_id]\n \n xCord = (self.xCenter + x*self.multiplier)\n yCord = (self.yCenter - y*self.multiplier)\n obtained_pref, slice_areas_toppings = self.calculator.ratio_calculator(pizza, [xCord, yCord, np.pi/6], self.num_toppings, self.multiplier, self.xCenter, self.yCenter)\n obtained_pref = np.array(obtained_pref)\n random_pref, temp = self.calculator.ratio_calculator(pizza, [self.xCenter, self.yCenter, self.rng.random()*2*np.pi], self.num_toppings, self.multiplier, self.xCenter, self.yCenter)\n random_pref = np.array(random_pref)\n required_pref = np.array(customer_amounts)\n uniform_pref = np.ones((2, self.num_toppings))*(12/self.num_toppings)\n b = np.round(np.absolute(required_pref - uniform_pref), 3)\n c = np.round(np.absolute(obtained_pref - required_pref), 3)\n u = np.round(np.absolute(random_pref - uniform_pref), 3)\n s = (b-c).sum()\n if s > maximumS:\n maximumS = s\n maximumCut = cut \n x = maximumCut[0]\n y = maximumCut[1]\n theta = maximumCut[2]\n pizza_id = maximumCut[3]\n return pizza_id, [x,y], theta\n\n #this function will take in an x, sign of y, radius and return the y coordinate from the equation of a circle\n def circleCoordinates(self, x, ySign, radius):\n y = ySign*(math.sqrt((radius**2) - (x**2)))\n return y\n","repo_name":"akshay-022/Pizza-game","sub_path":"players/team_1.py","file_name":"team_1.py","file_ext":"py","file_size_in_byte":32310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12987146744","text":"# 30DaysOfPython Exercises \r\n\r\n\r\nprint(\"...................(\"\"Exercise Level 1 (Q.1)\"\")............................\")\r\n'''What is the most frequent word in the following paragraph? \r\nparagraph = 'I love teaching. If you do not love teaching what else can you love.\r\nI love Python if you do not love something which can give you all the capabilities\r\nto develop an application what else can you love.'''\r\n\r\nparagraph = 'I love teaching. If you do not love teaching what else can you love. I love Python if you do not love something which can give you all the capabilities to develop an application what else can you love.'\r\n\r\n# remove punctuation and convert to lowercase\r\nclean_paragraph = paragraph.lower().replace('.', '').replace(',', '')\r\n\r\n# split paragraph into individual words\r\nwords = clean_paragraph.split()\r\n\r\n# count the occurrences of each word\r\nword_counts = {}\r\nfor word in words:\r\n if word in word_counts:\r\n word_counts[word] += 1\r\n else:\r\n word_counts[word] = 1\r\n\r\n# find the word with the highest count\r\nmost_frequent_word = max(word_counts, key=word_counts.get)\r\n\r\nprint(\"The most frequesnt word is:\", most_frequent_word)\r\n\r\nprint(\"...................(\"\"Exercise Level 1 (Q.2)\"\")............................\")\r\n\r\n'''The position of some particles on the horizontal x-axis are -12, -4, -3 and -1 \r\nin the negative direction, 0 at origin, 4 and 8 in the positive direction. 
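For intuition on the score used in both search loops above, s = Σ(b − c) with b = |required − uniform| and c = |obtained − required|: a cut scores positively when it tracks the customer's amounts more closely than a uniform split would. A tiny worked example, simplified to a single 2-topping vector (the real arrays are 2 × num_toppings, one row per customer half):

import numpy as np

required = np.array([8.0, 4.0])   # customer wants 8/4 across two toppings
uniform = np.array([6.0, 6.0])    # 12 / num_toppings each
obtained = np.array([7.5, 4.5])   # what one candidate cut delivers
b = np.abs(required - uniform)
c = np.abs(obtained - required)
print((b - c).sum())              # 3.0 -> positive, so this is a good cut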
\r\nExtract these numbers from this whole text and find the distance between the \r\ntwo furthest particles. points = ['-1', '2', '-4', '-3', '-1', '0', '4', '8']\r\nsorted_points = [-4, -3, -1, -1, 0, 2, 4, 8]\r\ndistance = 8 -(-4) # 12.'''\r\n\r\n\r\ntext = \"The position of some particles on the horizontal x-axis are -12, -4, -3 and -1 in the negative direction, 0 at origin, 4 and 8 in the positive direction.\"\r\n\r\nimport re\r\n\r\n# extract the numbers from the text\r\npoints = re.findall(r'-?\\d+', text)\r\n\r\n# convert the numbers to integers\r\npoints = [int(p) for p in points]\r\n\r\n# sort the points in ascending order\r\nsorted_points = sorted(points)\r\n\r\n# calculate the distance between the furthest points\r\ndistance = abs(sorted_points[-1] - sorted_points[0])\r\n\r\nprint(distance)\r\n\r\n\r\nprint(\"...................(\"\"Exercise Level 2 (Q.1)\"\")............................\")\r\n\r\n''' Write a pattern which identifies if a string is a valid python variable\r\n\r\nis_valid_variable('first_name') # True\r\nis_valid_variable('first-name') # False\r\nis_valid_variable('1first_name') # False\r\nis_valid_variable('firstname') # True '''\r\n\r\ndef is_valid_variable(variable):\r\n pattern = r'^[a-zA-Z_]\\w*$'\r\n return re.match(pattern, variable) is not None\r\n\r\nprint(is_valid_variable('first_name')) # True\r\nprint(is_valid_variable('first-name')) # False\r\nprint(is_valid_variable('1first_name')) # False\r\nprint(is_valid_variable('firstname')) # True\r\n\r\n\r\nprint(\"...................(\"\"Exercise Level 3 (Q.1)\"\")............................\")\r\n\r\n''' Clean the following text. After cleaning, count three most frequent words in the string.\r\nsentence = %I $am@% a %tea@cher%, &and& I lo%#ve %tea@ching%;. There $is nothing;\r\n&as& mo@re rewarding as educa@ting &and& @emp%o@wering peo@ple. ;I found tea@ching \r\nm%o@re interesting tha@n any other %jo@bs. %Do@es thi%s mo@tivate yo@u to be a \r\ntea@cher!?\r\n\r\nprint(clean_text(sentence));\r\nI am a teacher and I love teaching There is nothing as more rewarding as educating\r\nand empowering people I found teaching more interesting than any other jobs Does \r\nthis motivate you to be a teacher print(most_frequent_words(cleaned_text)) \r\n# [(3, 'I'), (2, 'teaching'), (2, 'teacher')] '''\r\n\r\nfrom collections import Counter\r\n\r\ndef clean_text(text):\r\n pattern = r'[^a-zA-Z0-9\\s]'\r\n text = re.sub(pattern, '', text)\r\n text = text.lower()\r\n return text\r\n\r\ndef most_frequent_words(text):\r\n words = text.split()\r\n word_counts = Counter(words)\r\n return word_counts.most_common(3)\r\n\r\nsentence = '''%I $am@% a %tea@cher%, &and& I lo%#ve %tea@ching%;. There $is \r\nnothing; &as& mo@re rewarding as educa@ting &and& @emp%o@wering peo@ple. ;\r\nI found tea@ching m%o@re interesting tha@n any other %jo@bs. 
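Worth knowing alongside the regex above: Python strings ship a built-in check that covers the same cases (and also non-ASCII identifiers), though it does not exclude keywords:

import keyword

print('first_name'.isidentifier())    # True
print('first-name'.isidentifier())    # False
print('1first_name'.isidentifier())   # False
print('class'.isidentifier(), keyword.iskeyword('class'))  # True True -- check both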
%Do@es thi%s \r\nmo@tivate yo@u to be a tea@cher!?'''\r\n\r\ncleaned_text = clean_text(sentence)\r\nprint(cleaned_text)\r\n# Output: i am a teacher and i love teaching there is nothing as more rewarding as educating and empowering people i found teaching more interesting than any other jobs does this motivate you to be a teacher\r\n\r\nprint(most_frequent_words(cleaned_text))\r\n# Output: [(3, 'i'), (2, 'teaching'), (2, 'teacher')]\r\n","repo_name":"abufaiz/30DaysOfPython_ArewaDataScience","sub_path":"Exercise_day_018.py","file_name":"Exercise_day_018.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9142318983","text":"import os\nimport sys\n\n\nclass Connector:\n\n def __init__(self):\n self.parameters = dict()\n\n def __str__(self):\n return \"AzureConnector\"\n\n def load(self, *args, **kwargs):\n if isinstance(args, dict):\n # print(args)\n self.parameters = args\n else:\n # print(args[0])\n self.parameters = args[0]\n\n def connect(self, azure_command: str):\n c = \"\"\"az cloud set --name %%AZURE_ENVIRONMENT%%;\\\naz login --service-principal -u %%AZURE_PRINCIPLE%% -p %%AZURE_SECRET%% --tenant %%AZURE_TENANT%%;\\\naz account set --subscription %%AZURE_SUBSCRIPTION%%;%%AZURE_COMMAND%%\"\"\"\n c = c.replace(\"%%AZURE_ENVIRONMENT%%\", apply_vars(self.parameters[\"cloud\"]))\\\n .replace(\"%%AZURE_SUBSCRIPTION%%\", apply_vars(self.parameters[\"subscription\"]))\\\n .replace(\"%%AZURE_PRINCIPLE%%\",apply_vars(self.parameters[\"principle\"]))\\\n .replace(\"%%AZURE_SECRET%%\", apply_vars(self.parameters[\"secret\"]))\\\n .replace(\"%%AZURE_TENANT%%\", apply_vars(self.parameters[\"tenant\"]))\\\n .replace(\"%%AZURE_COMMAND%%\", apply_vars(azure_command))\n\n if os.environ.get(\"ANSIBOOT_DRY_RUN\") is not None:\n print(\"AZURE STRING:\" + c)\n else:\n os.system(c)\n\n return c\n\n def play(self, play, variables=None):\n command = \"\"\n content = \"\"\n tag_name = \"\"\n tag_value = \"\"\n\n processor = \"RunPowerShellScript\"\n if \"processor\" in play:\n processor = play[\"processor\"]\n\n if \"script\" in play:\n # print(\"\\t\\tSCRIPT:\" + str(play))\n content = \"@\" + play[\"script\"]\n elif \"command\" in play:\n # print(\"\\t\\tCOMMAND:\" + str(play))\n content = \"'\" + play[\"command\"] + \"'\"\n\n command = \"az vm run-command invoke --command-id %%COMMAND_PROCESSOR%%\" + \\\n \" --name %%INGEST_NAME%% -g %%RESOURCE_GROUP%% --scripts %%COMMAND_TEXT%%\"\n\n if \"tag\" in play:\n tag_name = play[\"tag\"]\n tag_value = play[\"value\"]\n command = \"az vm update --resource-group $RESOURCE_GROUP --name $INGEST_NAME \" + \\\n \"--set tags.%%AZURE_TAG_NAME%%=%%AZURE_TAG_VALUE%%\"\n\n command_text = command.replace(\"%%COMMAND_PROCESSOR%%\", processor).\\\n replace(\"%%COMMAND_TEXT%%\", content). \\\n replace(\"%%AZURE_TAG_NAME%%\", tag_name). \\\n replace(\"%%AZURE_TAG_VALUE%%\", tag_value). \\\n replace(\"%%INGEST_NAME%%\", play[\"resource_name\"]). 
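A hedged sketch of driving the Connector above through its dry-run branch (setting ANSIBOOT_DRY_RUN makes connect() print the composed az string instead of executing it); every credential value here is a placeholder:

import os

os.environ['ANSIBOOT_DRY_RUN'] = '1'   # force the print branch, nothing is executed
conn = Connector()
conn.load({'cloud': 'AzureCloud', 'subscription': 'sub-id',
           'principle': 'app-id', 'secret': 's3cret', 'tenant': 'tenant-id'})
conn.connect('az group list')          # prints "AZURE STRING:az cloud set ..."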
\\\n replace(\"%%RESOURCE_GROUP%%\", play[\"resource_group\"])\n\n c = self.connect(command_text)\n sys.stdout.flush()\n\n def get(self, name):\n if name in self.parameters:\n return self.parameters[name]\n else:\n return None\n\n\ndef apply_vars(var_value):\n value = var_value\n if value is None:\n return var_value\n for k, v in os.environ.items():\n rk = \"$\" + k\n if rk in value:\n value = str(value).replace(rk, v)\n #print(kv + \" = \" + str(value))\n return value","repo_name":"miketbush/ansiboot","sub_path":"azure_connector.py","file_name":"azure_connector.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38753513551","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"utils\"\n\nfrom typing import (TypeVar, Iterable, FrozenSet, Optional, Callable,\n Sequence, Dict, Any)\nimport inspect\nfrom copy import deepcopy, copy\nfrom functools import wraps\nfrom enum import Enum\n\nfrom .inspection import getlocals\n\nNoArgs = type('NoArgs', (), {})\nDefaultValue = NoArgs\n\ndef toenum(tpe, val):\n \"returns an enum object\"\n if not isinstance(tpe, type):\n tpe = type(tpe)\n if not issubclass(tpe, Enum):\n return val\n if isinstance(val, (int, str)):\n elem = next((i for i in tpe if i.value == val), None)\n if elem is None and isinstance(val, str):\n elem = getattr(tpe, val, None)\n if elem is None:\n elem = tpe(val)\n return elem\n if isinstance(val, tpe):\n return val\n if val is not None:\n raise TypeError('\"level\" attribute has incorrect type')\n return None\n\nclass ChangeFields:\n \"Context within which given fields are momentarily changed\"\n def __init__(self, obj, __items__ = None, **items):\n self.obj = obj\n if __items__ is not None:\n items.update(__items__)\n self.items = items\n self.olds: Dict[str, Any] = dict()\n\n def __enter__(self):\n for name, kwdef in self.items.items():\n self.olds[name] = old = getattr(self.obj, name)\n\n kwdef = toenum(old, kwdef)\n\n setattr(self.obj, name, kwdef)\n return self.olds\n\n def __exit__(self, *_):\n for i, j in self.olds.items():\n setattr(self.obj, i , j)\n\ndef changefields(obj, __items__ = None, **items):\n \"Context within which given fields are momentarily changed\"\n return ChangeFields(obj, __items__, **items)\n\ndef kwargsdefaults(*items, asinit = True):\n \"\"\"\n Keyword arguments are used for changing an object's fields before running\n the method.\n\n Using *asinit* means the same attributes as for __init__ will be allowed.\n This is only valid if initdefaults decorates the __init__\n \"\"\"\n def _fields(vals):\n assert len(vals) and all(isinstance(i, str) for i in vals)\n accepted = frozenset(vals)\n return lambda _: accepted\n\n if len(items) == 1 and isinstance(items[0], Iterable) and not isinstance(items[0], str):\n items = items[0] # type: ignore\n\n if len(items):\n items = tuple(i for i in items if i[0].upper() != i[0])\n\n call = len(items) == 1 and callable(items[0])\n if call:\n if not asinit:\n fields = fieldnames\n else:\n pot = getattr(getlocals(1).get('__init__', None), 'KEYS', None)\n fields = fieldnames if pot is None else _fields(pot)\n else:\n fields = fieldnames if not len(items) else _fields(items)\n\n def _wrapper(fcn):\n @wraps(fcn)\n def _wrap(self, *args, **kwargs):\n if len(kwargs):\n tochange = {i: kwargs.pop(i) for i in fields(self) & frozenset(kwargs)}\n if len(tochange):\n with changefields(self, tochange):\n return fcn(self, *args, **kwargs)\n\n return fcn(self, *args, **kwargs)\n 
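A short usage sketch for the ChangeFields/changefields context manager defined above: the listed fields are set for the duration of the with-block and restored on exit.

class Config:
    level = 1

cfg = Config()
with changefields(cfg, level=5):
    print(cfg.level)  # 5 inside the context
print(cfg.level)      # 1 again -- the old value is restored by __exit__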
return _wrap\n\n return _wrapper(items[0]) if call else _wrapper\n\ndef setdefault(self, name, kwargs, roots = ('',), # pylint: disable=too-many-arguments\n cpy = None, deepcpy = None, fieldname = None):\n \"\"\"\n Uses the class attribute to initialize the object's fields if no keyword\n arguments were provided.\n \"\"\"\n if fieldname is None:\n fieldname = name\n\n clsdef = getattr(type(self), fieldname)\n if isinstance(clsdef, property):\n clsdef = getattr(self, fieldname)\n\n for root in roots:\n kwdef = kwargs.get(root+name, NoArgs)\n\n if kwdef is NoArgs:\n continue\n\n setattr(self, fieldname, toenum(clsdef, kwdef))\n break\n else:\n if deepcpy is not None:\n setattr(self, fieldname, deepcopy(getattr(cpy, fieldname)))\n elif isinstance(cpy, dict):\n setattr(self, fieldname, cpy[fieldname])\n elif cpy is not None:\n setattr(self, fieldname, getattr(cpy, fieldname))\n else:\n setattr(self, fieldname, deepcopy(clsdef))\n\nclass _Updater:\n __slots__ = ('roots', 'mandatory', 'update', 'call', 'attrs', 'ignore')\n def __init__(self,\n attrs:Sequence[str],\n roots:Sequence[str],\n mandatory: bool,\n kwa:Dict[str,str]) -> None:\n self.roots = roots\n self.mandatory = mandatory\n kwa = {i: (j.lower() if isinstance(j, str) else j)\n for i, j in kwa.items()}\n self.update = tuple(i for i in attrs if kwa.get(i, '') == 'update') # type: ignore\n self.call = tuple((i, j) for i, j in kwa.items() if callable(j)) # type: ignore\n self.ignore = tuple(i for i in attrs if kwa.get(i, '') == 'ignore')\n self.attrs = tuple((i, i) for i in attrs if kwa.get(i, '') != 'ignore')\n self.attrs += tuple((i, j+i if j == '_' else j) for i, j in kwa.items()\n if isinstance(j, str) and j not in ('update', 'ignore'))\n assert len(set(i for i, _ in self.call) & set(self.attrs) ) == 0\n\n def __call__(self, obj, kwargs, cpy = None, deepcpy = None):\n if len(cpy) > 1:\n raise TypeError(\"__init__ takes at most a single positional\"\n +\" argument as a copy constructor\")\n cpy = cpy[0] if len(cpy) == 1 else None\n\n for name, field in self.attrs:\n if self.mandatory and cpy is None and deepcpy is None and name not in kwargs:\n raise KeyError(\"Missing keyword '%s' in __init__\" % name)\n setdefault(obj, name, kwargs, self.roots, cpy, deepcpy, field)\n\n for name in self.update:\n update(getattr(obj, name), **kwargs)\n\n for name, fcn in self.call:\n if name in kwargs:\n fcn(obj, kwargs[name])\n\ndef initdefaults(*attrs, roots = ('',), mandatory = False, **kwa):\n \"\"\"\n Creates an *__init__* such that instance fields and their default value are\n defined using class fields. 
The default values are deepcopied into each instance.\n\n Exemple:\n\n >>> class Cls:\n >>> attr = [] # the default list will be deepcopied in each instance\n >>> ignored = 0 # field will not be initialized\n >>> _protected = 1 # field will be initialized with a specific keyword\n >>> @initdefaults(frozenset(locals()),\n >>> ignored = 'ignore',\n >>> call = lambda self, value: setattr(self, 'ignored', 2*value),\n >>> protect = \"_protected\")\n >>> def __init__(self, **kwa):\n >>> pass\n\n >>> assert Cls().ignored == 0\n >>> assert Cls(call = 1).ignored == 2\n >>> assert Cls()._protected == 1\n >>> assert Cls(protect = 2)._protected == 2\n >>> assert Cls().attr == []\n >>> assert Cls().attr is not Cls.attr\n >>> lst = [2]\n >>> assert Cls(attr = lst).attr is lst # no deep copy of passed values\n\n By decorating the *__init__* as follows, it's possible to affect passed values\n\n >>> class Trans:\n >>> attr1 = 1\n >>> attr2 = 2\n >>> @initdefaults(frozenset(locals()))\n >>> def __init__(self, *kwa:dict, **_) -> None: # _ is needed by pylint\n >>> kwa[0].pop('attr1', None)\n >>> if 'attr2' in kwa[0]:\n >>> kwa[0]['attr2'] *= 2\n\n >>> assert Trans(attr1 = 100).attr1 == 1\n >>> assert Trans(attr2 = 100).attr2 == 200\n\n\n One can update grandchild fields using kwargs:\n\n >>> class Agg:\n >>> elem = Cls()\n >>> @initdefaults(frozenset(locals()), elem = 'update')\n >>> def __init__(self, **kwa):\n >>> pass\n\n >>> assert Agg(attr = [2]).elem.attr == [2]\n\n \"\"\"\n\n fcn = None\n if len(attrs) == 1 and isinstance(attrs[0], Iterable) and not isinstance(attrs[0], str):\n attrs = attrs[0] # type: ignore\n\n delayed = next((i for i in attrs if i == '__delayed_init__'), None) is not None\n attrs = tuple(i for i in attrs if i != '__delayed_init__')\n\n if len(attrs) == 1 and callable(attrs[0]):\n fcn = attrs[0]\n attrs = ()\n\n if len(attrs) == 0:\n attrs = tuple(i for i in getlocals(1).keys())\n\n assert len(attrs) and all(isinstance(i, str) for i in attrs)\n attrs = tuple(i for i in attrs if i[0].upper() != i[0])\n\n def _wrapper(fcn):\n sig = inspect.signature(fcn).parameters\n val = tuple(sig.values())[1]\n initafter = kwa.pop('initafter', False)\n updater = _Updater(attrs, roots, mandatory, kwa)\n if val.kind == val.VAR_KEYWORD and initafter:\n def __init__(self, *args, **kwargs):\n updater(self, kwargs, cpy = args)\n fcn (self, **kwargs)\n if delayed:\n self.__delayed_init__(kwargs)\n\n setattr(__init__, 'IS_GET_STATE', True)\n\n elif val.kind == val.VAR_KEYWORD and not initafter:\n def __init__(self, *args, **kwargs):\n fcn (self, **kwargs)\n updater(self, kwargs, cpy = args)\n if delayed:\n self.__delayed_init__(kwargs)\n setattr(__init__, 'IS_GET_STATE', True)\n\n elif initafter:\n def __init__(self, *args, **kwargs):\n updater(self, kwargs)\n fcn (self, *args, **kwargs)\n if delayed:\n self.__delayed_init__(*args, **kwargs)\n\n elif ((val.annotation, val.kind) == (dict, val.VAR_POSITIONAL)\n and (len(sig) == 2 or (len(sig) == 3 and tuple(sig)[2] == '_'))):\n def __init__(self, *args, **kwargs):\n fcn (self, kwargs)\n updater(self, kwargs, cpy = args)\n if delayed:\n self.__delayed_init__(kwargs)\n setattr(__init__, 'IS_GET_STATE', True)\n\n else:\n def __init__(self, *args, **kwargs):\n fcn (self, *args, **kwargs)\n updater(self, kwargs)\n if delayed:\n self.__delayed_init__(*args, **kwargs)\n\n setattr(__init__, 'UPDATER', updater)\n setattr(__init__, 'KEYS', attrs)\n return wraps(fcn)(__init__)\n\n return _wrapper if fcn is None else _wrapper(fcn)\n\ndef addattributes(cls, *_, 
protected: Dict[str, Any] = None, **kwa):\n \"Adds attributes to a class with its `__init__` previously decorated by `@initdefaults`\"\n if isinstance(protected, dict):\n for i, j in protected.items():\n setattr(cls, '_'+i, j)\n cls.__init__.UPDATER.attrs += tuple((i, '_'+i) for i in protected)\n\n for i, j in kwa.items():\n setattr(cls, i, j)\n cls.__init__.UPDATER.attrs += tuple((i, i) for i in kwa)\n\nType = TypeVar('Type')\ndef fieldnames(obj) -> FrozenSet[str]:\n \"Returns attribute and property names of the object\"\n dico = frozenset(name\n for name in getattr(obj, '__dict__', ())\n if 'a' <= name[0] <= 'z')\n desc = frozenset(name\n for name, tpe in obj.__class__.__dict__.items()\n if ('a' <= name[0] <= 'z'\n and callable(getattr(tpe, '__set__', None))\n and not getattr(tpe, 'fset', '') is None))\n return dico | desc\n\ndef _update(cpy: Optional[Callable[[Type], Type]], obj:Type, always:bool, **attrs) -> Type:\n \"Sets field to provided values\"\n fields = fieldnames(obj) & frozenset(attrs)\n if cpy is not None and (always or len(fields)):\n obj = cpy(obj)\n\n if len(fields):\n for name in fields:\n val = attrs[name]\n if val is not NoArgs:\n setattr(obj, name, val)\n return obj\n\ndef update(obj:Type, **attrs) -> Type:\n \"Sets field to provided values\"\n return _update(None, obj, False, **attrs)\n\ndef updatecopy(obj:Type, _always_ = False, **attrs) -> Type:\n \"Sets field to provided values on a copied object\"\n return _update(copy, obj, _always_, **attrs)\n\ndef updatedeepcopy(obj:Type, _always_ = False, **attrs) -> Type:\n \"Sets field to provided values on a deepcopied object\"\n return _update(deepcopy, obj, _always_, **attrs)\n","repo_name":"depixusgenome/libanalysis","sub_path":"utils/attrdefaults.py","file_name":"attrdefaults.py","file_ext":"py","file_size_in_byte":12717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15570321457","text":"from itertools import chain\nfrom collections import defaultdict\nfrom json import dumps\nfrom metapool import KLSampleSheet\nfrom os import makedirs, walk, listdir\nfrom os.path import join, exists, split, basename, dirname\nfrom sequence_processing_pipeline.ConvertJob import ConvertJob\nfrom sequence_processing_pipeline.FastQCJob import FastQCJob\nfrom sequence_processing_pipeline.GenPrepFileJob import GenPrepFileJob\nfrom sequence_processing_pipeline.PipelineError import PipelineError\nfrom sequence_processing_pipeline.Pipeline import Pipeline\nfrom sequence_processing_pipeline.QCJob import QCJob\nfrom subprocess import Popen, PIPE\nimport pandas as pd\nfrom glob import glob\nfrom shutil import copyfile\n\n\nclass FailedSamplesRecord:\n def __init__(self, output_dir, samples):\n # because we want to write out the list of samples that failed after\n # each Job is run, and we want to organize that output by project, we\n # need to keep a running state of failed samples, and reuse the method\n # to reorganize the running-results and write them out to disk.\n\n self.output_path = join(output_dir, 'failed_samples.html')\n\n # create an initial dictionary with sample-ids as keys and their\n # associated project-name and status as values. 
Afterwards, we'll\n # filter out the sample-ids w/no status (meaning they were\n # successfully processed) before writing the failed entries out to\n # file.\n self.sample_state = {x.Sample_ID: [x.Sample_Project, None] for x in\n samples}\n\n def write(self, failed_ids, job_name):\n for failed_id in failed_ids:\n # as a rule, if a failed_id were to appear in more than one\n # audit(), preserve the earliest failure, rather than the\n # latest one.\n if self.sample_state[failed_id][1] is None:\n self.sample_state[failed_id][1] = job_name\n\n # filter out the sample-ids w/out a failure status\n filtered_fails = {x: self.sample_state[x] for x in self.sample_state if\n self.sample_state[x][1] is not None}\n\n data = []\n for sample_id in filtered_fails:\n project_name = filtered_fails[sample_id][0]\n failed_at = filtered_fails[sample_id][1]\n data.append({'Project': project_name, 'Sample ID': sample_id,\n 'Failed at': failed_at})\n df = pd.DataFrame(data)\n\n with open(self.output_path, 'w') as f:\n f.write(df.to_html(border=2, index=False, justify=\"left\",\n render_links=True, escape=False))\n\n\nclass Step:\n '''\n The base Step class wraps the creation and running of the Job classes\n that are common to both Amplicon and Metagenomic Pipeline. Functionality\n specific to one pipeline or the other is handled in the appropriate\n subclass and makes calls to this base class as needed. In this way the\n codebase is kept DRY.\n '''\n\n AMPLICON_TYPE = 'Amplicon'\n METAGENOMIC_TYPE = 'Metagenomic'\n METATRANSCRIPTOMIC_TYPE = 'Metatranscriptomic'\n META_TYPES = {METAGENOMIC_TYPE, METATRANSCRIPTOMIC_TYPE}\n ALL_TYPES = META_TYPES.union(AMPLICON_TYPE)\n AMPLICON_SUB_TYPES = {'16S', '18S', 'ITS'}\n\n def __init__(self, pipeline, master_qiita_job_id,\n status_update_callback=None,\n lane_number=None):\n if pipeline is None:\n raise ValueError(\"A pipeline object is needed to initialize Step\")\n\n if master_qiita_job_id is None:\n raise ValueError(\"A Qiita job-id is needed to initialize Step\")\n\n self.pipeline = pipeline\n self.lane_number = lane_number\n self.master_qiita_job_id = master_qiita_job_id\n\n if status_update_callback is not None:\n self.update_callback = status_update_callback.update_job_status\n else:\n self.update_callback = None\n\n # for now, hardcode this at the legacy value, since we've never\n # changed it.\n self.job_pool_size = 30\n\n # initialize other member variables so that they're always present,\n # even when the step that populates them hasn't been run yet.\n self.project_names = None\n self.cmds = None\n self.cmds_log_path = None\n # set by child classes for use in parent class\n self.prep_file_paths = None\n # set by child classes for use in parent class\n self.has_replicates = None\n self.sifs = None\n self.tube_id_map = None\n self.samples_in_qiita = None\n self.output_path = None\n self.sample_state = None\n self.special_map = None\n self.touched_studies_prep_info = None\n self.run_prefixes = {}\n self.prep_copy_index = 0\n\n @classmethod\n def generate_pipeline(cls, pipeline_type, input_file_path, lane_number,\n config_fp,\n run_identifier, out_dir, job_id):\n if pipeline_type in Step.META_TYPES:\n cls.update_sample_sheet(input_file_path, lane_number)\n return Pipeline(config_fp, run_identifier, input_file_path, None,\n out_dir, job_id, pipeline_type)\n elif pipeline_type == Step.AMPLICON_TYPE:\n return Pipeline(config_fp, run_identifier, None, input_file_path,\n out_dir, job_id, pipeline_type)\n else:\n raise PipelineError(\n f\"'{pipeline_type}' is not a valid 
Pipeline type.\")\n\n @classmethod\n def update_sample_sheet(cls, sample_sheet_path, lane_number):\n # use KLSampleSheet functionality to add/overwrite lane number.\n sheet = KLSampleSheet(sample_sheet_path)\n for sample in sheet:\n sample['Lane'] = f'{lane_number}'\n\n with open(sample_sheet_path, 'w') as f:\n sheet.write(f)\n\n @classmethod\n def parse_prep_file(cls, prep_file_path, convert_to_dict=True):\n metadata = pd.read_csv(prep_file_path,\n dtype=str,\n delimiter='\\t',\n # forces Pandas to not make the first column\n # the index even when the values appear numeric.\n index_col=False)\n\n if metadata is None:\n raise ValueError(f\"{prep_file_path} does not exist.\")\n\n metadata.set_index('sample_name', inplace=True)\n\n if convert_to_dict:\n return metadata.to_dict('index')\n else:\n return metadata\n\n def generate_artifact_name(self, prep_file_path):\n a_name = f'{self.pipeline.run_id}_{self.lane_number}'\n repl_num = basename(dirname(prep_file_path))\n\n if self.has_replicates is True:\n # this is a replicate sheet file.\n # append a replication number to each name to\n # make it unique from other replicates.\n # return ('%s_r%s' % (a_name, result[1]), True)\n return ('%s_r%s' % (a_name, repl_num), True)\n else:\n # this is a normal pre-prep or sample-sheet.\n return (a_name, False)\n\n def generate_special_map(self, qclient):\n # this function should be able to be tested by passing in simulated =\n # results from qclient.\n\n # trimmed files are stored by qiita_id. Find the qiita_id\n # associated with each project and ensure a subdirectory exists\n # for when it comes time to move the trimmed files.\n\n special_map = []\n results = qclient.get(\"/qiita_db/artifacts/types/\")\n projects = self.pipeline.get_project_info()\n for project in projects:\n upload_path = join(results['uploads'], project['qiita_id'])\n makedirs(upload_path, exist_ok=True)\n special_map.append((project['project_name'], upload_path,\n project['qiita_id']))\n\n self.special_map = special_map\n\n def update_prep_templates(self, qclient):\n '''\n Update prep-template info in Qiita. 
Get dict of prep-ids by study-id.\n :param qclient:\n :return: A dict of lists of prep-ids, keyed by study-id.\n '''\n results = defaultdict(list)\n\n for study_id in self.prep_file_paths:\n for prep_fp in self.prep_file_paths[study_id]:\n metadata = Step.parse_prep_file(prep_fp)\n afact_name, is_repl = self.generate_artifact_name(prep_fp)\n data = {'prep_info': dumps(metadata),\n 'study': study_id,\n 'data_type': None,\n 'job-id': self.master_qiita_job_id,\n 'name': afact_name}\n if self.pipeline.pipeline_type in Step.META_TYPES:\n data['data_type'] = self.pipeline.pipeline_type\n elif self.pipeline.pipeline_type == Step.AMPLICON_TYPE:\n if 'target_gene' in metadata[list(metadata.keys())[0]]:\n tg = metadata[list(metadata.keys())[0]]['target_gene']\n for key in Step.AMPLICON_SUB_TYPES:\n if key in tg:\n data['data_type'] = key\n\n if data['data_type'] is None:\n raise ValueError(\"data_type could not be \"\n \"determined from target_gene \"\n \"column\")\n else:\n raise ValueError(\"target_gene must be specified for \"\n \"amplicon type\")\n else:\n raise ValueError(f\"'{self.pipeline.pipeline_type}' is not \"\n \" a valid pipeline type\")\n\n reply = qclient.post('/qiita_db/prep_template/', data=data)\n prep_id = reply['prep']\n results[study_id].append((prep_id, afact_name, is_repl))\n self.run_prefixes[prep_id] = [metadata[sample]['run_prefix']\n for sample in metadata]\n\n self.touched_studies_prep_info = results\n return results\n\n @classmethod\n def get_samples_in_qiita(cls, qclient, qiita_id):\n '''\n Obtain lists for sample-names and tube-ids registered in Qiita.\n :param qclient: QiitaClient object\n :param qiita_id: Qiita ID for the project in question.\n :return: a tuple of lists, one for sample-names, another for tube-ids.\n '''\n samples = qclient.get(f'/api/v1/study/{qiita_id}/samples')\n\n # remove Qiita ID as a prefix from the sample-names.\n samples = {x.replace(f'{qiita_id}.', '') for x in samples}\n\n # find out if tube-ids are registered in the study.\n categories = qclient.get(f'/api/v1/study/{qiita_id}'\n '/samples/info')['categories']\n\n if 'tube_id' in categories:\n tids = qclient.get(f'/api/v1/study/{qiita_id}/samples/'\n 'categories=tube_id')['samples']\n else:\n tids = None\n\n return (samples, tids)\n\n def _convert_bcl_to_fastq(self, config, input_file_path):\n convert_job = ConvertJob(self.pipeline.run_dir,\n self.pipeline.output_path,\n input_file_path,\n config['queue'],\n config['nodes'],\n config['nprocs'],\n config['wallclock_time_in_minutes'],\n config['per_process_memory_limit'],\n config['executable_path'],\n config['modules_to_load'],\n self.master_qiita_job_id)\n\n convert_job.run(callback=self.update_callback)\n\n return convert_job\n\n def _quality_control(self, config, input_file_path):\n qc_job = QCJob(join(self.pipeline.output_path, 'ConvertJob'),\n self.pipeline.output_path,\n input_file_path,\n config['minimap_databases'],\n config['kraken2_database'],\n config['queue'],\n config['nodes'],\n config['nprocs'],\n config['wallclock_time_in_minutes'],\n config['job_total_memory_limit'],\n config['fastp_executable_path'],\n config['minimap2_executable_path'],\n config['samtools_executable_path'],\n config['modules_to_load'],\n self.master_qiita_job_id,\n self.job_pool_size,\n config['job_max_array_length'])\n\n qc_job.run(callback=self.update_callback)\n\n return qc_job\n\n def _generate_reports(self):\n config = self.pipeline.configuration['fastqc']\n is_amplicon = self.pipeline.pipeline_type == Step.AMPLICON_TYPE\n fastqc_job = 
FastQCJob(self.pipeline.run_dir,\n self.pipeline.output_path,\n join(self.pipeline.output_path, 'ConvertJob'),\n join(self.pipeline.output_path, 'QCJob'),\n config['nprocs'],\n config['nthreads'],\n config['fastqc_executable_path'],\n config['modules_to_load'],\n self.master_qiita_job_id,\n config['queue'],\n config['nodes'],\n config['wallclock_time_in_minutes'],\n config['job_total_memory_limit'],\n self.job_pool_size,\n config['multiqc_config_file_path'],\n config['job_max_array_length'],\n is_amplicon)\n\n fastqc_job.run(callback=self.update_callback)\n\n return fastqc_job\n\n def _generate_prep_file(self, config, input_file_path, seqpro_path,\n project_names):\n is_amplicon = self.pipeline.pipeline_type == Step.AMPLICON_TYPE\n\n gpf_job = GenPrepFileJob(\n self.pipeline.run_dir,\n join(self.pipeline.output_path, 'ConvertJob'),\n join(self.pipeline.output_path, 'QCJob'),\n self.pipeline.output_path,\n input_file_path,\n seqpro_path,\n project_names,\n config['modules_to_load'],\n self.master_qiita_job_id,\n is_amplicon=is_amplicon)\n\n gpf_job.run(callback=self.update_callback)\n\n # concatenate the lists of paths across all study_ids into a single\n # list. Replace sample-names w/tube-ids in all relevant prep-files.\n preps = list(chain.from_iterable(gpf_job.prep_file_paths.values()))\n self._overwrite_prep_files(preps)\n\n return gpf_job\n\n def _helper_process_fastp_report_dirs(self):\n report_dirs = []\n\n for root, dirs, files in walk(self.pipeline.output_path):\n for dir_name in dirs:\n if dir_name == 'fastp_reports_dir':\n # generate the full path for this directory before\n # truncating everything up to the QCJob directory.\n full_path = join(root, dir_name).split('QCJob/')\n report_dirs.append(join('QCJob', full_path[1]))\n\n if report_dirs:\n report_dirs.sort()\n return 'tar zcvf reports-QCJob.tgz ' + ' '.join(report_dirs)\n else:\n # It is okay to return an empty list of commands if reports_dirs\n # is empty. 
Some pipelines do not generate fastp reports.\n return []\n\n def _helper_process_blanks(self):\n results = [x for x in listdir(self.pipeline.output_path) if\n x.endswith('_blanks.tsv')]\n\n results.sort()\n\n if len(results) > 0:\n return 'tar zcvf sample-files.tgz' + ' ' + ' '.join(results)\n\n def _helper_process_operations(self):\n RESULTS_DIR = 'final_results'\n TAR_CMD = 'tar zcvf'\n LOG_PREFIX = 'logs'\n REPORT_PREFIX = 'reports'\n PREP_PREFIX = 'prep-files'\n CONVERT_JOB = 'ConvertJob'\n QC_JOB = 'QCJob'\n FASTQC_JOB = 'FastQCJob'\n PREPFILE_JOB = 'GenPrepFileJob'\n TAR_EXT = 'tgz'\n\n op_meta = [(['ConvertJob/logs'], TAR_CMD,\n f'{LOG_PREFIX}-{CONVERT_JOB}.{TAR_EXT}', 'OUTPUT_FIRST'),\n\n (['ConvertJob/Reports', 'ConvertJob/logs'], TAR_CMD,\n f'{REPORT_PREFIX}-{CONVERT_JOB}.{TAR_EXT}',\n 'OUTPUT_FIRST'),\n\n (['QCJob/logs'], TAR_CMD,\n f'{LOG_PREFIX}-{QC_JOB}.{TAR_EXT}', 'OUTPUT_FIRST'),\n\n (['FastQCJob/logs'], TAR_CMD,\n f'{LOG_PREFIX}-{FASTQC_JOB}.{TAR_EXT}', 'OUTPUT_FIRST'),\n\n (['FastQCJob/fastqc'], TAR_CMD,\n f'{REPORT_PREFIX}-{FASTQC_JOB}.{TAR_EXT}', 'OUTPUT_FIRST'),\n\n (['GenPrepFileJob/logs'], TAR_CMD,\n f'{LOG_PREFIX}-{PREPFILE_JOB}.{TAR_EXT}', 'OUTPUT_FIRST'),\n\n (['GenPrepFileJob/PrepFiles'], TAR_CMD,\n f'{PREP_PREFIX}.{TAR_EXT}', 'OUTPUT_FIRST'),\n\n (['failed_samples.html', 'touched_studies.html'],\n 'mv', RESULTS_DIR, 'INPUTS_FIRST'),\n\n (['FastQCJob/multiqc'], 'mv', RESULTS_DIR, 'INPUTS_FIRST')]\n\n cmds = []\n\n for inputs, action, output, order in op_meta:\n confirmed_inputs = []\n for input in inputs:\n if exists(join(self.pipeline.output_path, input)):\n # it's expected that some inputs may not exist due to\n # different pipeline types. If one or more inputs do not\n # exist, do not include them in the command-line as they\n # may cause an error.\n confirmed_inputs.append(input)\n\n # do not add the command to the list unless at least one of\n # the inputs exists. It's okay for a command to go unprocessed.\n if confirmed_inputs:\n # convert to string form before using.\n confirmed_inputs = ' '.join(confirmed_inputs)\n if order == 'OUTPUT_FIRST':\n cmds.append(f'{action} {output} {confirmed_inputs}')\n elif order == 'INPUTS_FIRST':\n cmds.append(f'{action} {confirmed_inputs} {output}')\n else:\n raise ValueError(f\"'{order}' is not a defined order of \"\n \"operations\")\n\n return cmds\n\n def generate_commands(self):\n cmds = self._helper_process_operations()\n\n result = self._helper_process_fastp_report_dirs()\n\n if result:\n cmds.append(result)\n\n result = self._helper_process_blanks()\n\n if result:\n cmds.append(result)\n\n # if one or more tar-gzip files are found (which we expect there to\n # be), move them into the 'final_results' directory. 
However, if none\n # are present, don't raise an error.\n cmds.append('(find *.tgz -maxdepth 1 -type f | xargs mv -t '\n 'final_results) || true')\n\n # prepend each command with a change-directory to the correct\n # location.\n cmds = [f'cd {self.pipeline.output_path}; {cmd}' for cmd in cmds]\n\n self.cmds = cmds\n\n self.write_commands_to_output_path()\n\n def _get_fastq_files(self, out_dir, project):\n af = None\n sub_folders = ['amplicon', 'filtered_sequences', 'trimmed_sequences']\n for sub_folder in sub_folders:\n sf = f'{out_dir}/QCJob/{project}/{sub_folder}'\n if exists(sf):\n af = [f for f in glob(f'{sf}/*.fastq.gz')]\n break\n if af is None or not af:\n raise PipelineError(\"QCJob output not in expected location\")\n\n files = {'raw_barcodes': [], 'raw_forward_seqs': [],\n 'raw_reverse_seqs': []}\n\n for fastq_file in af:\n if '_I1_' in fastq_file:\n files['raw_barcodes'].append(fastq_file)\n elif '_R1_' in fastq_file:\n files['raw_forward_seqs'].append(fastq_file)\n elif '_R2_' in fastq_file:\n files['raw_reverse_seqs'].append(fastq_file)\n else:\n raise ValueError(f\"Unrecognized file: {fastq_file}\")\n\n files['raw_barcodes'].sort()\n files['raw_forward_seqs'].sort()\n files['raw_reverse_seqs'].sort()\n\n # Amplicon runs should contain raw_barcodes/I1 files.\n # Meta*omics files doesn't use them.\n if self.pipeline.pipeline_type != Step.AMPLICON_TYPE:\n del (files['raw_barcodes'])\n\n # confirm expected lists of reads are not empty.\n for f_type in files:\n if not files[f_type]:\n # if one or more of the expected list of reads is empty,\n # raise an Error.\n raise ValueError(f\"'{f_type}' is empty\")\n\n return files\n\n def _load_prep_into_qiita(self, qclient, prep_id, artifact_name,\n qiita_id, project, fastq_files, atype):\n surl = f'{qclient._server_url}/study/description/{qiita_id}'\n prep_url = (f'{qclient._server_url}/study/description/'\n f'{qiita_id}?prep_id={prep_id}')\n\n # ideally we would use the email of the user that started the SPP\n # run but at this point there is no easy way to retrieve it\n pdata = {'user_email': 'qiita.help@gmail.com',\n 'prep_id': prep_id,\n 'artifact_type': atype,\n 'command_artifact_name': artifact_name,\n 'add_default_workflow': True,\n 'files': dumps(fastq_files)}\n\n job_id = qclient.post('/qiita_db/artifact/', data=pdata)['job_id']\n\n return {'Project': project, 'Qiita Study ID': qiita_id,\n 'Qiita Prep ID': prep_id, 'Qiita URL': surl,\n 'Artifact Name': artifact_name,\n 'Prep URL': prep_url, 'Linking JobID': job_id}\n\n def _copy_files(self, files):\n # increment the prep_copy_index before generating a new set of copies.\n self.prep_copy_index += 1\n new_files = {}\n for key in files:\n new_files[key] = []\n for some_path in files[key]:\n path_name, file_name = split(some_path)\n path_name = join(path_name, f'copy{self.prep_copy_index}')\n makedirs(path_name, exist_ok=True)\n new_files[key].append(join(path_name, file_name))\n\n for key in files:\n for src, dst in zip(files[key], new_files[key]):\n copyfile(src, dst)\n return new_files\n\n def load_preps_into_qiita(self, qclient):\n atype = 'per_sample_FASTQ'\n if self.pipeline.pipeline_type == Step.AMPLICON_TYPE:\n atype = 'FASTQ'\n\n data = []\n for project, _, qiita_id in self.special_map:\n fastq_files = self._get_fastq_files(\n self.pipeline.output_path, project)\n\n for vals in self.touched_studies_prep_info[qiita_id]:\n prep_id, artifact_name, is_repl = vals\n if self.pipeline.pipeline_type == Step.AMPLICON_TYPE:\n if is_repl:\n # for Amplicon runs, each prep needs a copy 
of the\n # entire set of fastq files, because demuxing samples\n # happens downstream. If we don't make copies of the\n # files, Qiita will move the files when loading the\n # first prep and they won't be available for the\n # second prep and after.\n # Note that this will leave the original files present\n # in the working directory after processing instead of\n # being moved.\n working_set = self._copy_files(fastq_files)\n else:\n working_set = fastq_files\n else:\n # for meta*omics, generate the subset of files used by\n # this prep only.\n working_set = {}\n for key in fastq_files:\n working_set[key] = []\n for run_prefix in self.run_prefixes[prep_id]:\n working_set[key] += [fastq for fastq in\n fastq_files[key] if\n run_prefix in fastq]\n\n if is_repl:\n working_set = self._copy_files(working_set)\n\n data.append(self._load_prep_into_qiita(\n qclient, prep_id, artifact_name, qiita_id, project,\n working_set, atype))\n\n df = pd.DataFrame(data)\n opath = join(self.pipeline.output_path, 'touched_studies.html')\n with open(opath, 'w') as f:\n f.write(df.to_html(border=2, index=False, justify=\"left\",\n render_links=True, escape=False))\n\n return df\n\n def write_commands_to_output_path(self):\n self.cmds_log_path = join(self.pipeline.output_path, 'cmds.log')\n with open(self.cmds_log_path, 'w') as f:\n for cmd in self.cmds:\n f.write(f'{cmd}\\n')\n\n def execute_commands(self):\n # execute the list of commands in order\n for cmd in self.cmds:\n p = Popen(cmd, universal_newlines=True, shell=True,\n stdout=PIPE, stderr=PIPE)\n std_out, std_err = p.communicate()\n return_code = p.returncode\n\n if return_code != 0:\n # during testing, ignore processes that fail and continue\n # to test other commands.\n raise PipelineError(f\"'{cmd}' returned {return_code}\")\n\n def generate_sifs(self, qclient):\n from_qiita = {}\n\n for study_id in self.prep_file_paths:\n samples = list(qclient.get(f'/api/v1/study/{study_id}/samples'))\n from_qiita[study_id] = samples\n\n add_sif_info = []\n\n qid_pn_map = {proj['qiita_id']: proj['project_name'] for\n proj in self.pipeline.get_project_info()}\n\n # in case we really do need to query for samples again:\n # assume set of valid study_ids can be determined from prep_file_paths.\n for study_id in from_qiita:\n samples = from_qiita[study_id]\n # generate a list of (sample-name, project-name) pairs.\n project_name = qid_pn_map[study_id]\n samples = [(x, project_name) for x in samples]\n add_sif_info.append(pd.DataFrame(data=samples,\n columns=['sample_name',\n 'project_name']))\n\n # convert the list of dataframes into a single dataframe.\n add_sif_info = pd.concat(add_sif_info,\n ignore_index=True).drop_duplicates()\n\n # generate SIF files with add_sif_info as additional metadata input.\n # duplicate sample-names and non-blanks will be handled properly.\n self.sifs = self.pipeline.generate_sample_info_files(add_sif_info)\n\n return self.sifs\n\n def get_prep_file_paths(self):\n return self.prep_file_paths\n\n def _get_tube_ids_from_qiita(self, qclient):\n # Update get_project_info() so that it can return a list of\n # samples in projects['samples']. 
Include blanks in projects['blanks']\n # just in case there are duplicate qiita_ids\n qiita_ids = [proj['qiita_id'] for proj in\n self.pipeline.get_project_info(short_names=True)]\n\n tids_by_qiita_id = {}\n sample_names_by_qiita_id = {}\n\n for qiita_id in qiita_ids:\n # Qiita returns a set of sample-ids in qsam and a dictionary where\n # sample-names are used as keys and tube-ids are their values.\n qsam, tids = self.get_samples_in_qiita(qclient, qiita_id)\n\n if tids is None:\n sample_names_by_qiita_id[str(qiita_id)] = qsam\n else:\n # fix values in tids to be a string instead of a list of one.\n # also, remove the qiita_id prepending each sample-name.\n tids = {k.replace(f'{qiita_id}.', ''): tids[k][0] for k in\n tids}\n\n # the values Qiita returns for tids seems like it can include\n # empty strings if there is no tube-id associated with a\n # sample-name. For now assume it doesn't happen in production\n # and if prep-files have empty sample-names we'll know.\n tids_by_qiita_id[str(qiita_id)] = tids\n\n # use empty dict {} as an indication that get_tube_ids_from_qiita was\n # called but no tube-ids were found for any project.\n self.tube_id_map = tids_by_qiita_id\n # should samples_in_qiita be none if tube_id_map is not?\n self.samples_in_qiita = sample_names_by_qiita_id\n\n def _compare_samples_against_qiita(self, qclient):\n projects = self.pipeline.get_project_info(short_names=True)\n self._get_tube_ids_from_qiita(qclient)\n\n results = []\n for project in projects:\n project_name = project['project_name']\n qiita_id = str(project['qiita_id'])\n\n # get list of samples as presented by the sample-sheet or mapping\n # file and confirm that they are all registered in Qiita.\n samples = set(self.pipeline.get_sample_names(project_name))\n\n # do not include BLANKs. If they are unregistered, we will add\n # them downstream.\n samples = {smpl for smpl in samples\n if not smpl.startswith('BLANK')}\n\n # just get a list of the tube-ids themselves, not what they map\n # to.\n if qiita_id in self.tube_id_map:\n # if map is not empty\n tids = [self.tube_id_map[qiita_id][sample] for sample in\n self.tube_id_map[qiita_id]]\n\n not_in_qiita = samples - set(tids)\n\n if not_in_qiita:\n # strip any leading zeroes from the sample-ids. 
Note that\n # if a sample-id has more than one leading zero, all of\n # them will be removed.\n not_in_qiita = set([x.lstrip('0') for x in samples]) - \\\n set(tids)\n\n examples = tids[:5]\n used_tids = True\n else:\n # assume project is in samples_in_qiita\n not_in_qiita = samples - set(self.samples_in_qiita[qiita_id])\n examples = list(samples)[:5]\n used_tids = False\n\n # convert to strings before returning\n examples = [str(example) for example in examples]\n\n # return an entry for all projects, even when samples_not_in_qiita\n # is an empty list, as the information is still valuable.\n\n results.append({'samples_not_in_qiita': not_in_qiita,\n 'examples_in_qiita': examples,\n 'project_name': project_name,\n 'tids': used_tids})\n\n return results\n\n @classmethod\n def _replace_with_tube_ids(cls, prep_file_path, tube_id_map):\n # passing tube_id_map as a parameter allows for easier testing.\n df = pd.read_csv(prep_file_path, sep='\\t', dtype=str, index_col=False)\n # save copy of sample_name column as 'old_sample_name'\n df['old_sample_name'] = df['sample_name']\n for i in df.index:\n sample_name = df.at[i, \"sample_name\"]\n # blanks do not get their names swapped.\n if sample_name.startswith('BLANK'):\n continue\n\n # remove leading zeroes if they exist to match Qiita results.\n sample_name = sample_name.lstrip('0')\n\n reversed_map = {tube_id_map[k]: k for k in tube_id_map}\n if sample_name in reversed_map:\n df.at[i, \"sample_name\"] = reversed_map[sample_name]\n\n df.to_csv(prep_file_path, index=False, sep=\"\\t\")\n\n def _overwrite_prep_files(self, prep_file_paths):\n # replaces sample-names in prep-files with tube-ids according to\n # a dict with project-names as keys and another dict as a value.\n # this dict uses sample-names as keys and tube-ids as values.\n if self.tube_id_map is None:\n raise ValueError(\"get_tube_ids_from_qiita() was not called\")\n\n projects = self.pipeline.get_project_info(short_names=True)\n\n for project in projects:\n project_name = project['project_name']\n qiita_id = str(project['qiita_id'])\n\n if qiita_id not in self.tube_id_map:\n continue\n\n # prep files are named in the form:\n # 20220423_FS10001773_12_BRB11603-0615.Matrix_Tube_LBM_14332.1.tsv\n # search on project_name vs qiita_id since it's slightly more\n # unique.\n matching_files = [prep_file for prep_file in prep_file_paths if\n project_name in prep_file]\n\n if len(matching_files) == 0:\n continue\n\n if len(matching_files) > 1:\n raise ValueError(\"More than one match found for project \"\n f\"'{project_name}': {str(matching_files)}\")\n\n Step._replace_with_tube_ids(matching_files[0],\n self.tube_id_map[qiita_id])\n\n def update_blanks_in_qiita(self, qclient):\n for sif_path in self.sifs:\n # get study_id from sif_file_name ...something_14385_blanks.tsv\n study_id = sif_path.split('_')[-2]\n\n df = pd.read_csv(sif_path, delimiter='\\t', dtype=str)\n\n # Prepend study_id to make them compatible w/list from Qiita.\n df['sample_name'] = f'{study_id}.' + df['sample_name'].astype(str)\n\n # SIFs only contain BLANKs. 
Get the list of potentially new BLANKs.\n blank_ids = [i for i in df['sample_name'] if 'blank' in i.lower()]\n blanks = df[df['sample_name'].isin(blank_ids)]['sample_name']\n if len(blanks) == 0:\n # we have nothing to do so let's return early\n return\n\n # Get list of BLANKs already registered in Qiita.\n from_qiita = qclient.get(f'/api/v1/study/{study_id}/samples')\n from_qiita = [x for x in from_qiita if\n x.startswith(f'{study_id}.BLANK')]\n\n # Generate list of BLANKs that need to be ADDED to Qiita.\n new_blanks = (set(blanks) | set(from_qiita)) - set(from_qiita)\n\n if len(new_blanks):\n # Generate dummy entries for each new BLANK, if any.\n categories = qclient.get(f'/api/v1/study/{study_id}/samples/'\n 'info')['categories']\n\n # initialize payload w/required dummy categories\n data = {i: {c: 'control sample' for c in categories} for i in\n new_blanks}\n\n # populate payload w/additional columns and/or overwrite\n # existing columns w/metadata from SIF file.\n sif_data = df.set_index('sample_name').T.to_dict()\n for new_blank in new_blanks:\n for column in sif_data[new_blank]:\n data[new_blank][column] = sif_data[new_blank][column]\n\n # http_patch will raise Error if insert failed.\n qclient.http_patch(f'/api/v1/study/{study_id}/samples',\n data=dumps(data))\n\n def precheck(self, qclient):\n # compare sample-ids/tube-ids in sample-sheet/mapping file\n # against what's in Qiita. Results are a list of dictionaries, one\n # per project.\n results = self._compare_samples_against_qiita(qclient)\n\n # obtain a list of non-zero counts of samples missing in Qiita, one\n # for each project. The names of the projects are unimportant. We\n # want to abort early if any project in the sample-sheet/pre-prep file\n # contains samples that aren't registered in Qiita.\n tmp = [len(project['samples_not_in_qiita']) for project in results]\n missing_counts = [count for count in tmp if count != 0]\n\n if missing_counts:\n msgs = []\n for comparison in results:\n not_in_qiita = list(comparison['samples_not_in_qiita'])\n not_in_qiita_count = len(not_in_qiita)\n examples_in_qiita = ', '.join(comparison['examples_in_qiita'])\n p_name = comparison['project_name']\n uses_tids = comparison['tids']\n\n msgs.append(\n f\"
Project '{p_name}' has {not_in_qiita_count} \"\n f\"samples not registered in Qiita: {not_in_qiita[:5]}\")\n\n msgs.append(f\"Some registered samples in Project '{p_name}'\"\n f\" include: {examples_in_qiita}\")\n\n if uses_tids:\n msgs.append(f\"Project '{p_name}' is using tube-ids. You \"\n \"may be using sample names in your file.\")\n\n if msgs:\n raise PipelineError('\\n'.join(msgs))\n\n def execute_pipeline(self, qclient, increment_status, update=True):\n '''\n Executes steps of pipeline in proper sequence.\n :param qclient: Qiita client library or equivalent.\n :param increment_status: callback function to increment status.\n :param update: Set False to prevent updates to Qiita.\n :return: None\n '''\n self.generate_special_map(qclient)\n\n increment_status()\n self.convert_bcl_to_fastq()\n\n increment_status()\n self.quality_control()\n\n increment_status()\n self.generate_reports()\n\n increment_status()\n self.generate_prep_file()\n\n increment_status()\n self.sifs = self.generate_sifs(qclient)\n\n increment_status() # increment status regardless of update\n if update:\n self.update_blanks_in_qiita(qclient)\n\n increment_status() # status encompasses multiple operations\n if update:\n self.update_prep_templates(qclient)\n\n # before we load preps into Qiita we need to copy the fastq\n # files n times for n preps and correct the file-paths each\n # prep is pointing to.\n self.load_preps_into_qiita(qclient)\n\n increment_status()\n self.generate_commands()\n\n increment_status()\n if update:\n self.execute_commands()\n","repo_name":"qiita-spots/qp-knight-lab-processing","sub_path":"qp_klp/Step.py","file_name":"Step.py","file_ext":"py","file_size_in_byte":39481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31827586696","text":"\"\"\"\n\n102. Binary Tree Level Order Traversal\nMedium\n\nhttps://leetcode.com/problems/binary-tree-level-order-traversal/\n\nGiven the root of a binary tree, return the level order traversal of its nodes' values. 
(i.e., from left to right, level by level).\n\n \n\nExample 1:\n\n\nInput: root = [3,9,20,null,null,15,7]\nOutput: [[3],[9,20],[15,7]]\nExample 2:\n\nInput: root = [1]\nOutput: [[1]]\nExample 3:\n\nInput: root = []\nOutput: []\n \n\nConstraints:\n\nThe number of nodes in the tree is in the range [0, 2000].\n-1000 <= Node.val <= 1000\nAccepted\n1,159,513\nSubmissions\n1,918,065\n\n\"\"\"\n\n\n# Definition for a binary tree node.\nfrom collections import deque\nfrom typing import List, Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n if not root:\n return []\n res = []\n\n queue = deque([root])\n\n while queue:\n\n length = len(queue)\n level = []\n while length:\n popedNode = queue.popleft()\n if popedNode.left:\n queue.append(popedNode.left)\n if popedNode.right:\n queue.append(popedNode.right)\n level.append(popedNode.val)\n length -= 1\n res.append(level[::])\n return res\n","repo_name":"UdayKiranPadhy/DS-And-Algo","sub_path":"Data Structures/Graphs/46-Binary Tree Level Order Traversal.py","file_name":"46-Binary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"43570832742","text":"\nimport logging\nimport logging.handlers\n\n\nlogging.basicConfig(level=logging.DEBUG,# log level printed to the console\n filename='log.log',\n filemode='a',## mode: 'w' or 'a'; 'w' is write mode, which rewrites the log each time, overwriting the previous log\n # 'a' is append mode, which is the default if not specified\n format=\n '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n # log format\n )\n# logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n# level=logging.DEBUG)\ndef log(text):\n logging.info(text)","repo_name":"ericyhx/sgqyz","sub_path":"game/logutils/mylog.py","file_name":"mylog.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30296828742","text":"\n#%%\n\npoints = [(2, -2), (5, 6), (-4, -4), (-7, 1), (8, 14)]\n\nsum_mean_abs_error = 0\nsum_mean_squared_error = 0\n\nfor point in points:\n x = point[0]\n y = point[1]\n y_hat = 1.2 * x + 2\n sum_mean_abs_error = sum_mean_abs_error + abs(y - y_hat)\n sum_mean_squared_error = sum_mean_squared_error + pow(y - y_hat, 2)\n\n\nmean_abs_error = sum_mean_abs_error / len(points)\nmean_squared_error = sum_mean_squared_error / (len(points) * 2)\n\nprint ('mean abs error:' + str(mean_abs_error))\nprint ('mean squared error:' + str(mean_squared_error))\n\n#%%\nimport numpy as np\n\nlearn_rate = 0.01\nX = [2, 5, -4, -7, 8]\ny = [-2, 6, -4, 1, 14]\nW = [1.2]\nb = 2\n\ny_hat = np.multiply(W, X)\ny_hat += b\n\nerror = y - y_hat\n\nW_new = W + learn_rate * np.matmul(error, X)\nb_new = b + learn_rate * error.sum() \n\nprint(W_new)\nprint(b_new)","repo_name":"wegesdal/udacity-ml","sub_path":"Supervised_Learning/mean_squared_absolute_error.py","file_name":"mean_squared_absolute_error.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23587156361","text":"import sys, os, re, collections, heapq, math\n\ndef print_result (case_num, result):\n print('Case #{}: {}'.format(case_num + 1, result))\n\n\ndef phase1 (activities):\n old = activities[-1][2]\n changes = 0\n buf = 0\n old_end = 0\n reserves = {'C':[], 
'J':[]}\n self_time = {'C':0, 'J':0}\n for i,act in enumerate(activities):\n dur = act[1]-act[0]\n dur_between = act[0] - old_end\n if old == act[2]:\n reserves[act[2]].append(dur_between)\n self_time[act[2]] += dur_between\n else:\n changes += 1\n buf += dur_between\n self_time[act[2]] += dur\n old_end = act[1]\n old = act[2]\n\n last_fellow = activities[-1][2]\n first_fellow = activities[0][2]\n if first_fellow == last_fellow:\n reserves[first_fellow][0] += 60*24 - activities[-1][1]\n self_time[first_fellow] += 60*24 - activities[-1][1]\n else:\n buf += 60*24 - activities[-1][1]\n return changes, buf, reserves, self_time\n\ndef phase2 (L_self_time, H_self_time, H_reserves, changes):\n assert L_self_time < H_self_time\n H_reserves.sort(reverse=True)\n for dur in H_reserves:\n assert dur > 0\n assert L_self_time + H_self_time == 60*24, (L_self_time, H_self_time)\n L_self_time += dur\n H_self_time -= dur\n changes += 2\n if L_self_time >= H_self_time:\n return changes\n assert False\n\ndef solve (activities):\n activities.sort(key=lambda a:a[0])\n changes, buf, reserves, self_time = phase1 (activities)\n if abs(self_time['C'] - self_time['J']) <= buf:\n return changes\n if self_time['J'] < self_time['C']:\n L,H = 'J', 'C'\n else:\n L,H = 'C', 'J'\n return phase2(self_time[L] + buf, self_time[H], reserves[H], changes)\n\ndef main():\n for case_num in range(int(input())):\n ac, aj = map(int,input().split())\n activities = []\n for _ in range(ac):\n s,e = map(int,input().split())\n activities.append((s,e,'C'))\n for _ in range(aj):\n s,e = map(int,input().split())\n activities.append((s,e,'J'))\n print_result(case_num,solve(activities))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/106.py","file_name":"106.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42522181967","text":"import subprocess\nfrom typing import Optional, Sequence, Union\n\nfrom build.makeproject import MakeProject\nfrom .toolchain import AnyToolchain\n\nclass OpenSSLProject(MakeProject):\n def __init__(self, url: Union[str, Sequence[str]], md5: str, installed: str,\n **kwargs):\n MakeProject.__init__(self, url, md5, installed, install_target='install_dev', **kwargs)\n\n def get_make_args(self, toolchain: AnyToolchain) -> list[str]:\n return MakeProject.get_make_args(self, toolchain) + [\n 'CC=' + toolchain.cc,\n 'CFLAGS=' + toolchain.cflags,\n 'CPPFLAGS=' + toolchain.cppflags,\n 'AR=' + toolchain.ar,\n 'RANLIB=' + toolchain.ranlib,\n 'build_libs',\n ]\n\n def get_make_install_args(self, toolchain: AnyToolchain) -> list[str]:\n # OpenSSL's Makefile runs \"ranlib\" during installation\n return MakeProject.get_make_install_args(self, toolchain) + [\n 'RANLIB=' + toolchain.ranlib,\n ]\n\n def _build(self, toolchain: AnyToolchain, target_toolchain: Optional[AnyToolchain]=None) -> None:\n src = self.unpack(toolchain, out_of_tree=False)\n\n # OpenSSL has a weird target architecture scheme with lots of\n # hard-coded architectures; this table translates between our\n # host triplet and the OpenSSL target\n openssl_archs = {\n # not using \"android-*\" because those OpenSSL targets want\n # to know where the SDK is, but our own build scripts\n # prepared everything already to look like a regular Linux\n # build\n 'armv7a-linux-androideabi': 'linux-generic32',\n 'aarch64-linux-android': 'linux-aarch64',\n 'i686-linux-android': 'linux-x86-clang',\n 
'x86_64-linux-android': 'linux-x86_64-clang',\n\n # generic Linux (not used by XCSoar)\n 'arm-linux-gnueabihf': 'linux-generic32',\n\n # Kobo\n 'armv7a-kobo-linux-musleabihf': 'linux-generic32',\n\n # Windows\n 'i686-w64-mingw32': 'mingw',\n 'x86_64-w64-mingw32': 'mingw64',\n\n # Apple\n 'x86_64-apple-darwin': 'darwin64-x86_64-cc',\n 'aarch64-apple-darwin': 'darwin64-arm64-cc',\n }\n\n configure = [\n './Configure',\n 'no-shared',\n 'no-module',\n 'no-engine',\n 'no-static-engine',\n 'no-async',\n 'no-tests',\n 'no-makedepend',\n '--libdir=lib', # no \"lib64\" on amd64, please\n '--prefix=' + toolchain.install_prefix,\n ]\n\n if toolchain.is_windows:\n # workaround for build failures\n configure.append('no-asm')\n\n if toolchain.host_triplet is not None:\n configure.append(openssl_archs[toolchain.host_triplet])\n configure.append(f'--cross-compile-prefix={toolchain.host_triplet}-')\n\n subprocess.check_call(configure, cwd=src, env=toolchain.env)\n self.build_make(toolchain, src)\n","repo_name":"XCSoar/XCSoar","sub_path":"build/python/build/openssl.py","file_name":"openssl.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":311,"dataset":"github-code","pt":"61"} +{"seq_id":"20812902749","text":"import random\nimport requests\n\n\ndef random_pokemon():\n pokemon_number = random.randint(1, 898)\n url = 'https://pokeapi.co/api/v2/pokemon/{}/'.format(pokemon_number)\n response = requests.get(url)\n pokemon = response.json()\n return {\n 'name': pokemon['name'],\n 'hp': pokemon['stats'][0]['base_stat'],\n 'attack': pokemon['stats'][1]['base_stat'],\n 'defense': pokemon['stats'][2]['base_stat'],\n 'special attack': pokemon['stats'][3]['base_stat'],\n 'special defense': pokemon['stats'][4]['base_stat'],\n 'speed': pokemon['stats'][5]['base_stat'],\n }\n\n\nf = open('battle.txt', 'w+')\nf.write('A wild Pokemon has appeared! \\n')\nf.close()\n\n\ndef run():\n no_of_rounds = int(input('How many rounds do you want to play? '))\n for game_rounds in range(no_of_rounds):\n print('A wild Pokemon has appeared!')\n my_pokemon_1 = random_pokemon()\n my_pokemon_2 = random_pokemon()\n my_pokemon_3 = random_pokemon()\n print('You have the following Pokemon in your party:')\n print('Pokemon 1: {}'.format(my_pokemon_1['name'].title()))\n print('Pokemon 2: {}'.format(my_pokemon_2['name'].title()))\n print('Pokemon 3: {}'.format(my_pokemon_3['name'].title()))\n fighting_pokemon = input(\n 'Which Pokemon would you like to choose? (Pick: 1, 2, or 3) ')\n if fighting_pokemon == '1':\n print('{}, I choose you! '.format(my_pokemon_1['name'].title()))\n fighting_pokemon = my_pokemon_1\n f = open('battle.txt', 'a+')\n f.write('You chose to fight with {}! '.format(\n my_pokemon_1['name'].title()))\n f.close()\n elif fighting_pokemon == '2':\n print('{}, I choose you! '.format(my_pokemon_2['name'].title()))\n fighting_pokemon = my_pokemon_2\n f = open('battle.txt', 'a+')\n f.write('You chose to fight with {}! '.format(\n my_pokemon_2['name'].title()))\n f.close()\n elif fighting_pokemon == '3':\n print('{}, I choose you!'.format(my_pokemon_3['name'].title()))\n fighting_pokemon = my_pokemon_3\n f = open('battle.txt', 'a+')\n f.write('You chose to fight with {}! '.format(\n my_pokemon_3['name'].title()))\n f.close()\n stat_choice = input(\n 'Which stat do you want to use? 
\\n (hp, attack, defense, special attack, special defense, speed) '\n )\n opponent_pokemon = random_pokemon()\n print('The wild Pokemon is {}'.format(\n opponent_pokemon['name'].title()))\n f = open('battle.txt', 'a+')\n f.write('You fought against {}! '.format(\n opponent_pokemon['name'].title()))\n f.close()\n my_stat = (fighting_pokemon[stat_choice])\n print(f'Your Pokemon\\'s stat is {my_stat}')\n opponent_stat = opponent_pokemon[stat_choice]\n print(f'The opposing Pokemon\\'s stat is {opponent_stat}')\n if my_stat > opponent_stat:\n print(\n 'Yay!! Your Pokemon has won! You throw a Pokeball and capture the wild Pokemon! \\n'\n )\n f = open('battle.txt', 'a+')\n f.write('You won! \\n')\n f.close()\n elif my_stat < opponent_stat:\n print(\n 'Oh no!! Your Pokemon was defeated! The wild Pokemon escapes! \\n'\n )\n f = open('battle.txt', 'a+')\n f.write('You lost! \\n')\n f.close()\n else:\n print('Your Pokemon and the wild Pokemon are at a stalemate! \\n')\n f = open('battle.txt', 'a+')\n f.write('It was a draw! \\n')\n f.close()\n\n\nrun()\n","repo_name":"quitepeculiar/Intro_To_Python_Final_Group_Project_CFG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7041655002","text":"\"\"\"\n Module tests Layer's methods.\n\"\"\"\n\nimport numpy as np\nfrom pytest import approx\n\nfrom layer import InputLayer, OutputLayer\nfrom neuron import OutputNeuron\n\nWINDOW = np.array([0.5, 0.3, 0.4,\n 0.3, 0.6, 0.2,\n 1.0, 0.4, 0.3,\n 0.4, 0.2, 0.4,\n 0.1, 0.5])\n\ndef test_make_candidate():\n \"\"\"\n Test asserts if candidate creation working correctly.\n \"\"\"\n output_layer = OutputLayer(10)\n\n order, mod, c_coef, neuron_age = np.array(\n [6, 5, 3, 1, 0, 2, 4]), 0.5, 0.5, 10\n\n candidate = output_layer.make_candidate(\n WINDOW, order, mod, c_coef, neuron_age)\n\n correct_weights = np.array(\n [0.015625, 0.03125, 0.125, 0.5, 1, 0.25, 0.0625])\n\n assert isinstance(candidate.output_value, float)\n\n assert candidate.addition_time == neuron_age\n assert candidate.psp == 0\n assert candidate.modification_count == 1\n\n np.testing.assert_array_almost_equal(\n candidate.weights, correct_weights, decimal=3)\n assert candidate.max_psp == approx(1.333, abs=1e-3)\n assert candidate.gamma == approx(0.666, abs=1e-3)\n\ndef test_find_most_similar_without_neurons():\n \"\"\"\n Test asserts if method return None and np.if when there aren't neurons in output layer.\n \"\"\"\n output_layer = OutputLayer(10)\n candidate = OutputNeuron(\n np.array([0.25, 0.25, 0.25]), 0.25, 0.1, 1, 0.25, 0.75, 2)\n\n neuron_result, distance = output_layer.find_most_similar(candidate)\n\n assert neuron_result is None\n assert np.isinf(distance)\n\ndef test_find_most_similar_with_neurons():\n \"\"\"\n Test asserts if method return nearest neuron when there are neurons in output layer.\n \"\"\"\n output_layer = OutputLayer(10)\n\n neuron_out1 = OutputNeuron(\n np.array([0.26, 0.26, 0.26]), 0.25, 0.1, 1, 0.25, 0.75, 2)\n neuron_out2 = OutputNeuron(\n np.array([1.0, 1.0, 1.0]), 0.25, 0.1, 1, 0.25, 0.75, 2)\n neuron_out3 = OutputNeuron(\n np.array([0.0, 0.0, 0.0]), 0.25, 0.1, 1, 0.25, 0.75, 2)\n\n output_layer.add_new_neuron(neuron_out1)\n output_layer.add_new_neuron(neuron_out2)\n output_layer.add_new_neuron(neuron_out3)\n\n c_neuron = OutputNeuron(\n np.array([0.25, 0.25, 0.25]), 0.25, 0.1, 1, 0.25, 0.75, 2)\n\n neuron_result, distance = output_layer.find_most_similar(c_neuron)\n\n assert 
neuron_result == neuron_out1\n assert distance == approx(0.0173, abs=1e-4)\n\ndef test_reset_psp():\n \"\"\"\n Test assert if zeroing PSP working correctly.\n \"\"\"\n output_layer = OutputLayer(10)\n neuron_out1 = OutputNeuron(\n np.array([]), 0.0, 0.0, 0.0, 0.0, PSP=5.0, max_PSP=10.0)\n neuron_out2 = OutputNeuron(\n np.array([]), 0.0, 0.0, 0.0, 0.0, PSP=6.0, max_PSP=10.0)\n neuron_out3 = OutputNeuron(\n np.array([]), 0.0, 0.0, 0.0, 0.0, PSP=7.0, max_PSP=10.0)\n\n output_layer.add_new_neuron(neuron_out1)\n output_layer.add_new_neuron(neuron_out2)\n output_layer.add_new_neuron(neuron_out3)\n\n output_layer.reset_psp()\n\n for neuron in output_layer:\n assert neuron.psp == 0.0\n\ndef test_add_new_neuron():\n \"\"\"\n Test assert if method add new neuron correctly.\n \"\"\"\n output_layer = OutputLayer(10)\n assert output_layer.num_neurons == 0\n assert len(output_layer) == 0\n\n neuron_out1 = OutputNeuron(np.array([]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n output_layer.add_new_neuron(neuron_out1)\n assert output_layer.num_neurons == 1\n assert len(output_layer) == 1\n assert output_layer[0] == neuron_out1\n\n neuron_out2 = OutputNeuron(np.array([]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n output_layer.add_new_neuron(neuron_out2)\n assert output_layer.num_neurons == 2\n assert len(output_layer) == 2\n assert output_layer[1] == neuron_out2\n\ndef test_replace_oldest():\n \"\"\"\n Test assert if method replace oldest neuron correctly.\n \"\"\"\n output_layer = OutputLayer(10)\n\n neuron_out1 = OutputNeuron(np.array([]), 0.0, 0.0, 0.0,\n PSP=0.0, max_PSP=0.0, addition_time=1.0)\n neuron_out2 = OutputNeuron(np.array([]), 0.0, 0.0, 0.0,\n PSP=0.0, max_PSP=0.0, addition_time=2.0)\n neuron_out3 = OutputNeuron(np.array([]), 0.0, 0.0, 0.0,\n PSP=0.0, max_PSP=0.0, addition_time=3.0)\n candidate = output_layer.make_candidate(\n WINDOW, np.array([1, 2, 3]), 0.0, 0.0, 10)\n\n output_layer.add_new_neuron(neuron_out1)\n output_layer.add_new_neuron(neuron_out2)\n output_layer.add_new_neuron(neuron_out3)\n\n output_layer.replace_oldest(candidate)\n\n assert neuron_out1 not in output_layer\n assert candidate in output_layer\n\ndef test_len_magic_method():\n \"\"\"\n Test assert if len method working correctly for Layer's classes.\n \"\"\"\n input_layer = InputLayer(10)\n\n assert len(input_layer) == 10\n\n output_layer = OutputLayer(10)\n\n assert len(output_layer) == 0\n\n output_layer.add_new_neuron(OutputNeuron(\n np.array([]), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n\n assert len(output_layer) == 1\n","repo_name":"MariuszPaluch2001/OeSNN-Anomaly-Detector","sub_path":"OeSNN-AD/test_layers.py","file_name":"test_layers.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15105357652","text":"import math\n\ndef findAmicable(x):\n\tdivisors=[1]\n\tfor i in range(2, math.floor(x**(0.5))):\n\t\tif x%i == 0:\n\t\t\tdivisors.append(i)\n\t\t\tdivisors.append(x/i)\n\treturn int(math.fsum(divisors))\n#print(findAmicable(1184))\n\nrealAmicables=[]\nfor i in range(4, 10000):\n\td = findAmicable(i)\n\tp = findAmicable(d)\n\tif p == i:\n\t\trealAmicables.append(d)\n\t\trealAmicables.append(p)\n#temp = set(realAmicables)\n#realAmicables = temp\nprint(realAmicables)\nprint 
(math.fsum(realAmicables))\n","repo_name":"Michaelborsellino/Euler-Project-Python-3.0","sub_path":"projEuler21.py","file_name":"projEuler21.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70966073156","text":"from Game_Of_Life import next_board_state, get_live_neighbors\r\n\r\n# TODO: there's a lot of repeated code here. Can\r\n# you move some of into reusable functions to\r\n# make it shorter and neater?\r\n\r\ndef do_test(expected_state, actual_state, test_number):\r\n if expected_state == actual_state:\r\n print(\"PASSED \" + str(test_number))\r\n else:\r\n print(\"FAILED \" + str(test_number) + \"!\")\r\n print(\"Expected:\")\r\n print(expected_state)\r\n print(\"Actual:\")\r\n print(actual_state)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # TEST 1: dead cells with no live neighbors\r\n # should stay dead.\r\n test_number = 1\r\n\r\n init_state1 = [\r\n [0,0,0],\r\n [0,0,0],\r\n [0,0,0]\r\n ]\r\n expected_next_state1 = [\r\n [0,0,0],\r\n [0,0,0],\r\n [0,0,0]\r\n ]\r\n actual_next_state1 = next_board_state(init_state1)\r\n do_test(expected_next_state1, actual_next_state1, test_number)\r\n\r\n\r\n # TEST 2: dead cells with exactly 3 neighbors\r\n # should come alive.\r\n test_number = 2\r\n\r\n init_state2 = [\r\n [0,0,1],\r\n [0,1,1],\r\n [0,0,0]\r\n ]\r\n expected_next_state2 = [\r\n [0,1,1],\r\n [0,1,1],\r\n [0,0,0]\r\n ]\r\n actual_next_state2 = next_board_state(init_state2)\r\n do_test(expected_next_state2, actual_next_state2, test_number)\r\n\r\n\r\n # TEST 3: any live cell with 0 or 1 live neighbors becomes dead\r\n test_number = 3\r\n\r\n init_state3 = [\r\n [1,0,0],\r\n [0,0,1],\r\n [0,0,1]\r\n ]\r\n expected_next_state3 = [\r\n [0,0,0],\r\n [0,0,0],\r\n [0,0,0]\r\n ]\r\n actual_next_state3 = next_board_state(init_state3)\r\n do_test(expected_next_state3, actual_next_state3, test_number)\r\n\r\n\r\n # TEST 4: any live cell 2 or 3 live neighbors stays alive\r\n test_number = 4\r\n\r\n init_state4 = [\r\n [1,1,0],\r\n [0,1,1],\r\n [0,0,0]\r\n ]\r\n expected_next_state4 = [\r\n [1,1,1],\r\n [1,0,1],\r\n [0,0,0]\r\n ]\r\n actual_next_state4 = next_board_state(init_state4)\r\n do_test(expected_next_state4, actual_next_state4, test_number)\r\n\r\n # TEST 5: any live cell with more than 3 live neighbors becomes dead\r\n test_number = 5\r\n\r\n init_state5 = [\r\n [1,1,0],\r\n [0,1,1],\r\n [0,0,1]\r\n ]\r\n expected_next_state5 = [\r\n [1,1,1],\r\n [1,0,1],\r\n [0,1,1]\r\n ]\r\n actual_next_state5 = next_board_state(init_state5)\r\n do_test(expected_next_state5, actual_next_state5, test_number)","repo_name":"davidwliu/game-of-life","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2288852652","text":"import json\nfrom datetime import datetime\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom django.db import transaction\n\nfrom .models import Transactions, Wallet\nfrom .serializers import WalletSerializer, TransactionSummarySerializer, TransactionPayloadSerializer\n\n\n# Create your views here.\n\nclass AccountsView(APIView):\n \"\"\"\n View class for account specific operation\n \"\"\"\n def get(self, request, account_id=None):\n \"\"\"\n Get Accounts or 
get Account by account_id\n @param request: api request\n @param account_id: int\n The method returns all accounts if account_id is None or not provided,\n else returns the account for the same account_id.\n \"\"\"\n print(\"Incoming Request - \", request)\n if account_id:\n account_id = int(account_id)\n list_accounts = Wallet.objects.filter(account=account_id).select_related(\"account\")\n serializer = WalletSerializer(list_accounts, many=True)\n else:\n list_accounts = Wallet.objects.all().select_related(\"account\")\n serializer = WalletSerializer(list_accounts, many=True)\n\n return Response(serializer.data)\n\n\nclass TransactionsView(GenericAPIView):\n \"\"\"\n View class for transaction related operations\n \"\"\"\n serializer_class = TransactionPayloadSerializer\n\n def get(self, request, wallet_id):\n \"\"\"\n Get transactions for wallet_id\n @param request: api request\n @param wallet_id: int - required\n The method returns all transactions related to the specific wallet for customer\n \"\"\"\n print(\"Incoming Request - \", request)\n wallet_id = int(wallet_id)\n\n wallet_transactions = Transactions.objects.filter(wallet_id=wallet_id).select_related(\"wallet_id\").\\\n select_related(\"wallet_id__account\")\n\n serializer = TransactionSummarySerializer(wallet_transactions, many=True)\n return Response(serializer.data)\n\n def post(self, request, wallet_id):\n \"\"\"\n Transfer money between wallets\n @param request: api request\n @param wallet_id: int - required\n @body amount: float - required\n @body referenced_wallet_id - required (recipient_wallet_id)\n The method transfers money from wallet_id to referenced_wallet_id, if operation could not be performed,\n 4xx error is returned\n \"\"\"\n try:\n request_body = request.body\n request_dict = json.loads(request_body)\n if not request_dict.get(\"referenced_wallet_id\") or not request_dict.get(\"amount\") or \\\n request_dict[\"amount\"] <= 0:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n amount = request_dict[\"amount\"]\n\n with transaction.atomic():\n sender_obj = Wallet.objects.select_for_update().get(wallet_id=wallet_id)\n if sender_obj.balance - amount <= 0:\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n receiver_obj = Wallet.objects.select_for_update().get(wallet_id=request_dict[\"referenced_wallet_id\"])\n\n payout_tran = Transactions(wallet_id=sender_obj, amount=amount,\n transaction_type=\"payout\", status=\"success\",\n referenced_wallet_id=receiver_obj, created_at=datetime.now(),\n created_by=\"payment_module\")\n\n payin_tran = Transactions(wallet_id=receiver_obj, amount=amount,\n transaction_type=\"payin\", status=\"success\",\n referenced_wallet_id=sender_obj, created_at=datetime.now(),\n created_by=\"payment_module\")\n\n sender_obj.balance = sender_obj.balance - amount\n receiver_obj.balance = receiver_obj.balance + amount\n\n payout_tran.save()\n payin_tran.save()\n sender_obj.save()\n receiver_obj.save()\n except ObjectDoesNotExist as e:\n print(\"Exception raised -\", str(e))\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n return Response(status=status.HTTP_200_OK)\n","repo_name":"mcrispinjebin/coins","sub_path":"wallet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28439459061","text":"import string\nfrom itertools import product\nimport time\nfrom shutil import copyfile\n\nfrom crypo.utils import Menu\nfrom hashlib import sha256, 
sha512, sha384, md5, sha1, sha224\nfrom .containers import Container\nfrom .services import IOService\nfrom dependency_injector.wiring import inject, Provide\n\n@inject\ndef hash_function(service: IOService = Provide[Container.service]):\n    service.print(\"Hashing\", mode='header')\n    message = service.input(\"enter the message to hash\")\n    hashing_algorithm = service.Menu([\n        (\"SHA1 \", lambda: sha1_hash),\n        (\"SHA224\", lambda: sha224_hash),\n        (\"SHA256\", lambda: sha256_hash),\n        (\"SHA384\", lambda: sha384_hash),\n        (\"SHA512\", lambda: sha512_hash),\n        (\"MD5\", lambda: md5_hash),\n    ],choice_message=\"choose a hashing algorithm\").run()\n    hashed = hashing_algorithm(message)\n    service.print(\"hashed:\")\n    service.print(hashed, mode='code')\n\ndef sha1_hash(message):\n    return sha1(message.encode()).hexdigest()\n\ndef sha224_hash(message):\n    return sha224(message.encode()).hexdigest()\n\ndef sha256_hash(message):\n    return sha256(message.encode()).hexdigest()\n\ndef sha384_hash(message):\n    return sha384(message.encode()).hexdigest()\n\ndef sha512_hash(message):\n    return sha512(message.encode()).hexdigest()\n\ndef md5_hash(message):\n    return md5(message.encode()).hexdigest()\n\ndef hash_word(word, hash_algo):\n    if hash_algo.upper() == 'SHA256':\n        return sha256(word.encode()).hexdigest()\n    elif hash_algo.upper() == 'SHA512':\n        return sha512(word.encode()).hexdigest()\n    elif hash_algo.upper() == 'SHA384':\n        return sha384(word.encode()).hexdigest()\n    elif hash_algo.upper() == 'SHA1':\n        return sha1(word.encode()).hexdigest()\n    elif hash_algo.upper() == 'MD5':\n        return md5(word.encode()).hexdigest()\n    elif hash_algo.upper() == 'SHA224':\n        return sha224(word.encode()).hexdigest()\n\n@inject\ndef detect_hash(hashed, service: IOService = Provide[Container.service]):\n    # the hash type is inferred from the digest length in hex characters\n    if len(hashed) == 128:\n        return 'SHA512'\n    elif len(hashed) == 96:\n        return 'SHA384'\n    elif len(hashed) == 64:\n        return 'SHA256'\n    elif len(hashed) == 40:\n        return 'SHA1'\n    elif len(hashed) == 32:\n        return 'MD5'\n    elif len(hashed) == 56:\n        return 'SHA224'\n    else:\n        service.print('Could not auto detect hash type\\n', mode='warning')\n        return None\n\n\n@inject\ndef crack_hash(service: IOService = Provide[Container.service]):\n    service.print(\"Hash Cracking\", mode='header')\n    hashed = service.input(\"enter the hash to crack\").strip()\n    cracking_technique = service.Menu([\n        (\"Dictionary attack\", lambda: dictionary_attack),\n        (\"Brute force attack\", lambda: brute_force_attack),\n    ],choice_message=\"choose the technique to use\").run()\n    \n    message = cracking_technique(hashed)\n    service.print(message, mode='code')\n\n@inject\ndef import_dict(service: IOService = Provide[Container.service]):\n    source = service.read_file(\"Provide the dictionary filepath\")\n    imported_filename = f'imported{time.strftime(\"%Y%m%d-%H%M%S\")}.txt'\n    destination = f\"./crypo/dictionnaries/{imported_filename}\"\n    copyfile(source, destination)\n    service.print(\"Imported\", mode='success')\n    return imported_filename\n\n@inject\ndef dictionary_attack(hashed, service: IOService = Provide[Container.service]):\n    algo = detect_hash(hashed)\n\n    if not algo:\n        return \"Failed to crack hash\"\n\n    dictionary = service.Menu([\n        (\"Plain text Dictionary\", lambda: \"plaintext.txt\"),\n        (\"French Dictionary\", lambda: \"french.txt\"),\n        (\"English Dictionary\", lambda: \"english.txt\"),\n        (\"Import and use Dictionary\", import_dict)\n    ],choice_message=\"choose a dictionary\").run()\n\n    service.submit(message='Submit')\n    with open(\"./crypo/dictionnaries/\" + 
dictionary) as dictionary:\n        for line in dictionary:\n            words = line.split()\n            for word in words:\n                hashed_word = hash_word(word, algo)\n                if hashed_word == hashed:\n                    return word\n    return \"Failed to crack hash\"\n\n    \n@inject\ndef brute_force_attack(hashed, service: IOService = Provide[Container.service]):\n    algo = detect_hash(hashed)\n\n    if not algo:\n        return \"Failed to crack hash\"\n\n    charset = service.Menu([\n        (\"Letters\", lambda: string.ascii_letters),\n        (\"Lowercase Letters\", lambda: string.ascii_lowercase),\n        (\"Uppercase Letters\", lambda: string.ascii_uppercase),\n        (\"Digits\", lambda: string.digits),\n        (\"Alphanumerical Characters\", lambda: string.digits + string.ascii_letters),\n    ],choice_message=\"choose the charset to use\").run()\n\n    service.submit(message='Submit')\n\n    for length in range(4, 11):\n        words = product(charset, repeat=length)\n        for word in words:\n            word = \"\".join(word)\n            hashed_word = hash_word(word, algo)\n            if hashed_word == hashed:\n                return word\n\n    return \"Failed to crack hash\"\n","repo_name":"medazizammari/crypo","sub_path":"crypo/hashing.py","file_name":"hashing.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"6725932396","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 24 23:07:42 2022\n\n@author: akseluhr\n\"\"\"\n\nimport numpy as np\nimport pickle\nimport numpy.matlib\nimport matplotlib.pyplot as plt\nimport sys\n\n###############################################################\n\n# Matlab -> Python functions\n\n###############################################################\n\n# Loads an entire batch\ndef LoadBatch(filename):\n\t\"\"\" Copied from the dataset website \"\"\" \n\twith open('Datasets/'+filename, 'rb') as fo:\n\t\tdict = pickle.load(fo, encoding='bytes') \n\treturn dict\n\n# Calculate softmax for class estimation of each image (vector)\ndef softmax(x):\n\t\"\"\" Standard definition of the softmax function \"\"\"\n\texp_x = np.exp(x)\n\treturn exp_x / np.sum(exp_x, axis=0)\n\n# Numerical gradient check via centred differences\ndef ComputeGradsNumSlow(X, Y, W, b, lambd, k, h, beta=None, gamma=None, mean=None, var=None, batch_norm=False):\n    \"\"\"\n    Compute gradients numerically via the centred difference method\n    :param X: numpy array of shape dxN, batch of N input images\n    :param Y: numpy array of shape KxN, batch of N one-hot image labels\n    :param W: list of nxm numpy arrays, containing (in order) the weights of the first layer, then the second, etc.\n    :param b: list of nx1 numpy arrays, containing (in order) the biases of the first layer, then the second, etc.\n    :param lambd: float, lambda parameter for loss function\n    :param h: float, step size for numerical analysis\n    :return: list of numpy arrays grad_W, grad_b, of same format as W and b, respectively\n    \"\"\"\n    grad_W = [np.zeros(w_n.shape) for w_n in W]\n    grad_b = [np.zeros(b_n.shape) for b_n in b]\n    \n    if(batch_norm):\n        grad_gamma = [gamma_l.copy() for gamma_l in gamma]\n        grad_beta = [beta_l.copy() for beta_l in beta]\n        c = ComputeCost(X, Y, W, b, lambd, k, gamma, beta, mean, var, batch_norm)\n\n\n    for j in range(len(grad_b)):\n        bj_size = grad_b[j].shape[0]\n        for i in range(bj_size):\n            b_try = [bj.copy() for bj in b]\n            b_try[j][i] -= h\n            c1 = ComputeCost(X, Y, W, b_try, lambd, k)\n\n            b_try = [bj.copy() for bj in b]\n            b_try[j][i] += h\n            c2 = ComputeCost(X, Y, W, b_try, lambd, k)\n\n            grad_b[j][i] = (c2 - c1) / (2*h)\n\n    for j in range(len(grad_W)):\n        Wj = grad_W[j]\n        for i in range(Wj.shape[0]):\n            # 'col' avoids shadowing the layer count k\n            for col in range(Wj.shape[1]):\n                W_try = [wj.copy() for wj in W]\n                W_try[j][i,col] -= h\n                c1 = ComputeCost(X, Y, W_try, b, lambd, k)\n\n                W_try = [wj.copy() for wj in W]\n                W_try[j][i,col] += h\n                c2 = ComputeCost(X, Y, W_try, b, lambd, k)\n                grad_W[j][i,col] = (c2 - c1) / (2*h)\n    \n        if j<(k-1) and batch_norm:\n            # Gradients for gamma (forward difference)\n            for i in range(gamma[j].shape[0]):\n                gamma_try = [gamma_l.copy() for gamma_l in gamma]\n                gamma_try[j][i,0] += h\n                c2 = ComputeCost(X, Y, W, b, lambd, k, gamma_try, beta, mean, var, batch_norm)\n                grad_gamma[j][i,0] = (c2-c)/h\n\n            # Gradients for betas (forward difference)\n            for i in range(beta[j].shape[0]):\n                beta_try = [beta_l.copy() for beta_l in beta]\n                beta_try[j][i,0] += h\n                c2 = ComputeCost(X, Y, W, b, lambd, k, gamma, beta_try, mean, var, batch_norm)\n                grad_beta[j][i,0] = (c2-c)/h\n    \n    if batch_norm:\n        return grad_W, grad_b, grad_gamma, grad_beta\n    else:\n        return grad_W, grad_b\n\n\n# Allows for efficiently viewing the images in a directory or \n# in a *Matlab* array or cell array\ndef montage(W):\n\t\"\"\" Display the image for each label in W \"\"\"\n\timport matplotlib.pyplot as plt\n\tfig, ax = plt.subplots(2,5)\n\tfor i in range(2):\n\t\tfor j in range(5):\n\t\t\tim = W[i*5+j,:].reshape(32,32,3, order='F')\n\t\t\tsim = (im-np.min(im[:]))/(np.max(im[:])-np.min(im[:]))\n\t\t\tsim = sim.transpose(1,0,2)\n\t\t\tax[i][j].imshow(sim, interpolation='nearest')\n\t\t\tax[i][j].set_title(\"y=\"+str(5*i+j))\n\t\t\tax[i][j].axis('off')\n\tplt.show()\n \n###############################################################\n\n# My functions\n\n###############################################################\n\n# Read pixel data, labels (classes), one-hot rep. of labels (classes)\n# Divide pixel data by 255 for correct format\ndef ReadData(filename):\n    data_batch = LoadBatch(filename)\n    pixel_data = data_batch[b'data'].T\n    labels = data_batch[b'labels']\n    one_hot = np.eye(10)[labels].T\n    return pixel_data, one_hot, labels \n\n# Normalize w.r.t. 
training data mean and standard deviation\n# Normalization of input so that the inputs are at a comparable range\ndef Normalize(train, validation,test=None):\n    train=np.float64(train)\n    validation=np.float64(validation)\n    \n    mean_X =train.mean(axis=1)\n\n    std_X=train.std(axis=1)\n\n    train=train-mean_X[:,None]\n\n    train=train/std_X[:,None]\n    validation=validation-mean_X[:,None]\n    validation=validation/std_X[:,None]\n    \n    if(test is not None):\n        test=np.float64(test)\n        test=test-mean_X[:,None]\n        test=test/std_X[:,None]\n        return train,validation,test;\n    \n    return train,validation;\n\n# First init of model params W(eights) and b(ias)\n# He init: zero mean, std = sqrt(2/d) at every layer\ndef GetWeightAndBias(X, Y, k, hidden_dimension, batch_norm=False):\n    \n    W=[None]*(k+1)\n    b=[None]*(k+1)\n    beta=[None]*k\n    gamma=[None]*k\n\n    # Image pixels (input layer)\n    d=X.shape[0]\n    # Num of classes (output layer)\n    K = 10\n    \n    for i in range(k):\n        \n        # Changes dimensions for each iteration\n        m=hidden_dimension[i]\n\n        # He initialization for weights\n        W[i] = np.random.normal(size=(m, d), loc=0, scale=(np.sqrt(2/d)))\n        b[i] = np.zeros(shape=(m,1))\n        \n        if(batch_norm):\n            beta[i] = np.zeros(shape=(m,1))\n            gamma[i] = np.ones(shape=(m,1))\n        \n        # Update d\n        d = m\n    \n    # Final output layer\n    W[k] = np.random.normal(size=(K, d), loc=0, scale=(np.sqrt(2/d)))\n    b[k] = np.zeros(shape=(K,1))\n    \n    if(batch_norm):\n        # return gamma before beta to match the callers' unpacking order\n        return W, b, gamma, beta\n    \n    return W, b\n\n# This is a second attempt at weight init since the former didn't work for 9 layers.\ndef GetWeightAndBiasProper(X, k, hidden_dimension, batch_norm=False):\n    \n    input_dimension = 3072\n    # print(input_dimension)\n    output_dimension = 10\n    k = len(hidden_dimension)+1\n    W, b, gamma, beta = [None]*k, [None]*k, [None]*(k-1), [None]*(k-1)\n    n_nodes = [input_dimension]+hidden_dimension+[output_dimension]\n    \n    np.random.seed(400)\n    for l in range(k):\n        inputs = n_nodes[l]\n        outputs = n_nodes[l+1]\n        \n        # He init\n        scale = np.sqrt(2/inputs) \n        \n        W[l] = np.random.normal(size=(outputs, inputs), loc=0, scale=scale)\n        b[l] = np.zeros((outputs, 1))\n        if l<(k-1):\n            gamma[l] = np.ones((outputs, 1))\n            beta[l] = np.zeros((outputs, 1))\n    \n    return W, b, gamma, beta\n\n# Weight init for sigma experiments\ndef GetWeightsAndBiasSigma(X,input_dimension,hidden_dimension,output_dimension,k,batch_norm=False,std=0.01):\n    W=[None]*(k+1)\n    b=[None]*(k+1)\n    if(batch_norm):\n        beta = [None]*k\n        gamma = [None]*k\n    d = input_dimension\n    \n    # Essentially the same as the former one \n    # but we keep inputting new standard deviation values (see next function)\n    for i in range(k):\n        m=hidden_dimension[i]\n        W[i] = np.random.normal(size=(m, d), loc=0, scale=std)\n        b[i] = np.zeros(shape=(m,1))\n        if(batch_norm):\n            beta[i] = np.zeros(shape=(m,1))\n            gamma[i] = np.ones(shape=(m,1))\n        d = m\n    \n    W[k] = np.random.normal(size=(output_dimension, d), loc=0, scale=std)\n    b[k] = np.zeros(shape=(output_dimension,1))\n    \n    if(batch_norm):\n        # gamma before beta, consistent with the other init functions\n        return W, b, gamma, beta\n    else:\n        return W,b\n\n# Inputs each specified sigma in the for loop to check weight initialization sensitivity. 
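Runs each scale with and without batch normalization so the resulting test accuracies can be compared side by side.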
\ndef SigmaExperiments(X, Y, y, hidden_dimension, GDparams, X_val,\n                     Y_val, y_val,X_test,Y_test,y_test):\n    \n    for sigma in [1e-1,1e-3,1e-4]:\n        W, b, gamma, beta=GetWeightsAndBiasSigma(X,input_dimension=X.shape[0],hidden_dimension=hidden_dimension,output_dimension=Y.shape[0],k = 2,batch_norm=True,std=sigma)\n        P, S_batch_Norm, S, H, mean_inp, var_inp = EvaluateClassifier(X, W, b, 2, gamma, beta, batch_norm=True)\n        W_upd, b_upd, metrics, gamma_upd, beta_upd, mean_final, var_final = MiniBatchGD2_BN(X, Y, y, GDparams, W, b, 2, 0.002, beta, gamma, mean_inp, var_inp, True, 0.9, X_val,Y_val,y_val)\n        acc = ComputeAccuracy(X_test, y_test, W_upd, b_upd, 2, gamma_upd, beta_upd, batch_norm=True)\n        print('Sigma: '+str(sigma)+ ', accuracy: '+ str(acc) + '. With batch norm.')\n\n\n        # Train the no-BN baseline from its own fresh weights, not from the BN result\n        W, b = GetWeightsAndBiasSigma(X,input_dimension=X.shape[0],hidden_dimension=hidden_dimension,output_dimension=Y.shape[0],k = 2,batch_norm=False,std=sigma)\n        W_upd, b_upd, metrics= MiniBatchGD2(X, Y, y, GDparams, W, b, 2, 0.002, X_val,Y_val,y_val)\n        acc = ComputeAccuracy(X_test, y_test, W_upd, b_upd, 2, batch_norm=False)\n        print('Sigma: '+str(sigma)+ ', accuracy: '+ str(acc) + '. Without batch norm.')\n        print('-'*20)\n\n# Evaluation of the network function\n# Again, softmax returns the probability of each class label\ndef EvaluateClassifier(X, W, b, k, gamma=None, beta=None, mean=None, var=None, batch_norm=False):\n\n    # Initialize different lists to correct dimensions\n    k = len(b)-1\n    S = [None]*(k)\n    H = [X.copy()]+[None]*(k)\n    S_batch_Norm = [None]*k\n    S_scaled = [None]*k\n    \n    if(batch_norm):\n        mean=[None]*k\n        var=[None]*k\n    \n    for l in range(k):\n        # Probas\n        S[l]=np.dot(W[l],H[l])+b[l]\n        \n        # Batch norm will include gammas and betas\n        if(batch_norm):\n            mean[l]=S[l].mean(axis=1).reshape(-1,1)\n            var[l]=S[l].var(axis=1).reshape(-1,1)\n            S_batch_Norm[l]=(S[l]-mean[l])/(np.sqrt(var[l]+1e-15))\n            S_scaled[l] = S_batch_Norm[l]* gamma[l]+beta[l]\n            H[l+1]=np.maximum(0, S_scaled[l])\n        # Else not\n        else:\n            H[l+1]=np.maximum(0,S[l])\n    \n    # Make prediction\n    P=softmax(np.dot(W[k],H[k])+b[k])\n    \n    if(batch_norm):\n        return P, S_batch_Norm, S, H[1:], mean, var\n\n    return P,H[1:]\n\n# Total cost of a set of images:\n# 1. Regularization term, calculate: lambda * sum(W^2 ij)\n# 2. Sum it with l_cross + regularization term -> for each x,y in D\n# 3. 
Multiply everything with 1 / length of D\ndef ComputeCost(X, Y, W, b, lambd, k, gamma=None, beta=None, mean=None, var=None, batch_norm=False):\n \n if(batch_norm):\n P, S_batch_Norm, S, H, mean, var = EvaluateClassifier(X, W, b, k, gamma, beta, batch_norm=True)\n else:\n P, H = EvaluateClassifier(X, W, b, k)\n \n sqrt_W = 0 \n for i in range(len(W)):\n sqrt_W += (W[i]**2).sum()\n \n lcr = -np.sum(np.multiply(Y, np.log(P)))\n Reg_term = lambd*sqrt_W\n J = lcr/X.shape[1]+Reg_term\n \n return J\n\n\n# Accuracy of the network's predictions\n# Percentage of examples for which it gets the correct answer\ndef ComputeAccuracy(X, y, W, b, k, gamma=None, beta=None, mean=None, var=None, batch_norm=False):\n \n if(batch_norm):\n P, S_batch_Norm, S, H, mean, var = EvaluateClassifier(X, W, b, k, gamma, beta, batch_norm=True)\n else: \n P, H = EvaluateClassifier(X, W, b, k)\n\n acc = np.mean(y == np.argmax(P, axis=0))\n \n return acc\n\n# Back propagation if batch normalization is set to True\ndef BatchNormBackPass(G, S, mean, var):\n \n n = S.shape[1]\n sigma1 = ((var+1e-15)**(-0.5))\n sigma2 = ((var+1e-15)**(-1.5))\n G1 = G*(sigma1@(np.ones((1,n))))\n G2 = G*(sigma2@(np.ones((1,n))))\n D = S-(mean@np.ones((1,n)))\n c = (G2*D)@np.ones((n,1))\n G = (G1-1/n*(G1@np.ones((n,1))))*np.ones((1,n))-(1/n*(D*(c@np.ones((1,n)))))\n return G\n \n# Compute gradients of the cost function to see the curve of cost decrease \n# Forward pass is already done since we have already calculated P\ndef ComputeGradients(H, X, Y, P, W, b, lambd, k, gamma=None, beta=None, mean=None, var=None, S_batch_Norm=None,S=None,batch_norm=False):\n \n # Init of lists\n k = len(b)-1\n grad_W = [None]*(k+1)\n grad_b = [None]*(k+1)\n grad_gamma = [None]*(k)\n grad_beta = [None]*k\n \n n = X.shape[1]\n H = [X.copy()]+H\n G = -(Y-P)\n \n grad_W[k] = np.dot(G,H[k].T)/n +2*lambd*W[k]\n grad_b[k] = (np.dot(G,np.ones(shape=(n,1)))/n)\n \n G = np.dot(W[k].T,G)\n G = G*(H[k]>0)\n \n for l in range(k-1,-1,-1):\n if(batch_norm):\n grad_beta[l] = 1/n*(G@np.ones((n,1)))\n grad_gamma[l] = 1/n*((G*S_batch_Norm[l])@np.ones((n,1)))\n \n G = G*(gamma[l]@np.ones((1,n)))\n G = BatchNormBackPass(G,S[l],mean[l],var[l])\n \n grad_W[l] = np.dot(G,H[l].T)/n+2*lambd*W[l]\n grad_b[l] = (np.dot(G,np.ones(shape=(n,1)))/n)\n G = np.dot(W[l].T,G)\n G = G*(H[l]>0)\n \n if(batch_norm):\n return grad_W, grad_b, grad_gamma, grad_beta\n else:\n return grad_W, grad_b\n\ndef ComputeGradsNum_BN(X, Y, lambd, W, b, gamma, beta, mean, var, batch_norm, h=0.000001):\n \n # Create lists for saving the gradients by layers\n grad_W = [W_l.copy() for W_l in W]\n grad_b = [b_l.copy() for b_l in b]\n if batch_norm:\n grad_gamma = [gamma_l.copy() for gamma_l in gamma]\n grad_beta = [beta_l.copy() for beta_l in beta]\n \n # Compute initial cost and iterate layers k\n k = len(W)\n c = ComputeCost(X, Y, W, b, lambd, k, gamma, beta, mean, var, batch_norm)\n\n for l in range(k):\n \n # Gradients for bias\n for i in range(b[l].shape[0]):\n b_try = [b_l.copy() for b_l in b]\n b_try[l][i,0] += h\n c2 = ComputeCost(X, Y, W, b_try, lambd, k, gamma, beta, mean, var, batch_norm)\n grad_b[l][i,0] = (c2-c)/h\n \n # Gradients for weights\n for i in range(W[l].shape[0]):\n for j in range(W[l].shape[1]):\n W_try = [W_l.copy() for W_l in W]\n W_try[l][i,j] += h\n c2 = ComputeCost(X, Y, W_try, b, lambd, k, gamma, beta, mean, var, batch_norm)\n grad_W[l][i,j] = (c2-c)/h\n \n if l<(k-1) and batch_norm:\n \n # Gradients for gamma\n for i in range(gamma[l].shape[0]):\n gamma_try = [gamma_l.copy() for gamma_l in 
gamma]\n gamma_try[l][i,0] += h\n c2 = ComputeCost(X, Y, W, b, lambd, k, gamma_try, beta, mean, var, batch_norm)\n grad_gamma[l][i,0] = (c2-c)/h\n \n # Gradients for betas\n for i in range(beta[l].shape[0]):\n beta_try = [beta_l.copy() for beta_l in beta]\n beta_try[l][i,0] += h\n c2 = ComputeCost(X, Y, W, b, lambd, k, gamma, beta_try, mean, var, batch_norm)\n grad_beta[l][i,0] = (c2-c)/h\n \n if batch_norm:\n return grad_W, grad_b, grad_gamma, grad_beta\n else:\n return grad_W, grad_b\n\n# Check if my analytical gradients \n# Using centered difference function\n# If the differenc is < 1e-6, the analytical gradients are fine\ndef CompareGradients(X,Y, W, b, lambd, k, threshold, gamma=None, beta=None, batch_norm=False):\n \n h=0.000001\n \n # Compute gammas, betas, Ws and bs\n if(batch_norm): \n P, S_batch_Norm, S, H, mean, var = EvaluateClassifier(X, W, b, k, gamma, beta, mean=None, var=None, batch_norm=True)\n grad_Ws_a, grad_bs_a, grad_gamma_a, grad_beta_a = ComputeGradients(H, X, Y, P, W, b, lambd, k, gamma, beta, mean, var, S_batch_Norm, S, batch_norm=True) \n grad_Ws_n, grad_bs_n, grad_gamma_n, grad_beta_n = ComputeGradsNum_BN(X, Y, lambd, W, b, gamma, beta, mean, var, batch_norm, h)\n grad_gamma_abs_err = [None]*(len(grad_gamma_a)) \n grad_beta_abs_err = [None]*(len(grad_beta_a))\n grad_gamma_rel_err = [None]*(len(grad_gamma_a))\n grad_beta_rel_err = [None]*(len(grad_beta_a))\n \n # Ws and bs\n else:\n P, H = EvaluateClassifier(X, W, b, k)\n grad_Ws_a, grad_bs_a = ComputeGradients(H, X, Y,P, W, b, lambd, k)\n grad_Ws_n, grad_bs_n = ComputeGradsNumSlow(X, Y, W, b, lambd, k, h)\n\n w_rel_error = list()\n b_rel_error = list()\n \n # Calculate differences\n print(\"Checking errors for \", k, \"layers\",\"\\n\")\n print(\"--------------------------------------------\")\n\n\n for i in range(len(W)):\n w_rel_error.append(np.sum(np.abs(grad_Ws_a[i] - grad_Ws_n[i])) / np.maximum(0.00000001, np.sum(np.abs(grad_Ws_a[i]) + np.abs(grad_Ws_n[i]))))\n b_rel_error.append(np.sum(np.abs(grad_bs_a[i] - grad_bs_n[i])) / np.maximum(0.00000001, np.sum(np.abs(grad_bs_a[i]) + np.abs(grad_bs_n[i]))))\n\n # Check differences \n if (w_rel_error[i] < threshold and b_rel_error[i] < threshold):\n print(\"Relative error for layer: \", [i+1], \" is ok.\")\n print(\"Weights relative error: \", w_rel_error[i])\n print(\"Bias relative error: \", b_rel_error[i], \"\\n\")\n\n else:\n print(\"Relative error for layer: \", [i+1], \" is too high.\")\n print(\"Weights relative error: \", w_rel_error[i])\n print(\"Bias relative error: \", b_rel_error[i], \"\\n\")\n\n print(\"--------------------------------------------\")\n\n # Calculate gamma and beta differences\n if(batch_norm):\n for i in range(len(grad_gamma_a)):\n grad_gamma_abs_err[i] = np.abs(grad_gamma_n[i]-grad_gamma_a[i])\n grad_beta_abs_err[i] = np.abs(grad_beta_n[i]-grad_beta_a[i])\n print('gamma'+str(i)+str(' ')+str(np.mean(grad_gamma_abs_err[i]<1e-6)*100)+\"% of absolute errors below 1e-6\")\n print('beta'+str(i)+str(' ')+str(np.mean(grad_beta_abs_err[i]<1e-6)*100)+\"% of absolute errors below 1e-6\")\n grad_gamma_rel_err[i] = np.maximum(np.abs(grad_gamma_n[i])+np.abs(grad_gamma_a[i]), 0.00000001)\n grad_beta_rel_err[i] = np.maximum(np.abs(grad_beta_n[i])+np.abs(grad_beta_a[i]), 0.00000001)\n print('gamma'+str(i)+str(' ')+str(np.mean(grad_gamma_abs_err[i]/grad_gamma_rel_err[i]<1e-6)*100)+\n \"% of relative errors below 1e-6\")\n print('beta'+str(i)+str(' ')+str(np.mean(grad_beta_abs_err[i]/grad_beta_rel_err[i]<1e-6)*100)+\n \"% of relative errors below 
1e-6\")\n \n\n# Mini batch GD for NO BN\ndef MiniBatchGD2(X, Y, y, GDparams, W, b, k, lambd, X_val=None, Y_val=None, y_val=None):\n n = X.shape[1]\n (eta_min,eta_max,step_size,n_batch,cycles)=GDparams\n metrics = {'updates':[-1], \n 'Loss_scores':[ComputeCost(X, Y, W, b, lambd, k)], \n 'acc_scores':[ComputeAccuracy(X, y, W, b, k)]}\n if X_val is not None:\n metrics['Loss_val_scores'] = [ComputeCost(X_val, Y_val, W, b, lambd, k)]\n metrics['acc_val_scores'] = [ComputeAccuracy(X_val, y_val, W, b, k)]\n batches = dict()\n\n # N batching \n for j in range(n//n_batch):\n j_start = (j)*n_batch\n j_end = (j+1)*n_batch\n inds = range(j_start,j_end)\n y_batch = [y[index] for index in inds]\n X_batch = X[:, inds]\n Y_batch = Y[:, inds]\n batches[j]=(X_batch,Y_batch,y_batch)\n j = 0\n \n for l in range(cycles):\n for t in range(2*l*step_size, 2*(l+1)*step_size):\n \n if t>= 2*l*step_size and t<(2*l+1)*step_size:\n eta = eta_min+(t-2*l*step_size)/step_size*(eta_max-eta_min)\n elif t>=(2*l+1)*step_size and t<2*(l+1)*step_size:\n eta = eta_max-(t-(2*l+1)*step_size)/step_size*(eta_max-eta_min)\n\n X_batch, Y_batch, y_batch = batches[j]\n P_batch, H_batch = EvaluateClassifier(X_batch, W, b, k) \n grad_Ws, grad_bs = ComputeGradients(H_batch, X_batch, Y_batch, P_batch, W, b, lambd, k)\n\n for i in range(len(W)):\n W[i] = W[i]-eta*grad_Ws[i]\n b[i] = b[i]-eta*grad_bs[i]\n\n j += 1\n if j>(n//n_batch-1):\n # set j = 0 will start new epoch\n j = 0\n metrics['updates'].append(t+1)\n metrics['acc_scores'].append(ComputeAccuracy(X, y, W, b, k))\n metrics['Loss_scores'].append(ComputeCost(X, Y, W, b,lambd, k))\n\n if X_val is not None:\n metrics['acc_val_scores'].append(ComputeAccuracy(X_val, y_val, W, b, k))\n metrics['Loss_val_scores'].append(ComputeCost(X_val, Y_val, W, b, lambd, k))\n \"\"\" message = \"In update \"+str(t+1)+'/'+str(2*cycles*step_size)+\" finishes epoch \"+ \\\n str(len(metrics['updates'])-1)+\": loss=\"+str(metrics['Loss_scores'][-1])+ \\\n \" and accuracy=\"+str(metrics['acc_scores'][-1])+\" (training set) \\r\"\n sys.stdout.write(message) \"\"\"\n \n # Shuffling for each epoch\n rand_index=np.random.permutation(n)\n X=X[:,rand_index]\n Y=Y[:,rand_index]\n y = [y[c] for c in rand_index]\n batches=dict()\n for f in range(n//n_batch):\n j_start = (f)*n_batch ;\n j_end = (f+1)*n_batch;\n inds = range(j_start,j_end);\n y_batch = [y[index] for index in inds]\n X_batch = X[:, inds];\n Y_batch = Y[:, inds];\n batches[f]=(X_batch,Y_batch,y_batch)\n \n \n \n return W, b, metrics \n\n# MiniBatchGD WITH BN.\ndef MiniBatchGD2_BN(X, Y, y, GDparams, W, b, k, lambd, beta, gamma, mean_input, var_input, batch_norm, alpha, X_val=None, Y_val=None, y_val=None):\n n = X.shape[1]\n (eta_min,eta_max,step_size,n_batch,cycles)=GDparams\n metrics = {'updates':[-1], \n 'Loss_scores':[ComputeCost(X, Y, W, b, lambd, k, gamma, beta, mean_input, var_input, batch_norm)], \n 'acc_scores':[ComputeAccuracy(X, y, W, b, k, gamma, beta, mean_input, var_input, batch_norm)]}\n if X_val is not None:\n metrics['Loss_val_scores'] = [ComputeCost(X, Y, W, b, lambd, k, gamma, beta, mean_input, var_input, batch_norm)]\n metrics['acc_val_scores'] = [ComputeAccuracy(X_val, y_val, W, b, k, gamma, beta, mean_input, var_input, batch_norm)]\n batches = dict()\n # N batching \n for j in range(n//n_batch):\n j_start = (j)*n_batch\n j_end = (j+1)*n_batch\n inds = range(j_start,j_end)\n y_batch = [y[index] for index in inds]\n X_batch = X[:, inds]\n Y_batch = Y[:, inds]\n #gamma_batch = gamma[:, inds]\n #beta_batch = beta[:, inds]\n 
batches[j]=(X_batch,Y_batch,y_batch)#, gamma_batch, beta_batch)\n j = 0\n \n for l in range(cycles):\n for t in range(2*l*step_size, 2*(l+1)*step_size):\n \n if t>= 2*l*step_size and t<(2*l+1)*step_size:\n eta = eta_min+(t-2*l*step_size)/step_size*(eta_max-eta_min)\n elif t>=(2*l+1)*step_size and t<2*(l+1)*step_size:\n eta = eta_max-(t-(2*l+1)*step_size)/step_size*(eta_max-eta_min)\n \n \n X_batch, Y_batch, y_batch = batches[j]\n \n # init list of mean and var\n if j == 0 and t == 0:\n mean_av = mean_input\n var_av = var_input\n P_batch, S_batch_Norm, S_batch, H_batch, mean_batch, var_batch = EvaluateClassifier(X_batch, W, b, k, gamma, beta, mean_av, var_av, batch_norm=True)\n\n else:\n mean_av = [[alpha * x for x in mean_av][y] + [(1 - alpha) * x for x in mean_batch][y]\n for y in range(len(mean_batch))]\n var_av = [[alpha * x for x in var_av][y] + [(1 - alpha) * x for x in var_batch][y]\n for y in range(len(var_batch))]\n P_batch, S_batch_Norm, S_batch, H_batch, mean_batch, var_batch = EvaluateClassifier(X_batch, W, b, k, gamma, beta, mean_av, var_av, batch_norm=True)\n\n grad_Ws, grad_bs, grad_gamma, grad_beta = ComputeGradients(H_batch, X_batch, Y_batch,P_batch,W, b, lambd, k, gamma, beta, mean_batch, var_batch, S_batch_Norm, S_batch, True)\n\n \n #Update b + g\n for i in range(len(grad_gamma)):\n gamma[i] = gamma[i]-eta*grad_gamma[i]\n beta[i] = beta[i]-eta*grad_beta[i]\n \n # Update W + b\n for i in range(len(W)):\n W[i] = W[i]-eta*grad_Ws[i]\n b[i] = b[i]-eta*grad_bs[i]\n\n j += 1\n if j>(n//n_batch-1):\n # set j = 0 will start new epoch\n j = 0\n metrics['updates'].append(t+1)\n metrics['acc_scores'].append(ComputeAccuracy(X, y, W, b, k, gamma, beta, mean_av, var_av, batch_norm))\n metrics['Loss_scores'].append(ComputeCost(X, Y, W, b, lambd, k, gamma, beta, mean_av, var_av, batch_norm))\n\n if X_val is not None:\n metrics['acc_val_scores'].append(ComputeAccuracy(X_val, y_val, W, b, k, gamma, beta, mean_av, var_av, batch_norm))\n metrics['Loss_val_scores'].append(ComputeCost(X_val, Y_val, W, b, lambd, k, gamma, beta, mean_av, var_av, batch_norm))\n \"\"\" message = \"In update \"+str(t+1)+'/'+str(2*cycles*step_size)+\" finishes epoch \"+ \\\n str(len(metrics['updates'])-1)+\": loss=\"+str(metrics['Loss_scores'][-1])+ \\\n \" and accuracy=\"+str(metrics['acc_scores'][-1])+\" (training set) \\r\"\n sys.stdout.write(message) \"\"\"\n \n # Shuffling for each epoch\n rand_index=np.random.permutation(n)\n X=X[:,rand_index]\n Y=Y[:,rand_index]\n y = [y[c] for c in rand_index]\n batches=dict()\n for f in range(n//n_batch):\n j_start = (f)*n_batch ;\n j_end = (f+1)*n_batch;\n inds = range(j_start,j_end);\n y_batch = [y[index] for index in inds]\n X_batch = X[:, inds];\n Y_batch = Y[:, inds];\n batches[f]=(X_batch,Y_batch,y_batch)\n \n \n return W, b, metrics, gamma, beta, mean_av[-1], var_av[-1]\n\n# Plotting metrics\ndef plot(metrics):\n \n # Cost plot\n fig, ax = plt.subplots() \n ax.plot(metrics['updates'], metrics['Loss_scores'], 'b', label='Training cost') \n ax.plot(metrics['updates'], metrics['Loss_val_scores'], 'r', label='Validation cost') \n ax.set_xlabel('Update step') \n ax.set_ylabel('Cost') \n plt.legend()\n plt.show()\n \n # Acc plot \n fig, ax = plt.subplots() \n ax.plot(metrics['updates'], metrics['acc_scores'], 'b', label='Training accuracy') \n ax.plot(metrics['updates'], metrics['acc_val_scores'], 'r', label='Validation accuracy') \n ax.set_xlabel('Update step') \n ax.set_ylabel('Accuracy') \n plt.legend()\n plt.show()\n \ndef plot_metrics_grid(metrics_grid,lambds):\n \n 
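# Summary plots for the lambda grid search: one point per lambda, best value highlighted in the legend.\n    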
# Loss plot\n label1='Training: '+str(min(metrics_grid['Loss_scores']))+ ' lambda: '+str(lambds[metrics_grid['Loss_scores'].index(min(metrics_grid['Loss_scores']))])\n label2='Validation: '+str(min(metrics_grid['Loss_val_scores']))+ ' lambda: '+str(lambds[metrics_grid['Loss_val_scores'].index(min(metrics_grid['Loss_val_scores']))])\n plt.plot(lambds,metrics_grid['Loss_scores'], color='b', marker='o',mfc='pink',label=label1 )\n plt.plot(lambds,metrics_grid['Loss_val_scores'], color='r', marker='o',mfc='green',label=label2 ) \n plt.ylabel('loss') \n plt.xlabel('lambda')\n plt.legend(loc=\"upper right\")\n plt.show()\n \n # Accuracy plot\n label1='Training: '+str(max(metrics_grid['acc_scores']))+ ' lambda: '+str(lambds[metrics_grid['acc_scores'].index(max(metrics_grid['acc_scores']))])\n label2='Validation: '+str(max(metrics_grid['acc_val_scores']))+ ' lambda: '+str(lambds[metrics_grid['acc_val_scores'].index(max(metrics_grid['acc_val_scores']))])\n plt.plot(lambds,metrics_grid['acc_scores'], color='b', marker='o',mfc='green',label=label1 )\n plt.plot(lambds,metrics_grid['acc_val_scores'], color='r', marker='o',mfc='magenta',label=label2 )\n plt.ylabel('accuracy') \n plt.xlabel('lambda') \n plt.legend(loc=\"upper right\")\n plt.show()\n\n###############################################################\n\n# Testing K-layered network\n\n###############################################################\n \n# Read data\nX_train, Y_train, y_test = ReadData('data_batch_1')\nX_val_train, Y_val_train, y_val_test = ReadData('data_batch_2')\nX_test_train, Y_test_train, y_test_test = ReadData('test_batch')\n\n# Normalize all data w.r.t. mean and std of training data\nX_train_normalized, X_val_train_normalized, X_test_train_normalized = Normalize(X_train, X_val_train, X_test_train)\n\n# Create model params W and b\nhidden_layers = [50, 50, 50, 50]\nW, b = GetWeightAndBias(X_train_normalized, Y_train, 4, hidden_layers)\n\n# Model evaluation (softmax)\nP, act_vals = EvaluateClassifier(X_train_normalized, W, b, 4)\n\n# Cost function\nlambd = 0.0\nA = ComputeCost(X_train_normalized, Y_train, W, b, lambd, 4)\nprint(\"Cost: \", A)\n# Accuracy\nA = ComputeAccuracy(X_train_normalized, y_test, W, b, 4)\nprint(\"Accuracy: \", A,\"\\n\")\n\n# Compute gradients\nlambd = 0\n#grad_Ws, grad_bs = ComputeGradients(act_vals, X_train_normalized, \n# Y_train,P,\n# W,lambd, 4)\n\nthreshold = 1e-5\n\nW_sub = list()\n\ncount = 1\nfor w in W:\n if count == 1:\n W_sub.append(w[:, 0:10])\n else:\n W_sub.append(w)\n count += 1\n \n# Compare numerical gradients with analytical\nCompareGradients(X_train_normalized[0:10, [0]],Y_train[:, [0]],\n W_sub, b, lambd, 4, threshold)\n\n###############################################################\n\n# Testing K-layered network with all data\n\n###############################################################\n\n# Read all data\nX1,Y1,y1=ReadData('data_batch_1')\nX2,Y2,y2=ReadData('data_batch_2')\nX3,Y3,y3=ReadData('data_batch_3')\nX4,Y4,y4=ReadData('data_batch_4')\nX5,Y5,y5=ReadData('data_batch_5')\nX_test,Y_test,y_test=ReadData('test_batch')\n\n\n# Stack it up\nX_new = np.hstack((X1, X2, X3, X4, X5))\nY_new = np.hstack((Y1, Y2, Y3, Y4,Y5))\ny_new = y1+y2+y3+y4+y5\n\n# Choose 5k random instances for new validation data\n# (More data = less overfitting)\nnp.random.seed(0)\nrand_indexes = np.random.choice(range(X_new.shape[1]), 5000, replace=False)\nX_val_new = X_new[:,rand_indexes]\nY_val_new = Y_new[:,rand_indexes]\ny_val_new = [y_new[i] for i in rand_indexes]\ny_new = [y_new[i] for i in 
range(X_new.shape[1]) if i not in rand_indexes]\nX_new = np.delete(X_new, rand_indexes, 1)\nY_new = np.delete(Y_new, rand_indexes, 1)\n\nX_new_norm, X_val_new_norm, X_test_new_norm = Normalize(X_new, X_val_new, X_test_train)\n\n###############################################################\n\n# Test 1: 2-layered network \n\n###############################################################\nhidden_layers = [50]\nW,b=GetWeightAndBias(X_new_norm, Y_new, 1, hidden_layers)\n\nlambd= 0.02\neta_min=1e-5\neta_max=1e-1\nstep_size=900\nn_batch=100\ncycles=3 \n\nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\nW_upd, b_upd, metrics= MiniBatchGD2(X_new_norm, Y_new, y_new,\n GDparams, W, b, 1, lambd, X_val_new_norm, \n Y_val_new, y_val_new)\nplot(metrics)\nprint(\"Final Accuracy on val set: \", metrics['acc_val_scores'][-1])\n\n###############################################################\n\n# Test 2: 3-layered network\n\n###############################################################\nhidden_layers = [50, 50]\nW,b=GetWeightAndBias(X_new_norm, Y_new, 2, hidden_layers)\nlambd= 0.005\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=2 \nalpha=0.9\nk=2\n\nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\nW_upd, b_upd, metrics= MiniBatchGD2(X_new_norm, Y_new, y_new,\n GDparams, W, b, 2, lambd, X_val_new_norm, \n Y_val_new, y_val_new)\n\nplot(metrics)\nprint(\"Final Accuracy on val set: \", metrics['acc_val_scores'][-1])\n\n###############################################################\n\n# Test 3: 9-layered network\n\n###############################################################\nhidden_layers = [50, 30, 20, 20, 10, 10, 10, 10]\nW,b=GetWeightAndBias(X_new_norm, Y_new, 8, hidden_layers)\nlambd= 0.005\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=2 \n\nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\nW_upd, b_upd, metrics= MiniBatchGD2(X_new_norm, Y_new, y_new,\n GDparams, W, b, 8, lambd, X_val_new_norm, \n Y_val_new, y_val_new)\n\nplot(metrics)\nprint(\"Final Accuracy on val set: \", metrics['acc_val_scores'][-1])\n\n###############################################################\n\n# Test 4: Testing batch norm implementation\n\n###############################################################\n\n# Read data\nX_train, Y_train, y_test = ReadData('data_batch_1')\nX_val_train, Y_val_train, y_val_test = ReadData('data_batch_2')\nX_test_train, Y_test_train, y_test_test = ReadData('test_batch')\n\n# Normalize all data w.r.t. 
mean and std of training data\nX_train_normalized, X_val_train_normalized, X_test_train_normalized = Normalize(X_train, X_val_train, X_test_train)\n\n# Create model params W and b\nhidden_layers = [50, 50, 50, 50]\nW, b, gamma, beta = GetWeightAndBias(X_train_normalized, Y_train, 4, hidden_layers, batch_norm=True)\n\n# Model evaluation (softmax)\nP, S_batch_Norm, S, H, mean, var = EvaluateClassifier(X_train_normalized, W, b, 4, gamma, beta, mean=None, var=None, batch_norm=True)\n\n# Cost function\nlambd = 0.0\nA = ComputeCost(X_train_normalized, Y_train, W, b, lambd, 4, gamma, beta, mean=None, var=None, batch_norm=True)\nprint(\"Cost: \", A)\n# Accuracy\nA = ComputeAccuracy(X_train_normalized, y_test, W, b, 4, gamma, beta, mean=None, var=None, batch_norm=True)\nprint(\"Accuracy: \", A,\"\\n\")\n\n\n# Compute gradients\nlambd = 0\ngrad_W,grad_b,grad_gamma,grad_beta = ComputeGradients(H, X_train_normalized, Y_train,P,W, b, lambd, 4, gamma, beta, mean, var, S_batch_Norm, S, True)\n\n# Compare numerical gradients with analytical, on a small input slice\nthreshold = 1e-5\n\nW_cut = list()\n\ncount = 1\nfor w in W:\n    if count == 1:\n        W_cut.append(w[:, 0:10])\n    else:\n        W_cut.append(w)\n    count += 1\n    \n\nCompareGradients(X_train_normalized[0:10, [0]],Y_train[:, [0]],\n                 W_cut, b, lambd, 4, threshold,gamma, beta, batch_norm=True)\n\n###############################################################\n\n# Test 5: Testing batch norm training with 2 hidden layers\n\n###############################################################\n\nhidden_layers = [50, 50]\nW, b, gamma, beta = GetWeightAndBias(X_new_norm, Y_train, 2, hidden_layers, True)\nP, S_batch_Norm, S, H, mean_inp, var_inp = EvaluateClassifier(X_new_norm, W, b, 2, gamma, beta, batch_norm=True)\n\nlambd= 0.005\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=2 \nalpha=0.9\nk=2\nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\n\nW_fin, b_fin, metrics, gamma_fin, beta_fin, mean_fin, var_fin = MiniBatchGD2_BN(X_new_norm, Y_new, y_new,\n                                         GDparams, W, b, k, lambd,\n                                         beta, gamma, mean_inp, var_inp, True, alpha,\n                                         X_val_new_norm, Y_val_new, y_val_new)\n\nplot(metrics)\nprint(\"Final Accuracy on val set: \", metrics['acc_val_scores'][-1])\n\n###############################################################\n\n# Test 6: coarse and fine search of an optimal lambda value\n\n###############################################################\n\ndef gridsearchCV(lambds, hidden_dimension, X, Y, y, GDparams, X_val=None,\n                 Y_val=None, y_val=None):\n    metrics_grid = {'Loss_scores':[], 'acc_scores':[]}\n    \n    # Include validation data if there is some\n    if X_val is not None:\n        metrics_grid['Loss_val_scores'] = []\n        metrics_grid['acc_val_scores'] = []\n    \n    for lambd in lambds:\n        \n        # Re-init weights for each lambda value\n        W, b, gamma, beta = GetWeightAndBias(X, Y, 2, hidden_dimension, True)\n        P, S_batch_Norm, S, H, mean_inp, var_inp = EvaluateClassifier(X, W, b, 2, gamma, beta, batch_norm=True)\n        \n        # Build model with current lambda value \n        W, b, metrics, gamma, beta, mean_final, var_final = MiniBatchGD2_BN(X, Y, y, GDparams, W, b, 2, lambd, beta, gamma, mean_inp, var_inp, True, 0.9,X_val,Y_val,y_val)\n        \n        \n        # Append to the metrics\n        metrics_grid['Loss_scores'].append(metrics['Loss_scores'][-1])\n        metrics_grid['acc_scores'].append(metrics['acc_scores'][-1])\n        \n        # Include validation data if there is some\n        if X_val is not None:\n            
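# record the final validation loss/accuracy reached with this lambda\n            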
metrics_grid['Loss_val_scores'].append(metrics['Loss_val_scores'][-1])\n            metrics_grid['acc_val_scores'].append(metrics['acc_val_scores'][-1])\n    return metrics_grid\n\n# Random lambda values, log-uniform between 1e-5 and 1e-1\nnp.random.seed(0)\nl_max, l_min = -1, -5\nl = l_min+(l_max-l_min)*np.random.rand(7)\nprint(l)\nlambds = list(10**l)\nlambds.sort()\nprint(lambds)\n\n# New params\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=2 \nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\n\n# Run test with random lambdas\nmetrics_grid=gridsearchCV(lambds, hidden_layers, X_new_norm, Y_new, y_new, GDparams, X_val_new_norm,\n                          Y_val_new, y_val_new)\n\n\n# Plot of the random search\nplot_metrics_grid(metrics_grid, lambds)\n\nlambdas_list=[0.0005,0.001,0.0015,0.002,0.0025,0.003,0.005, 0.01]\nprint(lambdas_list)\n\n# Run test with best lambdas \nmetrics_grid=gridsearchCV(lambdas_list, hidden_layers, X_new_norm, Y_new, y_new, GDparams, X_val_new_norm,\n                          Y_val_new, y_val_new)\n\n# Plot of the fine search\nplot_metrics_grid(metrics_grid,lambdas_list)\n\n###############################################################\n\n# Test 7: Testing batch norm training with 3 layers and\n# optimal lambda\n\n###############################################################\n\nhidden_layers = [50, 50]\nW, b, gamma, beta = GetWeightAndBiasProper(X_new_norm, 2, hidden_layers, True)\nP, S_batch_Norm, S, H, mean_inp, var_inp = EvaluateClassifier(X_new_norm, W, b, 2, gamma, beta, batch_norm=True)\n\nlambd= 0.002\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=3 \nalpha=0.9\nk=2\nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\n\nW, b, metrics, gamma, beta, mean_final, var_final = MiniBatchGD2_BN(X_new_norm, Y_new, y_new,\n                                         GDparams, W, b, k, lambd,\n                                         beta, gamma, mean_inp, var_inp, True, alpha,\n                                         X_val_new_norm, Y_val_new, y_val_new)\n\nplot(metrics)\nprint(\"Final Accuracy on val set: \", metrics['acc_val_scores'][-1])\n\n###############################################################\n\n# Test 8: Testing batch norm training with a 9-layer network\n# (8 hidden layers) and optimal lambda\n\n###############################################################\n\nhidden_layers = [50, 30, 20, 20, 10, 10, 10, 10] \nW, b, gamma, beta = GetWeightAndBiasProper(X_new_norm, 8, hidden_layers, True)\nP, S_batch_Norm, S, H, mean_inp, var_inp = EvaluateClassifier(X_new_norm, W, b, 8, gamma, beta, batch_norm=True)\n\nlambd= 0.005\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=2 \nalpha=0.9\nk=8\nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\n\nW, b, metrics, gamma, beta, mean_final, var_final = MiniBatchGD2_BN(X_new_norm, Y_new, y_new,\n                                         GDparams, W, b, k, lambd,\n                                         beta, gamma, mean_inp, var_inp, True, alpha,\n                                         X_val_new_norm, Y_val_new, y_val_new)\n\nplot(metrics)\nprint(\"Final Accuracy on val set: \", metrics['acc_val_scores'][-1])\n\n###############################################################\n\n# Test 9: Testing batch norm vs no batch norm weight init sensitivity \n# on the test set\n\n###############################################################\n\nhidden_dimension= [50, 50]\neta_min=1e-5\neta_max=1e-1\nstep_size=int((5*45000)/100)\nn_batch=100\ncycles=2 \nGDparams=(eta_min,eta_max,step_size,n_batch,cycles)\n\nSigmaExperiments(X_new_norm, Y_new, y_new, hidden_dimension, GDparams, \n                 X_val_new_norm, Y_val_new, y_val_new, 
X_test,Y_test,y_test)\n","repo_name":"Akseluhr/Image-classification-vol3","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":41456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"4235048753","text":"# -- coding: utf-8 --\nimport os\nimport traceback\nimport utils\nimport dateutil.parser as dparser\nimport json\n\nfrom .base_template import BaseTemplate\n\n\nclass HashesParser(BaseTemplate):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # main function\n        self.main()\n\n    def main(self):\n        for file_name in os.listdir(self.folder_path):\n            try:\n                path = os.path.join(self.folder_path, file_name)\n                if os.path.isdir(path):\n                    continue\n\n                print(f'Proceeding for File {file_name}')\n\n                input_file_path = path\n                # str.rstrip strips a character set, not a suffix, so drop the extension explicitly\n                output_file_name = file_name[:-4] if file_name.endswith('.txt') else file_name\n                output_file_path = os.path.join(\n                    self.output_folder, f'{output_file_name}.json'\n                )\n\n                self.process_file(\n                    input_file_path, output_file_path\n                )\n\n                print('----------------------------------------\\n')\n            except Exception:\n                traceback.print_exc()\n                continue\n\n    def process_file(self, input_file_path, output_file_path):\n        with open(output_file_path, 'a', encoding='utf-8') as file_pointer:\n            with open(input_file_path, 'r') as fp:\n                fp.seek(0)\n\n                while True:\n                    row = fp.readline()\n                    if not row:\n                        break\n                    row = row.strip()\n                    \n                    if row:\n                        hash_type = row.split(\" \")[0]\n                        hash_content = row.split(\" \")[1]\n\n                        if len(hash_content.split(\":\")) == 3:\n                            hash_value = hash_content.split(\":\")[0]\n                            hash_salt = hash_content.split(\":\")[1]\n                            hash_plain = hash_content.split(\":\")[2]\n                        else:\n                            hash_value = hash_content.split(\":\")[0]\n                            hash_salt = ''\n                            hash_plain = hash_content.split(\":\")[1]\n                        \n                        data = {\n                            '_source': {\n                                'source': 'hashes.org',\n                                \"type\": 'hash',\n                                'hashtype': hash_type,\n                                'hash': hash_value,\n                                'salt': hash_salt,\n                                'value': hash_plain\n                            }\n                        }\n\n                        json_file = json.dumps(data, indent=4, ensure_ascii=False)\n                        file_pointer.write(json_file)\n                        \n                        print(f'Json for paste_id {input_file_path} '\n                              f'written in {output_file_path}')\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"templates/hashes_template.py","file_name":"hashes_template.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2324656554","text":"from django.urls import path\nfrom . import views\nfrom rest_framework.routers import DefaultRouter\n\napp_name = 'dispensary'\n\nrouter = DefaultRouter()\nrouter.register('api/medicine', views.MedicineView, basename='api_medicine')\nrouter.register('api/bill', views.BillView, basename='api_bill')\nrouter.register('api/transaction', views.TransactionView, basename='api_transaction')\n\nurlpatterns = [\n]\n\nurlpatterns += router.urls","repo_name":"RahulMukeshSingh/HospitalManagement","sub_path":"dispensary/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74179107394","text":"from hypothesis import given\n\nfrom ground.base import Relation\nfrom tests.utils import equivalence\nfrom . 
import strategies\n\nSYMMETRICAL_RELATIONS = (Relation.CROSS, Relation.DISJOINT, Relation.EQUAL,\n Relation.OVERLAP, Relation.TOUCH)\n\n\n@given(strategies.relations)\ndef test_basic(relation: Relation) -> None:\n result = relation.complement\n\n assert result in Relation\n\n\n@given(strategies.relations)\ndef test_value(relation: Relation) -> None:\n result = relation.complement\n\n assert equivalence(result is relation, result in SYMMETRICAL_RELATIONS)\n","repo_name":"lycantropos/ground","sub_path":"tests/base_tests/relation_tests/test_complement.py","file_name":"test_complement.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"34525734701","text":"from datetime import datetime\n\nfrom hyacinth.db.models import DbListing\nfrom hyacinth.db.session import Session\nfrom hyacinth.models import Listing, SearchSpec\n\n\ndef get_listings(search_spec: SearchSpec, after_time: datetime) -> list[Listing]:\n with Session() as session:\n listings: list[DbListing] = (\n session.query(DbListing)\n .filter(DbListing.created_at > after_time.timestamp())\n .filter(DbListing.search_spec_json == search_spec.json())\n .order_by(DbListing.created_at.desc())\n .all()\n )\n return [db_listing.to_listing() for db_listing in listings]\n\n\ndef get_last_listing(search_spec: SearchSpec) -> Listing | None:\n with Session() as session:\n db_listing: DbListing | None = (\n session.query(DbListing)\n .filter(DbListing.search_spec_json == search_spec.json())\n .order_by(DbListing.created_at.desc())\n .first()\n )\n return db_listing.to_listing() if db_listing is not None else None\n\n\ndef save_listings(search_spec: SearchSpec, listings: list[Listing]) -> None:\n db_listings = [DbListing.from_listing(search_spec, listing) for listing in listings]\n with Session() as session:\n session.add_all(db_listings)\n session.commit()\n","repo_name":"lmesa008/bot","sub_path":"hyacinth/db/listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15426072905","text":"def is_sorted(ls):\n\tcount = 0\n\tfor i in range(len(ls)-1):\n\t\tfor j in range(i,len(ls)):\n\t\t\tif ls[i]>ls[j]:\n\t\t\t\tcount += 1\n\t\t\t\n\tif count != 0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\nls_check = [3,5,7,8]\nres = is_sorted(ls_check)\nprint(res)","repo_name":"ArmineGyulazyan/homework23_06","sub_path":"is_sorted.py","file_name":"is_sorted.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34025733646","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\n__author__ = 'jingslaw'\r\n__version__ = 1.0\r\n\r\nimport numpy as np\r\nfrom scipy import special\r\nimport time\r\nfrom method.read import poscar\r\nimport spglib\r\n\r\n\r\ndef madelung_potential(cell, position, extend=(100, 100, 100), eta=10):\r\n volume = np.linalg.det(cell)\r\n reciprocal_vector1 = 2*np.pi*np.cross(cell[1], cell[2])/volume\r\n reciprocal_vector2 = 2*np.pi*np.cross(cell[2], cell[0])/volume\r\n reciprocal_vector3 = 2*np.pi*np.cross(cell[0], cell[1])/volume\r\n mm, nn, ll = extend[:]\r\n potential = 0.0\r\n for i in range(-mm, mm+1):\r\n for j in range(-nn, nn+1):\r\n for k in range(-ll, ll+1):\r\n g_vector = i*reciprocal_vector1+j*reciprocal_vector2+k*reciprocal_vector3\r\n vector = i*cell[0]+j*cell[1]+k*cell[2]\r\n if 
np.linalg.norm(g_vector) != 0:\r\n potential += 4*np.pi*np.exp(2*np.pi*complex(0, 1)*np.dot(g_vector, position))*np.exp(-np.linalg.norm(g_vector)**2/(4*eta**2))/((np.linalg.norm(g_vector)**2)*abs(volume))\r\n if np.linalg.norm(vector - position) == 0:\r\n potential -= 2 * eta / np.sqrt(np.pi)\r\n else:\r\n potential += special.erfc(eta * np.linalg.norm(vector - position)) / np.linalg.norm(vector - position)\r\n return potential\r\n\r\n\r\ndef madelung_potential_new(cell, position, extend=(100, 100, 100), eta=10):\r\n fractional = np.dot(position, np.linalg.inv(cell))\r\n volume = np.linalg.det(cell)\r\n reciprocal_vector1 = 2 * np.pi * np.cross(cell[1], cell[2]) / volume\r\n reciprocal_vector2 = 2 * np.pi * np.cross(cell[2], cell[0]) / volume\r\n reciprocal_vector3 = 2 * np.pi * np.cross(cell[0], cell[1]) / volume\r\n mm, nn, ll = extend[:]\r\n x = np.linspace(-mm, mm, num=2*mm+1)\r\n y = np.linspace(-nn, nn, num=2*nn+1)\r\n z = np.linspace(-ll, ll, num=2*ll+1)\r\n xx, yy, zz = np.meshgrid(x, y, z, sparse=True, indexing='ij')\r\n\r\n coeff1 = np.exp(2*np.pi*complex(0, 1)*(xx*fractional[0]+yy*fractional[1]+zz*fractional[2]))\r\n coeff2 = 0.0\r\n coeff3 = 0.0\r\n for i in range(3):\r\n tmp = xx*reciprocal_vector1[i]+yy*reciprocal_vector2[i]+zz*reciprocal_vector3[i]\r\n tmp = tmp*tmp\r\n coeff2 += tmp\r\n tmp2 = (xx-fractional[0])*cell[0][i]+(yy-fractional[1])*cell[1][i]+(zz-fractional[2])*cell[2][i]\r\n tmp2 = tmp2*tmp2\r\n coeff3 += tmp2\r\n sum1 = 4*np.pi*coeff1*np.exp(-coeff2/(4*eta**2))/(coeff2*abs(volume))\r\n sum1[np.isinf(np.real(sum1))] = 0\r\n coeff3 = np.sqrt(coeff3)\r\n sum2 = special.erfc(eta*coeff3)/coeff3\r\n sum2[np.isinf(sum2)] = -2*eta/np.sqrt(np.pi)\r\n result = np.sum(sum1) + np.sum(sum2)\r\n if np.imag(result) > 1e-10:\r\n print('\\nERROR: the potential {0} is not a real, unknown problem, please check the code!\\n'.format(result))\r\n exit(1)\r\n return np.real(result)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tic = time.time()\r\n file_name = 'YPO4primitive.vasp'\r\n charge = {'Y': 3, 'O': -2, 'P': 5}\r\n structure = poscar(file_name)\r\n atom_list = [[], []]\r\n multipler = (120, 120, 120)\r\n eta = 1*2*np.pi\r\n for atom in structure:\r\n atom_list[0].append(atom.pos)\r\n atom_list[1].append(atom.type)\r\n for atom in structure:\r\n vectors = atom.pos - np.array(atom_list[0])\r\n potential = 0.0\r\n for i in range(len(vectors)):\r\n potential += charge[atom_list[1][i]]*madelung_potential_new(structure.cell.T, vectors[i],\r\n extend=multipler, eta=eta)\r\n print(atom, potential)\r\n toc = time.time()\r\n print('Time:{0}'.format(toc-tic))\r\n\r\n","repo_name":"jingslaw/cluster_embedded_model","sub_path":"method/madelung.py","file_name":"madelung.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14297316619","text":"\"\"\"\n652. 
Find Duplicate Subtrees\nhttps://leetcode.com/problems/find-duplicate-subtrees/\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:\n\n # Note the postorder traversal a node's children uniquely serializes the tree.\n # [1] Encode each node as node value and Post order traversal each child of node\n # [2] Store encoding in a hash_table\n # O(n) average time and O(log n) worst case\n # O(n) space for hash map O(n^2) worst case\n\n from collections import Counter\n res = []\n hash_map = Counter()\n\n def postorder(node):\n if not node:\n return None\n\n node_key = (node.val, postorder(node.left), postorder(node.right))\n hash_map[node_key] += 1\n\n if hash_map[node_key] == 2:\n res.append(node)\n return node_key\n\n postorder(root)\n return res\n","repo_name":"mathvolcano/leetcode","sub_path":"0652_findDuplicateSubtrees.py","file_name":"0652_findDuplicateSubtrees.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18560861714","text":"from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\n\n\nasync def get_link_keyboard(data) -> InlineKeyboardMarkup:\n buttons = []\n lstbtn = []\n for i in range(len(data)):\n lstbtn.append(\n InlineKeyboardButton(\n text=data[i][\"name\"], url=data[i][\"url\"]\n )\n )\n if i % 2 == 0:\n buttons.append(lstbtn)\n lstbtn = []\n elif i == len(data) - 1:\n buttons.append(lstbtn)\n return InlineKeyboardMarkup(inline_keyboard=[*buttons])\n","repo_name":"parciffal/literate-bassoon","sub_path":"app/keyboards/inline/rug_check_keyboard.py","file_name":"rug_check_keyboard.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15530412623","text":"import os.path\n\nfrom dotblox.tools import codewall\nfrom dotbloxmaya.core.ui import dockwindow\n\nfrom maya import cmds, mel\n\npython_file_command = \"\"\"\nwith open(\"{file_path}\") as f:\n exec(compile(f.read(), \"{file_path}\", \"exec\"), globals(), locals())\n\"\"\"\n\npython_text_command = \"\"\"\nexec(compile(\\\"\"\"{text}\\\"\"\", \"interactive\", \"exec\"), globals(), locals())\n\"\"\"\n\n\nclass MayaHook(codewall.Hook):\n APP = \"maya\"\n\n def get_supported_extensions(self):\n extensions = codewall.Hook.get_supported_extensions(self)\n extensions.append(\".mel\")\n return extensions\n\n def run_file(self, file_path):\n if os.path.isfile(file_path):\n if file_path.endswith(\".py\"):\n cmds.evalDeferred(python_file_command.format(file_path=file_path))\n elif file_path.endswith(\".mel\"):\n mel.eval('source \\\"{file_path}\\\"'.format(file_path=file_path))\n\n def execute_text(self, text, file_name):\n print(text)\n if file_name.endswith(\".py\"):\n cmds.evalDeferred(python_text_command.format(text=text))\n elif file_name.endswith(\".mel\"):\n mel.eval(text)\n\n\nclass MayaCodeWallWidget(codewall.CodeWallWidget):\n def __init__(self, parent=None):\n codewall.CodeWallWidget.__init__(self, hook=MayaHook(), parent=parent)\n\n\ndock = 
dockwindow.DockWindowManager(MayaCodeWallWidget)\n","repo_name":"dotRyan/dotblox","sub_path":"python/dotbloxmaya/general/codewall.py","file_name":"codewall.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"33450824028","text":"class UnionFind():\n    def __init__(self,size) -> None:\n        self.root = {i:i for i in range(size)}\n        self.sizes = [1 for _ in range(size)]\n\n    def find(self, x):\n        if self.root[x] != x:\n            self.root[x] = self.find(self.root[x])\n        return self.root[x]\n    \n    def union(self,x,y):\n        repX = self.find(x)\n        repY = self.find(y)\n\n        if repX != repY:\n            if self.sizes[repX] > self.sizes[repY]:\n                self.root[repY] = repX\n                self.sizes[repX] += self.sizes[repY]\n            else:\n                self.root[repX] = repY\n                self.sizes[repY] += self.sizes[repX]\n\n\n    def connected(self,x,y):\n        return self.find(x) == self.find(y)\n    \n    def get(self):\n        return self.root\n    \n    \nclass Solution:\n    def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:\n        uf = UnionFind(len(s))\n        group_char = defaultdict(list)\n        group_i = defaultdict(list)\n        \n        for x,y in pairs:\n            uf.union(x,y)\n        \n        \n        for i in range(len(s)):\n            group_i[uf.find(i)].append(i)\n            group_char[uf.find(i)].append(s[i])\n        \n        ans= [''] * len(s) \n        for g,idx in group_i.items():\n            idx.sort()\n            char = sorted(group_char[g])\n            \n            for i,char in zip(idx,char):\n                ans[i] = char\n        \n        return ''.join(ans)\n","repo_name":"yonasengdu/Compitative-programming","sub_path":"1202-smallest-string-with-swaps/1202-smallest-string-with-swaps.py","file_name":"1202-smallest-string-with-swaps.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37585875890","text":"from pyzbar.pyzbar import decode\nimport pygame.camera\nfrom PIL import Image\nfrom pygame.locals import *\n\nDEVICE = '/dev/video0'\nSIZE = (640, 480)\n\npygame.init()\npygame.camera.init()\ndisplay = pygame.display.set_mode(SIZE, 0)\ncamera = pygame.camera.Camera(DEVICE, SIZE)\n\ncamera.start()\ncapture = True\nwhile capture:\n    screen = pygame.surface.Surface(SIZE, 0, display)\n    screen = camera.get_image(screen)\n    screen_save = pygame.transform.rotate(screen, 90)\n    screen_save = pygame.transform.flip(screen_save, False, True)\n\n    screen = pygame.transform.flip(screen, True, False)\n    screen = pygame.transform.rotate(screen, 90)\n    img = pygame.surfarray.array3d(screen)\n    test = Image.fromarray(img)\n    dec = decode(test)\n    print(dec)\n    pygame.surfarray.blit_array(screen, img)\n    screen = pygame.transform.flip(screen, False, True)\n    screen = pygame.transform.rotate(screen, -90)\n    display.blit(screen, (0, 0))\n    pygame.display.update()\n    for event in pygame.event.get():\n        if event.type == KEYDOWN:\n            capture = False\n\n","repo_name":"aelmochankun/cloudAPI_test","sub_path":"test_QR.py","file_name":"test_QR.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73021039234","text":"\"\"\"BAP Project Search View Module\"\"\"\n\nfrom flask import render_template, request, jsonify\nfrom sqlalchemy.sql import desc\nfrom flask_classful import FlaskView, route\nfrom flask_babel import gettext as _\n\nfrom zopsedu.auth.lib import auth\nfrom zopsedu.bap.models.proje_turu import ProjeTuru\nfrom 
zopsedu.bap.proje.forms.proje_arama.proje_arama import BAPProjeSearchForm\nfrom zopsedu.lib.db import DB\nfrom zopsedu.models import Proje, AppState\n\n\nclass BapProjeleriAramaView(FlaskView):\n    \"\"\"Project Search\"\"\"\n\n    excluded_methods = [\n        \"qry\"\n    ]\n\n    def process_data(self, result, form_data, total_record):\n        data = [[\n            index + 1,\n            r.proje_turu_ad,\n            r.proje_no if r.proje_no else '-',\n            r.proje_basligi] for index, r in enumerate(result)]\n\n        return jsonify({\"data\": data,\n                        \"draw\": form_data['draw'],\n                        \"recordsFiltered\": total_record,\n                        \"recordsTotal\": total_record}\n                       )\n\n    @property\n    def qry(self):\n        \"\"\"\n        Build the query object for Proje and its related fields.\n        Returns:\n        \"\"\"\n        return DB.session.query(\n            Proje\n        ).join(ProjeTuru, ProjeTuru.id == Proje.proje_turu).join(AppState, Proje.proje_durumu_id == AppState.id).\\\n            filter(AppState.state_code == 'P22').\\\n            add_columns(\n            Proje.id,\n            Proje.bitis_tarihi,\n            Proje.proje_basligi,\n            Proje.proje_turu_numarasi,\n            Proje.proje_no,\n            ProjeTuru.ad.label(\"proje_turu_ad\")\n        )\n\n    @route('/', methods=[\"GET\"])\n    @auth.requires(menu_registry={'path': '.bap_proje_arama', 'title': _(\"BAP Projeleri\"), 'order': 5})\n    def bap_projeleri_listele(self):\n        \"\"\"\n        Projects search index page\n        Returns:\n            http response\n\n        \"\"\"\n        search_form = BAPProjeSearchForm()\n\n        return render_template('/proje_arama/bap_projeleri.html', form=search_form)\n\n    @route('/data', methods=[\"POST\"], endpoint=\"bap_projeler_search\")\n    def projeler_arama(self):  # pylint: disable=too-many-branches\n        \"\"\"\n        Searches BAP projects using the parameters received via POST and returns the results.\n\n        Returns:\n            http response\n\n        \"\"\"\n        qry = self.qry\n        total_record = qry.count()\n        form_data = request.form.to_dict()\n        search_form = BAPProjeSearchForm(**form_data)\n\n        if not search_form.validate():\n            result = qry.order_by(desc(Proje.id)).offset(form_data['start']).limit(\n                form_data['length']).all()\n            total_record = qry.count()\n            return self.process_data(result, form_data, total_record)\n\n        proje_basligi = search_form.proje_ad.data.strip()\n        proje_turu_adi = search_form.proje_turu_adi.data\n        bitis_tarihi = search_form.date.bitis_tarihi.data\n        bitis_tarihi_option = search_form.date.bitis_tarihi_option.data\n\n        if proje_basligi:\n            qry = qry.filter(Proje.proje_basligi.ilike('%' + proje_basligi + '%'))\n\n        if proje_turu_adi:\n            qry = qry.filter(ProjeTuru.ad.ilike('%' + proje_turu_adi + '%'))\n\n        if bitis_tarihi:\n            if bitis_tarihi_option == '0':\n                qry = qry.filter(Proje.bitis_tarihi <= bitis_tarihi)\n            if bitis_tarihi_option == '1':\n                qry = qry.filter(Proje.bitis_tarihi == bitis_tarihi)\n            if bitis_tarihi_option == '2':\n                qry = qry.filter(bitis_tarihi <= Proje.bitis_tarihi)\n\n        result = qry.order_by(desc(Proje.id)).offset(form_data['start']).limit(\n            form_data['length']).all()\n\n        return self.process_data(result, form_data, total_record)\n","repo_name":"kunthar/zopsedu","sub_path":"zopsedu/bap/proje/views/proje_arama/bap_projeleri_arama.py","file_name":"bap_projeleri_arama.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"4234537653","text":"import os\nimport re\nimport json\nimport scrapy\nfrom scrapy.http import Request, FormRequest\nfrom datetime import datetime\nfrom scraper.base_scrapper import SiteMapScrapper\n\n\nclass PasteBinSpider(scrapy.Spider):\n    name = 'pastebin_spider'\n    start_url = 
'https://scrape.pastebin.com/api_scraping.php?limit=250'\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        today = datetime.now().strftime('%Y-%m-%d')\n        self.output_path = f'{kwargs.get(\"output_path\")}/{today}'\n        if not os.path.exists(self.output_path):\n            os.makedirs(self.output_path)\n\n    def start_requests(self):\n        yield Request(\n            url=self.start_url\n        )\n\n    def parse(self, response):\n        json_data = json.loads(response.text)\n        for data in json_data:\n            paste_url = data['full_url']\n            yield Request(\n                url=paste_url,\n                callback=self.save_paste,\n                meta={'dump_id': data['key']}\n            )\n\n    def save_paste(self, response):\n        dump_id = response.meta['dump_id']\n        dump_file = '{}/{}.txt'.format(\n            self.output_path, dump_id\n        )\n        if os.path.exists(dump_file):\n            print('{} already exists..!'.format(dump_file))\n            return\n\n        content = response.xpath(\n            '//textarea[@id=\"paste_code\"]/text()').extract()\n        content = ''.join(content)\n        with open(dump_file, 'w') as f:\n            f.write(content)\n        print('{} done..!'.format(dump_file))\n        self.crawler.stats.inc_value(\"mainlist/detail_saved_count\")\n\n\nclass PasteBinScrapper(SiteMapScrapper):\n\n    spider_class = PasteBinSpider\n    site_name = 'pastebin.com'\n    site_type = 'paste'\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"scraper/pastebin.py","file_name":"pastebin.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72476252675","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport tkinter\n\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom PIL import Image, ImageTk\nfrom tkinter import ttk\nimport numpy\n\n\ntop = Tk()\ntop.title('cifar10')\ntop.geometry('1000x650')\ncanvas = Canvas(top, width=560,height=560, bd=0,bg='white')\ncanvas.grid(sticky=\"W\", row=3)\n\ndef showImg():\n    '''\n\tFile = askopenfilename(title='Open Image')\n\n    e.set(File)\n\n    load = Image.open(e.get())\n    w, h = load.size\n    load = load.resize((1*w, 1*h))\n    imgfile = ImageTk.PhotoImage(load )\n\n    canvas.image = imgfile  # <--- keep reference of your image\n    canvas.create_image(20,20,anchor='nw',image=imgfile)\n\t'''\n    frame.grid(row=1, column=0)\n\t\n\t\ne = StringVar()\n\nsubmit_button = Button(top, text =\"Generate test patient's data\", command = showImg)\nsubmit_button.grid(row=0, column=0)\n\nlabel_name = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']\n\ndef Predict():\n\n    pred = e.get().split('/')[-1][:-4]\n\n\n\n\n    #textvar = \"The prediction is : '%s' image\" %(pred)\n    textvar = \"The prediction is : \\n P1: readmission in 72 hours \\n P2 readmission in 24 hours \\n P3: non-readmission \\n P4: readmission in 7 days \\n P5: readmission in 30 days\" \n\t\n\t\n    t1.delete(0.0, tkinter.END)\n    t1.insert('insert', textvar+'\\n')\n    t1.update()\n\t\n    textvar_2 = \"The ground truth is : \\n \\u2717 P1: readmission in 48 hours \\n \\u2713 P2 readmission in 24 hours \\n \\u2713 P3: non-readmission \\n \\u2717 P4: readmission in 24 \\n \\u2713 P5: readmission in 30 days\" \n\t\n\t\n    t2.delete(0.0, tkinter.END)\n    t2.insert('insert', textvar_2+'\\n')\n    t2.update()\n\t\ndef show_AUC():\n\t# show AUC\n    load = Image.open('AUROC.PNG')\n    w, h = load.size\n    load = load.resize((1*w, 1*h))\n    imgfile = ImageTk.PhotoImage(load )\n\n    canvas.image = imgfile  # <--- keep reference of your image\n    canvas.create_image(20,20,anchor='nw',image=imgfile)\n\t\n\t\nsubmit_button = Button(top, text ='Predict', command = 
Predict)\nsubmit_button.grid(row=0, column=1)\n\nresult_button = Button(top, text ='Show result', command = show_AUC)\nresult_button.grid(row=2, column=1)\n\nl1=Label(top,text='Plot of AUROC results')\nl1.grid(row=2, column=0)\n\n#l2=Label(top,text='Showing AUROC evaluation')\n#l2.grid(row=0, column=3)\n\nt1=Text(top,bd=0, width=27,height=10,font='Fixdsys -14')\nt1.grid(row=1, column=1)\n\nt2=Text(top,bd=0, width=27,height=10,font='Fixdsys -14')\nt2.grid(row=1, column=2)\n\n#########################################################################################################\ndata = [ \n    [\"1\", \"87\", \"120/77\"],\n    [\"1\", \"66\", \"113/88\"],\n    [\"3\", \"110\", \"130/98\"],\n    [\"4\", \"72\", \"141/99\"],\n    [\"5\", \"99\", \"135/89\"] ]\n\nframe = Frame(top)\n\n\ntree = ttk.Treeview(frame, columns = (1,2,3), height = 5, show = \"headings\")\ntree.pack(side = 'left')\n\ntree.heading(1, text=\"Patient ID\")\ntree.heading(2, text=\"Heart Rate\")\ntree.heading(3, text=\"Blood Pressure\")\n\ntree.column(1, width = 100)\ntree.column(2, width = 100)\ntree.column(3, width = 100)\n\nscroll_y = ttk.Scrollbar(frame, orient=\"vertical\", command=tree.yview)\nscroll_y.pack(side = 'right', fill = 'y')\n\n\nscroll_x = ttk.Scrollbar(frame, orient=\"horizontal\", command=tree.xview)\nscroll_x.pack(side = BOTTOM, fill = 'x')\n\ntree.configure(yscrollcommand=scroll_y.set, xscrollcommand=scroll_x.set)\n\nfor val in data:\n    tree.insert('', 'end', values = (val[0], val[1], val[2]) )\n\n\n\t\n###############################################################################################################\ntop.mainloop()\n","repo_name":"ZepengHuo/deep_learning_class","sub_path":"medical_prediction.py","file_name":"medical_prediction.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19547481080","text":"# A function that calls itself from within its own body is a recursive function.\n\n\n# Use a recursive function to compute the factorial\n# n! = 1 x 2 x 3 x ... x n\ndef fact(n):\n    if n == 1:\n        return 1\n    return n * fact(n - 1)\n\n\nfact(5)  #120\n\n\n# Use a recursive function to move the Tower of Hanoi: https://www.cnblogs.com/forever-snow/p/8316218.html https://www.cnblogs.com/haidaojiege/p/7764012.html\ndef move(n, a, b, c):\n    if n == 1:\n        print('move', a, '-->', c)\n    else:\n        move(n - 1, a, c, b)\n        move(1, a, b, c)\n        move(n - 1, b, a, c)\n\n\nmove(3, 'A', 'B', 'C')\n# move A --> C\n# move A --> B\n# move C --> B\n# move A --> C\n# move B --> A\n# move B --> C\n# move A --> C","repo_name":"hacklinshell/learn-python","sub_path":"函数/recur.py","file_name":"recur.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22945249673","text":"\nfrom vsg.rules import token_case as Rule\n\nfrom vsg import token\n\nlTokens = []\nlTokens.append(token.context_reference.context_name)\n\n\nclass rule_501(Rule):\n    '''\n    This rule checks the context name called out in the selected name has proper case.\n\n    |configuring_uppercase_and_lowercase_rules_link|\n\n    **Violation**\n\n    .. code-block:: vhdl\n\n        context my_lib.INTERFACES;\n\n    **Fix**\n\n    .. 
code-block:: vhdl\n\n        context my_lib.interfaces;\n    '''\n\n    def __init__(self):\n        Rule.__init__(self, 'context_ref', '501', lTokens)\n        self.groups.append('case::name')\n        self.configuration.append('case_exceptions')\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/context_ref/rule_501.py","file_name":"rule_501.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"} +{"seq_id":"3239248546","text":"class Solution:\n    def findOcurrences(self, text: str, first: str, second: str) -> List[str]:\n        # split the text\n        # from words[2] to words[n-1], check words[x-1] == second and words[x-2] == first\n        # if yes, append to the result\n        \n        result = []\n        words = text.split()\n        for i in range(2, len(words)):\n            if first == words[i-2] and second == words[i-1]:\n                result.append(words[i])\n        return result","repo_name":"chien-wei/LeetCode","sub_path":"1078_Occurrences_After_Bigram.py","file_name":"1078_Occurrences_After_Bigram.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19053056390","text":"#coding:utf-8\n\"\"\"Write a Python program to print the even numbers from a given list. Go to the editor\nSample List : [1, 2, 3, 4, 5, 6, 7, 8, 9]\nExpected Result : [2, 4, 6, 8]\"\"\"\ndef even_num(liste):\n    new_list=[]\n    for i in liste:\n        if i%2==0:\n            new_list.append(i)\n    return new_list\nprint(even_num( [1, 2, 3, 4, 5, 6, 7, 8, 9]))\n","repo_name":"DonaFidele/PythonExercices","sub_path":"functions/exo_10.py","file_name":"exo_10.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5772257324","text":"import json\nfrom pathlib import Path\nfrom typing import List\n\nfrom processes.estat.utils import download_stats_data_as_CSV\n\n# Retail Price Statistics Survey (Trend Survey)\n# Retail prices of major items by city - prefectural capital cities and cities with a population of 150,000 or more (from January 2000)\n# https://www.e-stat.go.jp/dbview?sid=0003421913\n\n\ndef get_cat02_codes_from_meta_json(\n    meta_json_file_path: Path,\n) -> List[str]:\n    \"\"\"Gets the cat02_code values from the specified json file\"\"\"\n    with open(meta_json_file_path, \"r\") as file:\n        meta_json = json.load(file)\n    class_object_list = meta_json[\"GET_META_INFO\"][\"METADATA_INF\"][\"CLASS_INF\"][\n        \"CLASS_OBJ\"\n    ]\n\n    # https://stackoverflow.com/questions/9868653/find-first-sequence-item-that-matches-a-criterion\n    dict_with_id_of_cat02 = next(\n        obj for obj in class_object_list if obj[\"@id\"] == \"cat02\"\n    )\n    cat02_dict_list = dict_with_id_of_cat02[\"CLASS\"]\n    cat02_code_list = [cat02_dict[\"@code\"] for cat02_dict in cat02_dict_list]\n\n    return cat02_code_list\n\n\ndef download_csv_for_each_cat02_code(\n    download_dir: Path,\n    estat_app_id: str,\n    stats_data_id: str,\n    cat02_code_list: List[str],\n    section_header: bool,\n) -> None:\n    \"\"\"Downloads the csv for each specified cat02_code\"\"\"\n    for cat02_code in cat02_code_list:\n        file_name = f\"{stats_data_id}_{cat02_code}.csv\"\n        if not Path(download_dir, file_name).exists():\n            download_stats_data_as_CSV(\n                download_dir,\n                file_name,\n                estat_app_id,\n                stats_data_id,\n                section_header,\n                cdCat02=cat02_code,\n            )\n            print(f\"Downloaded 
{file_name}\")\n","repo_name":"MToyokura/open-data-viz","sub_path":"data_preparation/processes/commodity_prices/utils/download_utils.py","file_name":"download_utils.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21944757903","text":"from google.cloud import vision\nimport logging\n\ndef web_entities_include_geo_results_uri(uri):\n    \"\"\"Detects web annotations given an image in the file located in\n    Google Cloud Storage, using the geotag metadata in the image to\n    detect web entities.\"\"\"\n    client = vision.ImageAnnotatorClient()\n\n    image = vision.types.Image()\n    image.source.image_uri = uri\n\n    web_detection_params = vision.types.WebDetectionParams(\n        include_geo_results=True)\n    image_context = vision.types.ImageContext(\n        web_detection_params=web_detection_params)\n\n    response = client.web_detection(image=image, image_context=image_context)\n    res = {}\n    res['entity_score'] = -1\n    res['entity_description'] = ''\n    res['best_guess_label'] = ''\n    res['full_match_image_cnt'] = 0\n    res['full_match_image_urls'] = []\n    res['full_match_page_urls'] = []\n    annotations = response.web_detection\n    if annotations.web_entities:\n        for entity in annotations.web_entities:\n            if entity.score <= 0.90:\n                # logging.info(f'Found less probable entity {entity.description}, continue')\n                continue\n            else:\n                # this is assuming the score is sorted in descending order\n                logging.info('\\n\\tScore      : {}'.format(entity.score))\n                logging.info(u'\\tDescription: {}'.format(entity.description))\n                res['entity_score'] = entity.score\n                res['entity_description'] = entity.description\n\n                # here we only generate best_guess_label when the entity is meaningful -- very probable\n                if annotations.best_guess_labels:\n                    for label in annotations.best_guess_labels:\n                        logging.info('\\nBest guess label: {}'.format(label.label))\n                        res['best_guess_label'] = label.label\n                        break\n\n    if annotations.pages_with_matching_images:\n        logging.info('\\n{} Pages with matching images found:'.format(\n            len(annotations.pages_with_matching_images)))\n        res['full_match_image_cnt'] = len(annotations.pages_with_matching_images)\n\n        full_match_image_url_list = []\n        full_match_page_url_list = []\n        for page in annotations.pages_with_matching_images:\n            logging.info('\\n\\tPage url   : {}'.format(page.url))\n            full_match_page_url_list.append(page.url)\n\n            if page.full_matching_images:\n                for image in page.full_matching_images:\n                    logging.info('\\t\\tImage url  : {}'.format(image.url))\n                    full_match_image_url_list.append(image.url)\n        res['full_match_page_urls'] = full_match_page_url_list\n        res['full_match_image_urls'] = full_match_image_url_list\n\n    if res['full_match_image_cnt'] > 0:\n        res['potential_risk'] = True\n    else:\n        res['potential_risk'] = False\n\n    if response.error.message:\n        raise Exception(\n            '{}\\nFor more info on error messages, check: '\n            'https://cloud.google.com/apis/design/errors'.format(\n                response.error.message))\n    return res","repo_name":"arthurcm/influencer","sub_path":"python_functions/cv_gcp.py","file_name":"cv_gcp.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19650249776","text":"# -*- encoding: utf-8 -*-\nimport random\n\nfrom flask import Blueprint, render_template\n\nfrom App.models import User, Address\n\nblue = Blueprint('blue', __name__, template_folder=\"../../templates\")\n\n\n@blue.route(\"/index/\")\ndef index():\n    return 
render_template('index.html')\n\n\n@blue.route(\"/add_user/\")\ndef add_user():\n    u = User()\n    u.u_name = \"lionel %s \" % random.randrange(100)\n    u.a_id = Address.query.order_by(Address.id.desc()).first().id\n    u.save()\n    return \"add user success\"\n\n\n@blue.route(\"/add_address/\")\ndef add_address():\n    a = Address()\n    a.address = \"长岛%s号\" % random.randrange(10000)\n    a.save()\n\n    return \"add address success\"\n","repo_name":"aaaasule/stu_flask","sub_path":"day20/App/views/frist_blue.py","file_name":"frist_blue.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20603338428","text":"for name in ['Christopher', 'Susan']:\n    \tprint(name)\n# In Python we have the built-in function \"range\" for creating a range of numbers. \n\nfor item in range (5, 10):\n    print(item)\n\n# let's calculate the total prices in a shop.\n\nprices = [10, 20, 30]\n\ntotal = 0\n\nfor price in prices:\n    total += price\n# print should be separate\nprint(f\"Total: {total}\")\n    ","repo_name":"MANakib/Python-Code-sample-","sub_path":"forloop.py","file_name":"forloop.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30095385281","text":"from flask import Flask\nfrom flask_cors import CORS, cross_origin\nfrom flask import flash, redirect, render_template, request, session, abort, make_response\nfrom flask import jsonify\nimport os\nfrom os import listdir\nimport pandas\nfrom pandas import Series\nimport psycopg2\n\nimport pickle\nimport codecs\n\nfrom patsy import dmatrices\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\n\nroot = \"/home/mighty/gmc/gmc-backend\"\nDBNAME = os.environ[\"GMC_DBNAME\"]\nHOST = os.environ[\"GMC_HOST\"]\nPORT = os.environ[\"GMC_PORT\"]\nDBUSER = os.environ[\"GMC_USER\"]\nPASSWORD = os.environ[\"GMC_PASSWORD\"]\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'XYZ')\n\nCORS(app)\n\ndef createModel(user, data):\n    y, X = dmatrices('qual~danceability+energy+key+loudness+mode+speechiness+acousticness+instrumentalness+liveness+valence+tempo+duration_ms+time_signature', data, return_type=\"dataframe\")\n\n    y = np.ravel(y)\n\n    model = LogisticRegression()\n    model.fit(X,y)\n\n    model_bytes = codecs.encode(pickle.dumps(model), \"base64\").decode()\n\n    conn = psycopg2.connect(host=HOST ,database=DBNAME, user=DBUSER, password=PASSWORD)\n    cur = conn.cursor()\n\n    query = \"\"\"INSERT INTO gmc VALUES(%s, %s)\"\"\"\n    cur.execute(query, (user, model_bytes))\n\n    conn.commit()\n    cur.close()\n\n\ndef prediction(user, data):\n    # Retrieve the model from the database\n    conn = psycopg2.connect(host=HOST ,database=DBNAME, user=DBUSER, password=PASSWORD)\n    cur = conn.cursor()\n\n    query = \"\"\"SELECT model FROM gmc WHERE id=%s\"\"\"\n    cur.execute(query, (user,))\n    b = cur.fetchone()\n\n    new_model = pickle.loads(codecs.decode(b[0].encode(), \"base64\"))\n\n    conn.commit()\n    cur.close()\n\n    y, X = dmatrices('qual ~ danceability + energy + key + loudness + mode + speechiness + acousticness + instrumentalness + liveness + valence + tempo + duration_ms + time_signature',\n                    data, return_type=\"dataframe\")\n\n    preds = new_model.predict(X)\n\n    # the sk-learn model is much different from the R version, figure out why\n    # later, but the software engineering aspect is more important right now.\n\n    list = []\n    for x in preds:\n        if x == 1:\n            
list.append('good')\n else:\n list.append('bad')\n\n return list\n\n@app.route(\"/train\", methods=['POST'])\ndef train():\n USER = request.get_json()[\"user\"]\n GOOD_AUDIO_FEATURES = request.get_json()[\"good\"]\n BAD_AUDIO_FEATURES = request.get_json()[\"bad\"]\n\n df = pandas.DataFrame(eval(str(GOOD_AUDIO_FEATURES)))\n col = []\n for x in range(df.shape[0]):\n col.append(1)\n df['qual'] = Series(col)\n\n col = []\n df2 = pandas.DataFrame(eval(str(BAD_AUDIO_FEATURES)))\n for x in range(df2.shape[0]):\n col.append(0)\n df2['qual'] = Series(col)\n\n frames = [df, df2]\n data = pandas.concat(frames, sort=False)\n\n # Rather than saving a .csv file, how can we directly use the pandas data\n # frame from python and send it to the R kernel to create the model?\n # data.to_csv('models/' + str(USER) + '.csv')\n\n createModel(USER, data)\n\n # do something with the audio features. i.e: Call RPy2 and train model.\n return jsonify({\"result\": True})\n\n@app.route(\"/models/\", methods=['GET'])\ndef models(user):\n print(\"Checking for model for \", user, \"...\")\n\n conn = psycopg2.connect(host=HOST ,database=DBNAME, user=DBUSER, password=PASSWORD)\n cur = conn.cursor()\n\n query = \"\"\" SELECT id FROM gmc WHERE id = %s\"\"\"\n cur.execute(query, (user,))\n\n res = cur.fetchone()\n if res is None:\n print(\"No model for \", user, \" exists. Create a new model.\")\n fileExists = False\n else:\n print(\"A model exists for \", user, \"! Welcome back!\")\n fileExists = True\n\n cur.close()\n\n return jsonify({\"result\": fileExists})\n\n@app.route(\"/delete/\", methods=['GET'])\ndef delete(user):\n conn = psycopg2.connect(host=HOST ,database=DBNAME, user=DBUSER, password=PASSWORD)\n cur = conn.cursor()\n\n query = \"\"\"DELETE FROM gmc WHERE id = %s\"\"\"\n cur.execute(query, (user,))\n print(\"Deleted \", cur.rowcount, \" rows in the DB.\")\n\n conn.commit()\n cur.close()\n\n return jsonify({\"result\": True})\n\n@app.route(\"/all\", methods=['GET'])\ndef all():\n l = os.listdir()\n print(l)\n return ('yo')\n\n@app.route(\"/predict/\", methods=['POST'])\ndef predict(user):\n\n AUDIO_FEATURES = request.get_json()[\"test\"]\n\n df = pandas.DataFrame(eval(str(AUDIO_FEATURES)))\n col = []\n for x in range(df.shape[0]):\n col.append('bad')\n df['qual'] = Series(col)\n\n res = prediction(user, df)\n print(res)\n\n return jsonify({\"results\": res})\n\n@app.route('/')\ndef index():\n return (\"hello world\")\n","repo_name":"mchen0037/gmc-backend","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23423857221","text":"def main():\n cases = int(input())\n line = ''\n file = open('output.txt', 'w')\n for columbia in range(cases):\n case = columbia + 1\n line += 'Case #{:d}: '.format(case)\n ln = input().split(\" \")\n t = 0\n C = float(ln[0])\n F = float(ln[1])\n f = 2\n X = float(ln[2])\n while (X/f > (C/f + (X)/(F+f))):\n t += C/f\n f = f+F\n line += \"{:.7f}\".format(t+X/f)\n line += '\\n'\n file.write(line)\nmain()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2092.py","file_name":"2092.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7061305388","text":"from .config import get_value, set_value\nfrom .repository import require_repo\n\n\ndef list_plugins(directory=None):\n \"\"\"Gets a list of the installed 
plugins.\"\"\"\n    repo = require_repo(directory)\n    plugins = get_value(repo, 'plugins')\n    if not plugins or not isinstance(plugins, dict):\n        return None\n    return plugins.keys()\n\n\ndef add_plugin(plugin, directory=None):\n    \"\"\"Adds the specified plugin. This returns False if it was already added.\"\"\"\n    repo = require_repo(directory)\n    plugins = get_value(repo, 'plugins', expect_type=dict)\n    if plugin in plugins:\n        return False\n\n    plugins[plugin] = {}\n    set_value(repo, 'plugins', plugins)\n    return True\n\n\ndef remove_plugin(plugin, directory=None):\n    \"\"\"Removes the specified plugin.\"\"\"\n    repo = require_repo(directory)\n    plugins = get_value(repo, 'plugins', expect_type=dict)\n    if plugin not in plugins:\n        return False\n\n    del plugins[plugin]\n    set_value(repo, 'plugins', plugins)\n    return True\n\n\ndef get_plugin_settings(plugin, directory=None):\n    \"\"\"Gets the settings for the specified plugin.\"\"\"\n    repo = require_repo(directory)\n    plugins = get_value(repo, 'plugins')\n    return plugins.get(plugin) if isinstance(plugins, dict) else None\n","repo_name":"joeyespo/gitpress","sub_path":"gitpress/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"61"} +{"seq_id":"23423857221","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\n# Author: 芳芳\n\n'''\nHow to run a single test case from a module:\npytest.main([\"-s\",\"-v\",\"test_book.py::TestBook::test_book_002\"])\ntest_book.py: the module\nTestBook: the class\ntest_book_002: the test case\n'''\n\n'''\nThe point of adding a book is to obtain its id; only then can the book's information be viewed\nand deleted later, which involves correlating parameters. The approach: write the ID to a file,\nthen read it back out and pass it in. This also involves handling the url.\n'''\nfrom base.method import Requests\nfrom utils.operationYaml import OperationYaml\nfrom utils.operationExcel import OperationExcel\nfrom common.public import *\nimport pytest\nimport json\n\n\nobj=Requests()\nobjYaml=OperationYaml()\n\n\n\nclass TestBook:\n\texcel=OperationExcel()\n\tobj=Requests()\n\n\tdef result(self,r,row):\n\t\t'''Wrap the expected-result and status-code assertions in one function'''\n\t\tassert r.status_code==200\n\t\tassert self.excel.getExpect(row=row) in json.dumps(r.json(), ensure_ascii=False)\n\n\tdef test_book_001(self):\n\t\t'''Get the information for all books'''\n\t\tr=self.obj.get(url=self.excel.getUrl(row=1))\n\t\t# print(r.json())\n\t\t# print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')\n\t\t# print(self.excel.getExpect(row=2))\n\t\t# print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')\n\t\tassert self.excel.getExpect(row=1) in json.dumps(r.json(),ensure_ascii=False)\n\t\tself.result(r=r,row=1)\n\t#\n\tdef test_book_002(self):\n\t\t'''Add a book'''\n\t\tr=self.obj.post(\n\t\t\turl=self.excel.getUrl(row=2),\n\t\t\tjson=self.excel.getJson(row=2)\n\t\t)\n\t\t# print(r.text)\n\t\t# print(type(json.dumps(r.json(), ensure_ascii=False))) # result:\n\t\t# print(json.dumps(r.json(),ensure_ascii=False))\n\t\t# print(type(r.json())) #\n\t\t# print(r.json())\n\t\t# print(type(r.json()[0]['datas']['id'])) # get the id; result: \n\t\tprint(r.json()[0]['datas']['id']) # get the id\n\t\tbookID=r.json()[0]['datas']['id']\n\t\twriteContent(content=bookID) # write the ID into bookID\n\t\t# self.result(r=r,row=2) # the assertion is wrapped in a function; calling it here raises an error\n\t\tassert self.excel.getExpect(row=2) in json.dumps(r.json(), ensure_ascii=False) # assertion\n\n\tdef test_book_003(self):\n\t\t'''View a book'''\n\t\tr=self.obj.get(url=self.excel.getUrl(row=3))\n\t\tprint(r.url) # get the response url\n\t\tprint(r.json())\n\t\tprint(r.json()['datas'][0]['id'])\n\t\tself.result(r=r,row=3)\n\t\t# self.excel.getExpect(row=2) in json.dumps(r.json(),ensure_ascii=False)\n\n\tdef 
test_book_004(self):\n\t\t'''Edit a book's information'''\n\t\tr=self.obj.put(\n\t\t\turl=self.excel.getUrl(row=4),\n\t\t\tjson=self.excel.getJson(row=4)\n\t\t)\n\t\tself.result(r=r, row=4)\n\n\tdef test_book_005(self):\n\t\t'''Delete one record'''\n\t\tr=self.obj.delete(\n\t\t\turl=self.excel.getUrl(row=5)\n\t\t)\n\t\t# print(r.json)\n\t\tself.result(r=r,row=5)\n\n\nif __name__=='__main__':\n\tpytest.main([\"-s\", \"-v\", \"test_book.py\"])\n\t# pytest.main([\"-s\",\"-v\",\"test_book.py::TestBook::test_book_002\"])\n\t'''How to run a single test case from the module'''\n\n","repo_name":"leguanpangpang/pn","sub_path":"tests/test_book.py","file_name":"test_book.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33659008503","text":"import requests\nimport csv\nfrom bs4 import BeautifulSoup as bs\ndef get_html(url):\n    r = requests.get(url) #Response\n    return r.content #html_code\ndef get_streets(html):\n    streets = []\n    soup = bs(html,'html.parser')\n    links = soup.find_all('div', class_='span4')\n    for div in links:\n        for a in div.find_all('a'):\n            streets.append(a.get_text().strip().split(' '))\n    return list(map(lambda x: (' '.join(x[:-1]), x[-1]),streets))\ndef write_csv(data):\n    with open('streats.csv', 'a') as f:\n        writer = csv.writer(f)\n        for row in data:\n            writer.writerow(row)\n\n\ndef main():\n    page = get_html('https://kladr-rf.ru/46/000/001/')\n    streets = get_streets(page)\n    write_csv(streets)\n\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"spectrmen/kivy_bord","sub_path":"ParsingStreets.py","file_name":"ParsingStreets.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16244925037","text":"from backorder.exception import BackOrderException\nfrom backorder.logger import logging\nfrom backorder.utils import load_object\nfrom backorder.model_resolving import ModelResolver\nfrom datetime import datetime\nfrom backorder.utils import TARGET_COLUMN\nimport os,sys \nimport pandas as pd \nimport numpy as np\nPREDICTION_DIR = 'predictions'\n\ndef start_batch_prediction(df=None ,input_file_path='sample_bo.parquet.gzip'):\n    try:\n        if 'sku' in df.columns:\n            df.drop('sku',axis=1,inplace=True)\n        model_resolver = ModelResolver(model_registry='saved_models')\n        logging.info(f\"Shape of the data set {df.shape}\")\n        logging.info(\"Replacing the null values with np.Nan in Dataset\")\n        df.replace(to_replace='na',value=np.nan,inplace=True)\n        df.dropna(thresh=10,inplace=True)\n        logging.info(\"Handling unusual values in 2 columns\")\n        df['perf_6_month_avg'].replace({-99:np.nan},inplace=True)\n        df['perf_12_month_avg'].replace({-99:np.nan},inplace=True)\n\n        logging.info(\"Loading the numerical transformer\")\n        numerical_transformer = load_object(model_resolver.get_latest_transformer_path())\n        input_feature_names1 = numerical_transformer.feature_names_in_\n        df[input_feature_names1] = numerical_transformer.transform(df[input_feature_names1])\n\n        logging.info(\"Loading the categorical Transformer\")\n        catergorical_transformer = load_object(model_resolver.get_latest_categorical_encoder_path())\n        input_feature_names2 = catergorical_transformer.feature_names_in_\n        df[input_feature_names2] = catergorical_transformer.transform(df[input_feature_names2])\n        \n        logging.info(\"Loading the latest Model\")\n\n        model = load_object(model_resolver.get_latest_model_path())\n        logging.info('predicting using the latest model')\n        ypred = 
model.predict(df.drop(TARGET_COLUMN,axis=1))\n \n logging.info(\"Loading the target encoder\")\n targer_encoder = load_object(model_resolver.get_latest_target_encoder_path()) \n pred = targer_encoder.inverse_transform(ypred.astype(int))\n\n df[input_feature_names2] = catergorical_transformer.inverse_transform(df[input_feature_names2])\n df['prediction'] = ypred\n df['cat_prediction'] = pred\n prediction_file_name = os.path.basename(input_file_path).replace(\".parquet.gzip\",f\"{datetime.now().strftime('%m%d%Y__%H%M%S')}.csv\")\n prediction_file_path = os.path.join(PREDICTION_DIR,prediction_file_name)\n #df.to_csv(prediction_file_path,index=False,header=True)\n return df\n \n \n except Exception as e:\n raise BackOrderException(error=e, error_detail=sys)\n","repo_name":"medashabari/BackorderPrediction","sub_path":"backorder/pipeline/batch_prediction.py","file_name":"batch_prediction.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408653535","text":"import urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\nclass newbosneighborhoods(dml.Algorithm):\n contributor = 'jtbloom_rfballes_medinad'\n reads = []\n writes = ['jtbloom_rfballes_medinad.neighborhoods']#'medinad.meters'\n\n @staticmethod\n def execute(trial = False):\n '''Retrieve some data sets (not using the API here for the sake of simplicity).'''\n startTime = datetime.datetime.now()\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('jtbloom_rfballes_medinad', 'jtbloom_rfballes_medinad')\n\n url = 'http://datamechanics.io/data/jb_rfb_dm_proj2data/bos_neighborhoods_shapes.json' #'http://bostonopendata-boston.opendata.arcgis.com/datasets/962da9bb739f440ba33e746661921244_9.geojson'\n response = urllib.request.urlopen(url).read().decode(\"utf-8\")\n r = json.loads(response)\n s = json.dumps(r, sort_keys=True, indent=2)\n\n\n neighborhood_list = list(r)\n nid_list = [{'Neighborhood':x[\"fields\"][\"neighborho\"], 'Geo Shape':x[\"fields\"][\"geo_shape\"]} for x in neighborhood_list]\n\n\n #print(neighborhood_list[0]['fields']['geo_shape']['coordinates'][0][0])\n\n \n\n repo.dropCollection(\"jtbloom_rfballes_medinad.neighborhoods\")\n repo.createCollection(\"jtbloom_rfballes_medinad.neighborhoods\")\n repo['jtbloom_rfballes_medinad.neighborhoods'].insert_many(nid_list)\n repo['jtbloom_rfballes_medinad.neighborhoods'].metadata({'complete':True})\n print(repo['jtbloom_rfballes_medinad.neighborhoods'].metadata())\n\n \n repo.logout()\n\n endTime = datetime.datetime.now()\n\n return {\"start\":startTime, \"end\":endTime}\n \n @staticmethod\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n '''\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n        document describing that invocation event.\n        '''\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate('jtbloom_rfballes_medinad', 'jtbloom_rfballes_medinad')\n        doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in # format.\n        doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\n        doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n        doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n        doc.add_namespace('dm', 'http://datamechanics.io/data/jb_rfb_dm_proj2data/')\n        \n        this_script = doc.agent('dmLjtbloom_rfballes_medinad#bos_neighborhoods_shapes', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n        neighborhood_resource = doc.entity('nei:boston-neighborhoods', {'prov:label':'boston neighborhoods', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'json'})\n        \n        this_run = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n        doc.wasAssociatedWith(this_run, this_script)\n\n        doc.usage(this_run, neighborhood_resource, startTime, None,\n                  {prov.model.PROV_TYPE:'ont:Retrieval',\n                  'ont:Query':'/boston-neighborhoods.json'\n                  }\n            )\n\n        neighbor = doc.entity('dat:medinad#neighbor', {prov.model.PROV_LABEL:'NEIGHBOR', prov.model.PROV_TYPE:'ont:DataSet'})\n        doc.wasAttributedTo(neighbor, this_script)\n        doc.wasGeneratedBy(neighbor, this_run, endTime)\n        doc.wasDerivedFrom(neighbor, neighborhood_resource, this_run, this_run, this_run)\n\n\n        repo.logout()\n                  \n        return doc\n\nnewbosneighborhoods.execute()\n#doc = newbosneighborhoods.provenance()\n#print(doc.get_provn())\n#print(json.dumps(json.loads(doc.serialize()), indent=4))\n\n## eof\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"jtbloom_rfballes_medinad/newbosneighborhoods.py","file_name":"newbosneighborhoods.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7113034023","text":"from random import randrange\n\narray_to_revert = [randrange(0, 1000) for _ in range(20)]\n\n\ndef reverse_in_place(array):\n    last_item_index = len(array) - 1\n    visited_indexes = []\n    for index, _ in enumerate(array):\n        if (last_item_index - index) not in visited_indexes:\n            temp = array[last_item_index - index]\n            array[last_item_index - index] = array[index]\n            array[index] = temp\n            visited_indexes.append(index)\n    return array\n\n\nif __name__ == \"__main__\":\n    print(\"original array: \", array_to_revert)\n    print(\"reversed array: \", reverse_in_place(array_to_revert))","repo_name":"radekBednarik/this-and-that","sub_path":"reverse_array_in_place.py","file_name":"reverse_array_in_place.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4887430311","text":"from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, '__readme.md'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(\n    name='PyFFF',\n    version='0.1.0',\n    description='Python For Filesystem Forensics',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    url='https://gitlab.com/xinhuang/pyfff',\n    author='Xin Huang',\n    author_email='xinhuang@protomail.com',\n    
packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n install_requires=['tabulate', 'hexdump', 'python-magic', 'ipython', 'mypy', 'Pillow'],\n extras_require={\n 'dev': [],\n 'test': [],\n },\n test_suite='nose.collector',\n tests_require=['nose'],\n)\n","repo_name":"xinhuang/PyFFF","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23549407591","text":"#!/usr/bin/python3\n\nimport itertools, sys\n\ndef gf2div(dividend, divisor):\n \"\"\"Does a Galois field division in a Galois field of characteristic 2.\"\"\"\n divisor = tuple(itertools.dropwhile(lambda x: not x,\n ((x and 1 or 0) for x in divisor)))\n dividend = list(itertools.dropwhile(lambda x: not x,\n ((x and 1 or 0) for x in dividend)))\n quotient = []\n while len(divisor) <= len(dividend):\n if dividend[0]:\n quotient.append(1)\n for i, bit in enumerate(divisor):\n dividend[i] = dividend[i] ^ bit\n dividend = dividend[1:]\n while (len(dividend) > 0) and (not dividend[0]):\n if len(dividend) >= len(divisor):\n quotient.append(0)\n dividend = dividend[1:]\n return (quotient, dividend)\n\ndef read_pancakes():\n cases = int(sys.stdin.readline())\n for count in range(1, cases + 1):\n pancakes = sys.stdin.readline().strip()\n pancakes, flipperbits = pancakes.split()\n flipperbits = (1,) * int(flipperbits)\n pancakes = tuple((1 if c == '-' else 0) for c in pancakes)\n quot, rem = gf2div(pancakes, flipperbits)\n if len(rem) > 0:\n print(\"Case #{}: IMPOSSIBLE\".format(count))\n else:\n print(\"Case #{}: {}\".format(count, sum(quot)))\n\nif __name__ == '__main__':\n read_pancakes()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/836.py","file_name":"836.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17771037422","text":"import csv\n\nfrom datetime import datetime\nfrom django.core.management.base import BaseCommand\n\n# from order.models import Order, OrderMethod, OrderStatus, Ticket, TicketStatus, MethodEnum, StatusEnum, Client, ClientCredit\n# from users.models import User, UserAuth, Role, UserEmail, UserPhone, UserAddress, UserEmailType\n# from params.models import Product, ProductStock, SchoolYear, CategoryEnum, SubCategoryEnum\n# from registration.models import Child, Sibling, SiblingChild, Record, CAF, Health, ChildPAI, ChildClass, ChildQuotient\n\nfrom order.models import Order, OrderMethod, OrderStatus, Ticket, TicketStatus, OrderTypeEnum, MethodEnum, StatusEnum\nfrom users.models import User, Role\nfrom params.models import Product\nfrom registration.models import Sibling, SiblingChild\n\n# from client_intern.utils import OrderHelper\n\nclass Command(BaseCommand):\n help = 'Report some products'\n\n def run(self, filename):\n\n def get_parent (pk):\n try:\n parent = User.objects.get(pk=pk)\n parent.roles.get(slug='parent')\n return pk\n except User.DoesNotExist:\n raise Exception(f'Le parent ({pk}) n\\'existe pas.')\n except Role.DoesNotExist:\n raise Exception('Ce n\\'est pas un parent.')\n\n def get_child (pk):\n try:\n child = User.objects.get(pk=pk)\n child.roles.get(slug='child')\n return pk\n except User.DoesNotExist:\n raise Exception(f'L\\'enfant ({pk}) n\\'existe pas.')\n except Role.DoesNotExist:\n raise Exception('Ce n\\'est pas un enfant.')\n\n def get_products (raw):\n pks = raw.split(' ')\n res = []\n for pk in pks:\n 
if not pk:\n continue\n \n if not Product.objects.filter(pk=pk):\n raise Exception(f'Le produit ({pk}) n\\'existe pas.')\n else:\n res.append(pk)\n \n return res\n\n def test_sibling (parent_pk, child_pk):\n try:\n sibling = Sibling.objects.get(parent=parent_pk)\n sibling.children.get(child=child_pk)\n except Sibling.DoesNotExist:\n raise Exception('La famille n\\'existe pas.')\n except SiblingChild.DoesNotExist:\n raise Exception('L\\'enfant n\\'est pas lié.')\n\n # Parent (0), Enfant 1 (1), Enfant 2 (2), Enfant 3 (3), Enfant 4 (4), Produits (5), Ref (6), Mnt Esp (7), Mnt Chk (8), Ref Chk (9)\n with open(filename, 'r', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n lcount = 0\n for row in reader:\n # print (row)\n if lcount == 0:\n titles = []\n for i, cell in enumerate(row):\n titles.append(f'{cell} ({i})')\n print (', '.join(titles))\n else:\n print (f'Ligne {lcount + 1}')\n try:\n parent = get_parent(row[0])\n\n # Test children\n children = []\n for i in range(1, 5):\n if not row[i]:\n continue\n \n child = get_child(row[i])\n # print (f'parent: {parent}, child: {child}')\n\n test_sibling(parent, child)\n children.append(child)\n\n # Test products\n products = get_products(row[5])\n\n # Test Methods\n if not row[7] and not row[8]:\n raise Exception('Aucune méthode de paiement trouvée.')\n\n # Create method\n mid = OrderMethod.objects.latest('id').id + 1\n amount = 0\n method = OrderMethod(id=mid)\n if row[7]:\n amount += int(row[7])\n method.amount = int(row[7])\n method.method = MethodEnum.CASH\n\n elif row[8]:\n amount += int(row[8])\n method.amount = int(row[8])\n method.method = MethodEnum.CHECK\n method.reference = row[9]\n\n # print (f'amount: {method.amount}, ref: {method.reference}, method: {method.method}')\n\n # Create order\n id = Order.objects.latest('id').id + 1\n order = Order.objects.create(\n id=id,\n name='Paiement ALSH/PERI 2020-2021',\n type=OrderTypeEnum.DIR,\n reference=row[6],\n payer=parent,\n caster=63,\n amount=amount\n )\n order._add_status(StatusEnum.COMPLETED)\n\n method.order = order\n method.save()\n\n # print (f'ref: {row[6]}, amount: {amount}')\n \n # Create tickets\n TARIFS_PERI = [20, 16, (40 / 3), 15]\n PRICE = TARIFS_PERI[len(children) - 1]\n\n for child in children:\n for product in products:\n tid = Ticket.objects.latest('id').id + 1\n ticket = Ticket.objects.create(\n id=tid,\n order=order,\n payee=child,\n price=PRICE,\n product=product\n )\n ticket._add_status(StatusEnum.COMPLETED)\n # print (f'payee: {child}, price: {PRICE}, product: {product}')\n\n except Exception as e:\n print (f'Exception ({e.args[0]})')\n\n lcount += 1\n\n\n def handle(self, *args, **kwargs):\n self.run('project/users/management/commands/payment_GOND_05_01_2021.csv')\n\n\n \n","repo_name":"adufi/uperiscolaire.com","sub_path":"project/users/management/commands/_payment_chk_batch.py","file_name":"_payment_chk_batch.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23385785651","text":"#print \"hello world\"\n\ndef solv():\n boo=1\n for i in xrange(raws):\n #temp=array[i][0]\n #boo=1\n #d_v=array[i][0]\n for j in xrange(colomns):\n d_v=array[i][j]\n for s in xrange(colomns):\n if d_v currentMaxProb): # greater probability for this day\n currentMaxProb = probs[i]\n else: # new day\n # Store previous days probability\n precipProbs.append([currentDay, currentMaxProb])\n\n # Update current day information\n currentDay = day\n currentMaxProb = 
probs[i]\n \n # Add last day's information\n precipProbs.append([day, currentMaxProb])\n \n except Exception as e: # failed to get weather forecast\n # Get traceback\n import traceback\n tb = traceback.format_exc()\n\n message = \"NWSPredict - An error occurred of type \" + type(e).__name__\n raise ModuleException(message, e, tb)\n \n \n return precipProbs\n\n","repo_name":"apogeecmb/smartSprinkler","sub_path":"nwsPredict.py","file_name":"nwsPredict.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26026380985","text":"import copy\r\nimport json\r\nimport math\r\nimport os\r\nimport shutil\r\n\r\n\r\ndef assert_dir(path):\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n\r\ndef assert_not_dir(path):\r\n if os.path.exists(path):\r\n shutil.rmtree(path)\r\n\r\n\r\ndir_pack = '../out/tinkerers_statures'\r\ndir_origins = dir_pack + '/data/tinkerer/origins/'\r\ndir_powers = dir_pack + '/data/tinkerer/powers/'\r\nassert_not_dir(dir_pack)\r\nshutil.copytree('override', dir_pack)\r\nassert_dir(dir_origins)\r\nassert_dir(dir_powers)\r\n\r\n# origin_template = {\r\n# \"origin\": {\r\n# \"codename\": \"demishort\",\r\n# \"name\": \"Demi-Short\",\r\n# \"description\": \"Demi-Short\",\r\n# \"icon\": \"Demi-Short\",\r\n# \"order\": 0,\r\n# \"impact\": 0,\r\n# },\r\n# \"power\": {\r\n# \"codename\": \"demishort\",\r\n# \"name\": \"Demi-Short\",\r\n# \"description\": \"Demi-Short\",\r\n# \"scale\": 0,\r\n# }\r\n# }\r\n\r\n# We should redo this using a csv file.\r\norigins = [\r\n {'origin': {'codename': 'demishort', 'name': 'Demi-Short', 'description': 'Half-Short', 'icon': 'minecraft:honey_bottle'},\r\n 'power': {'codename': 'demishort', 'name': 'Demi-Short', 'desc-adjective': 'a bit short', 'desc-height': 1.6, 'desc-sneak-height': 1.4, 'desc-width': 0.5, 'scale-height': 0.9, 'scale-width': 0.9}},\r\n {'origin': {'codename': 'demitall', 'name': 'Demi-Tall', 'description': 'Half-Tall', 'icon': 'minecraft:cookie'},\r\n 'power': {'codename': 'demitall', 'name': 'Demi-Tall', 'desc-adjective': 'slightly taller than average', 'desc-height': 1.9, 'desc-sneak-height': 1.6, 'desc-width': 0.6, 'scale-height': 1.05, 'scale-width': 1.05}},\r\n {'origin': {'codename': 'hemidemishort', 'name': 'HemiDemi-Short', 'description': '1/4 Short', 'icon': 'minecraft:potion'},\r\n 'power': {'codename': 'hemidemishort', 'name': 'HemiDemi-Short', 'desc-adjective': 'ever-so-slightly short', 'desc-height': 1.7, 'desc-sneak-height': 1.5, 'desc-width': 0.6, 'scale-height': 0.95, 'scale-width': 0.95}},\r\n {'origin': {'codename': 'impish', 'name': 'Impish', 'description': '1.5x Short\\nMinor gameplay impact.', 'icon': 'minecraft:water_bucket'},\r\n 'power': {'codename': 'impish', 'name': 'Impish', 'desc-adjective': 'impishly short', 'desc-height': 1.3, 'desc-sneak-height': 1, 'desc-width': 0.4, 'scale-height': 0.66, 'scale-width': 0.66}},\r\n {'origin': {'codename': 'oversize1', 'name': 'Oversize', 'description': \"'Neat.'\\nMedium gameplay impact.\", 'icon': 'minecraft:pumpkin_pie'},\r\n 'power': {'codename': 'oversize1', 'name': 'Oversize', 'desc-adjective': 'oversized', 'desc-height': 3, 'desc-sneak-height': 2.5, 'desc-width': 1, 'scale-height': 1.62, 'scale-width': 1.62}},\r\n {'origin': {'codename': 'oversize2', 'name': 'Oversize II', 'description': \"'Sheesh.'\\nMajor gameplay impact.\", 'icon': 'minecraft:cake'},\r\n 'power': {'codename': 'oversize2', 'name': 'Oversize II', 'desc-adjective': 'very oversized', 
'desc-height': 3.5, 'desc-sneak-height': 3, 'desc-width': 1.2, 'scale-height': 1.92, 'scale-width': 1.92}},\r\n {'origin': {'codename': 'oversize3', 'name': 'Oversize III', 'description': \"'Yikes.'\\nMajor gameplay impact.\", 'icon': 'minecraft:hay_block'},\r\n 'power': {'codename': 'oversize3', 'name': 'Oversize III', 'desc-adjective': 'extremely oversized', 'desc-height': 4, 'desc-sneak-height': 3.5, 'desc-width': 1.3, 'scale-height': 2.2, 'scale-width': 2.2}},\r\n {'origin': {'codename': 'semishort', 'name': 'Semi-Short', 'description': '3/4 Short', 'icon': 'minecraft:splash_potion'},\r\n 'power': {'codename': 'semishort', 'name': 'Semi-Short', 'desc-adjective': 'quite short', 'desc-height': 1.6, 'desc-sneak-height': 1.3, 'desc-width': 0.5, 'scale-height': 0.85, 'scale-width': 0.85}},\r\n {'origin': {'codename': 'short', 'name': 'Short', 'description': '\\'It\\'s a bit drafty actually.\\'', 'icon': 'minecraft:experience_bottle'},\r\n 'power': {'codename': 'short', 'name': 'Short', 'desc-adjective': 'very short', 'desc-height': 1.5, 'desc-sneak-height': 1.2, 'desc-width': 0.6, 'scale-height': 0.8, 'scale-width': 0.8}},\r\n {'origin': {'codename': 'sized', 'name': 'Sized', 'description': '\\'This is fine too.\\'', 'icon': 'minecraft:milk_bucket'},\r\n 'power': {'codename': 'sized', 'name': 'Sized', 'desc-adjective': 'average sized', 'desc-height': 1.8, 'desc-sneak-height': 1.5, 'desc-width': 0.6, 'scale-height': 1, 'scale-width': 1}},\r\n {'origin': {'codename': 'tall', 'name': 'Tall', 'description': '\\'How\\'s the weather down there?\\'', 'icon': 'minecraft:apple'},\r\n 'power': {'codename': 'tall', 'name': 'Tall', 'desc-adjective': 'taller than average', 'desc-height': 2, 'desc-sneak-height': 1.7, 'desc-width': 0.7, 'scale-height': 1.1, 'scale-width': 1.1}},\r\n {'origin': {'codename': 'taller', 'name': 'Taller', 'description': \"'How's the weather down *there*?'\\nMinor gameplay impact.\", 'icon': 'minecraft:bread'},\r\n 'power': {'codename': 'taller', 'name': 'Taller', 'desc-adjective': 'tall', 'desc-height': 2.2, 'desc-sneak-height': 1.8, 'desc-width': 0.7, 'scale-height': 1.2, 'scale-width': 1.2}},\r\n {'origin': {'codename': 'tallest', 'name': 'Tallest', 'description': \"'Watch your head.'\\nMinor gameplay impact.\", 'icon': 'minecraft:mushroom_stew'},\r\n 'power': {'codename': 'tallest', 'name': 'Tallest', 'desc-adjective': 'extremely tall', 'desc-height': 2.4, 'desc-sneak-height': 2, 'desc-width': 0.8, 'scale-height': 1.33, 'scale-width': 1.33}},\r\n {'origin': {'codename': 'orcish', 'name': 'Orcish', 'description': \"'Intimidated?'\\nMinor gameplay impact.\", 'icon': 'minecraft:mushroom_stew'},\r\n 'power': {'codename': 'orcish', 'name': 'Orcish', 'desc-adjective': 'larger and broader', 'desc-height': 2.4, 'desc-sneak-height': 2, 'desc-width': 1, 'scale-height': 1.33, 'scale-width': 1.56}}]\r\n\r\nwith open(\"./template/origin.json\", \"r\") as origin_template_file:\r\n with open(\"./template/power.json\", \"r\") as power_template_file:\r\n origin_template = json.load(origin_template_file)\r\n power_template = json.load(power_template_file)\r\n\r\n for origin_dict in origins:\r\n current_origin_file = copy.deepcopy(origin_template)\r\n current_power_file = copy.deepcopy(power_template)\r\n\r\n current_origin_file[\"powers\"] = [\"tinkerer:\" + origin_dict[\"power\"][\"codename\"]]\r\n current_origin_file[\"icon\"] = origin_dict[\"origin\"][\"icon\"]\r\n current_origin_file[\"name\"] = origin_dict[\"origin\"][\"name\"]\r\n current_origin_file[\"description\"] = 
origin_dict[\"origin\"][\"description\"]\r\n\r\n current_power_file[\"name\"] = origin_dict[\"power\"][\"name\"]\r\n current_power_file[\"description\"] = \"You\\'re \" + origin_dict[\"power\"][\"desc-adjective\"] + \" (\" + str(math.floor(origin_dict[\"power\"][\"scale-height\"] * 100)) + \"%)!\" + \\\r\n \"\\nHeight: \" + str(origin_dict[\"power\"][\"desc-height\"]) + (\" Block\" if origin_dict[\"power\"][\"desc-height\"] == 1 else \" Blocks\") + \\\r\n \"\\nSneak Height: \" + str(origin_dict[\"power\"][\"desc-sneak-height\"]) + (\" Block\" if origin_dict[\"power\"][\"desc-sneak-height\"] == 1 else \" Blocks\") + \\\r\n \"\\nWidth: \" + str(origin_dict[\"power\"][\"desc-width\"]) + (\" Block Exactly\" if origin_dict[\"power\"][\"desc-width\"] == 1 else \" Blocks\")\r\n current_power_file[\"entity_action_chosen\"][\"actions\"][0][\"command\"] = str(current_power_file[\"entity_action_chosen\"][\"actions\"][0][\"command\"]).replace(\"{{ scale_height }}\", str(origin_dict[\"power\"][\"scale-height\"]))\r\n current_power_file[\"entity_action_chosen\"][\"actions\"][1][\"command\"] = str(current_power_file[\"entity_action_chosen\"][\"actions\"][1][\"command\"]).replace(\"{{ scale_width }}\", str(origin_dict[\"power\"][\"scale-width\"]))\r\n\r\n with open(dir_origins + origin_dict[\"origin\"][\"codename\"] + \".json\", \"w\") as out_file:\r\n json.dump(current_origin_file, out_file, indent=4, sort_keys=True)\r\n with open(dir_powers + origin_dict[\"power\"][\"codename\"] + \".json\", \"w\") as out_file:\r\n json.dump(current_power_file, out_file, indent=4, sort_keys=True)\r\n","repo_name":"sisby-folk/tinkerers-statures","sub_path":"datagen/tinkerers_statures/tinkerers_statures.py","file_name":"tinkerers_statures.py","file_ext":"py","file_size_in_byte":8177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25443350516","text":"#! 
/usr/bin/python3\n# python introduction\n#arquivo = open(\"git.txt\",\"r\")\n#print(arquivo.read())\n#arquivo.close()\n\n#another, better way to open a file\nwith open (\"frutas1.txt\",\"a\") as f:\n    #print(f.readline(),end = \"\")#returns the lines as a list\n    #print(f.readline(),end = \"\")#returns the lines as a list\n    #f.seek(0)#resets the cursor\n    #print(f.readline(),end = \"\")#returns the lines as a list\n    listas = [\"limao\",\"uva\",\"maca\"]\n    i=0\n    for lista in listas:\n        i+=1\n        f.write(\"{}- {} \\n\".format(i,lista))\n    with open (\"frutas1.txt\",\"r\") as g:\n        dados=(g.readlines())\n\nwith open(\"frutas1.txt\",\"r\") as arquivo1:\n    conteudo = arquivo1.readlines()\n\n    \n    ","repo_name":"kaduBass/520","sub_path":"arquivo.py","file_name":"arquivo.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72129307073","text":"#!/usr/bin/python3\r\n#[[0, 0, 0, 0, 0, 1, 0, 0, 1], [0, 1, 0, 1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0, 0, 1, 1], [0, 1, 1, 0, 1, 1, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0, 1], [0, 1, 1, 0, 1, 1, 1, 1, 1]]\r\n#[[1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 1, 0, 1, 0, 1, 1, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 0, 0, 1, 0, 1, 1, 0], [1, 0, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 0, 1, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0]]\r\n#33.0202563382\r\nimport sys\r\nimport csv\r\nimport codecs\r\nimport numpy\r\nimport random\r\nimport math\r\nmitst=0\r\nmini=0\r\n#for tst in range(18,106):\r\nfor tst in range(1):\r\n    #random.seed(str([7122,59491,66666,233333,9487]))\r\n    random.seed(7122)\r\n    best=float('Inf')\r\n    class Regression(object):\r\n        def __init__ (self):\r\n            #self.w_ = numpy.zeros(1 + 18*id)\r\n            self.w_ = None #numpy.zeros(1 + id*len(usage))\r\n            self.g_ = None\r\n            self.old_cost=float('Inf')\r\n            self.eta=3\r\n            self.cnt=0\r\n            self.cost=float('Inf')\r\n        def rebuild(self):\r\n            self.g_ = None\r\n            self.old_cost=float('Inf')\r\n            self.cost=float('Inf')\r\n            #self.eta=0\r\n        def fit(self,x,y,z):\r\n            if self.w_ is None:\r\n                self.w_=numpy.zeros(len(x[0]))\r\n                #self.w_=numpy.full(len(x[0]),1.0)\r\n                #self.w_=numpy.zeros(len(x[0]))\r\n            #print(\"a\")\r\n            #self.w_=numpy.random.rand(len(x[0])+1,1)*1e-3\r\n            #print(\"b\")\r\n            output = numpy.dot(x,self.w_)\r\n\r\n            output=1.0/(numpy.add(numpy.exp(-output),1))\r\n\r\n            errors = y - output \r\n            self.cnt+=1\r\n            #if self.cnt % 30 == 0:\r\n            self.cost=numpy.sum(numpy.abs(errors))/len(x)\r\n\r\n\r\n            self.old_cost=self.cost\r\n            g=x.T.dot(errors)\r\n            \r\n            #g+=(-1e1)*self.w_/len(x)\r\n\r\n            if self.g_ is None:\r\n                self.g_=numpy.full(len(g),1e-30)\r\n            self.g_+=numpy.power(g,2)\r\n            g/=numpy.sqrt(self.g_)\r\n            \r\n\r\n            \"\"\"\r\n            print(g)\r\n            print(t)\r\n            print(self.eta*g)\r\n            print(self.eta*t)\r\n            \"\"\"\r\n            self.w_+=self.eta*g\r\n            return self\r\n        def activation(self,x):\r\n            val=-numpy.dot(x,self.w_)\r\n            if 
val>500:\r\n val=500\r\n return 1.0/(1+math.exp(val))\r\n #from scipy import linspace, polyval, polyfit, sqrt, stats, randn\r\n #from pylab import plot, title, show , legend\r\n x = []\r\n y = []\r\n z = []\r\n ve = list(csv.reader(codecs.open(sys.argv[3], 'r', 'Big5')))\r\n ve2 = list(csv.reader(codecs.open(sys.argv[4], 'r', 'Big5')))\r\n #fun=list(range(106))\r\n #del fun[22*4]\r\n #del fun[tst]\r\n #del fun[32]\r\n #\"\"\"\r\n #del fun[51]\r\n #del fun[44]\r\n #del fun[12]\r\n #del fun[8]\r\n #del fun[7]\r\n #\"\"\"\r\n #del fun[2*4]\r\n \"\"\"\r\n sq=[0,1,3,4,5]\r\n sq2=[0,1,3,4,5]\r\n lq=[0,1,3,4,5]\r\n lq2=[0]\r\n lq3=[0,0,0,0,1,1,1,3,3,4]\r\n lq4=[1,3,4,5,3,4,5,4,5,5]\r\n lq5=[1,3,4,5,3,4,5,4,5,5]\r\n \"\"\"\r\n\r\n fun=list(range(106)) \r\n del fun[51] \r\n del fun[44] \r\n del fun[12] \r\n del fun[8] \r\n del fun[7] \r\n sq=[0,1,3,4,5] \r\n sq2=[1] \r\n lq=[3,5] \r\n lq2=[0] \r\n lq2_rat=-1 \r\n lq3=[]\r\n lq4=[]\r\n lq5=[]\r\n\r\n std=[0 for x in range(len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)+len(lq5))]\r\n avg=[0 for x in range(len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)+len(lq5))]\r\n y_tot=0\r\n for i in range(1, len(ve)):\r\n try:\r\n val=float(ve2[i-1][0])\r\n except ValueError:\r\n val=0.\r\n ary=[]\r\n for j in range(len(fun)):\r\n val2=float(ve[i][fun[j]])\r\n std[j]+=val2**2\r\n avg[j]+=val2\r\n ary.append(val2)\r\n for j in range(len(sq)):\r\n val2=float(ve[i][sq[j]])*float(ve[i][sq[j]])\r\n std[j+len(fun)]+=val2**2\r\n avg[j+len(fun)]+=val2\r\n ary.append(val2)\r\n for j in range(len(sq2)):\r\n val2=float(ve[i][sq2[j]])*float(ve[i][sq2[j]])*float(ve[i][sq2[j]])\r\n std[j+len(fun)+len(sq)]+=val2**2\r\n avg[j+len(fun)+len(sq)]+=val2\r\n ary.append(val2)\r\n for j in range(len(lq)):\r\n val2=math.sqrt(float(ve[i][lq[j]]))\r\n std[j+len(fun)+len(sq)+len(sq2)]+=val2**2\r\n avg[j+len(fun)+len(sq)+len(sq2)]+=val2\r\n ary.append(val2)\r\n for j in range(len(lq2)):\r\n val2=math.exp(float(ve[i][lq2[j]]))\r\n std[j+len(fun)+len(sq)+len(sq2)+len(lq)]+=val2**2\r\n avg[j+len(fun)+len(sq)+len(sq2)+len(lq)]+=val2\r\n ary.append(val2)\r\n for j in range(len(lq3)):\r\n val2=float(ve[i][lq3[j]])*float(ve[i][lq4[j]])\r\n std[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)]+=val2**2\r\n avg[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)]+=val2\r\n ary.append(val2)\r\n for j in range(len(lq5)):\r\n val2=math.tanh(float(ve[i][lq5[j]]))\r\n std[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)]+=val2**2\r\n avg[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)]+=val2\r\n ary.append(val2)\r\n ary.append(1)\r\n y_tot+=val\r\n y.append(val)\r\n x.append(ary)\r\n y_avg=y_tot/(len(ve)-1)\r\n for j in range(len(std)):\r\n avg[j]=avg[j]/(len(ve)-1)\r\n std[j]=math.sqrt(std[j]/(len(ve)-1)-(avg[j]**2))\r\n if std[j]<=1:\r\n avg[j]=0\r\n std[j]=1\r\n #avg[j]=0\r\n #std[j]=1\r\n #print(avg,std)\r\n for i in range(len(x)):\r\n for j in range(len(std)):\r\n x[i][j]=((x[i][j]-avg[j])/std[j])\r\n #print(x)\r\n print(\"init\")\r\n magic=Regression()\r\n magic.fit(numpy.array(x),y,z)\r\n \"\"\"\r\n for k in range(5):\r\n for j in range(2):\r\n rng=list(range(20))\r\n #random.shuffle(rng)\r\n for i in rng:\r\n xx=[]\r\n yy=[]\r\n zz=[]\r\n for rng2 in range(1+i,len(x),20):\r\n xx.append(x[rng2])\r\n yy.append(y[rng2])\r\n for h in range(1,5):\r\n print(k,i,h)\r\n magic.fit(numpy.array(xx),yy,zz)\r\n print(magic.old_cost)\r\n magic.eta*=0.7\r\n print(magic.old_cost)\r\n print(magic.w_)\r\n magic.rebuild()\r\n #magic.w_[1]=0\r\n \"\"\"\r\n x2 = []\r\n cut=-1\r\n\r\n for i in 
range(len(x)):\r\n x2.append([])\r\n for j in range(len(fun)):\r\n if math.fabs(magic.w_[j])<=cut:\r\n continue\r\n x2[-1].append(x[i][j])\r\n for j in range(len(sq)):\r\n x2[-1].append(x[i][len(fun)+j])\r\n for j in range(len(sq2)):\r\n x2[-1].append(x[i][len(fun)+len(sq)+j])\r\n for j in range(len(lq)):\r\n x2[-1].append(x[i][len(fun)+len(sq)+len(sq2)+j])\r\n for j in range(len(lq2)):\r\n x2[-1].append(x[i][len(fun)+len(sq)+len(sq2)+len(lq)+j])\r\n for j in range(len(lq3)):\r\n x2[-1].append(x[i][len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+j])\r\n for j in range(len(lq5)):\r\n x2[-1].append(x[i][len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)+j])\r\n x2[-1].append(1)\r\n\r\n magic2=Regression()\r\n for k in range(5):\r\n for j in range(2):\r\n rng=list(range(20))\r\n random.shuffle(rng)\r\n for i in rng:\r\n xx=[]\r\n yy=[]\r\n zz=[]\r\n for rng2 in range(1+i,len(x),20):\r\n xx.append(x2[rng2])\r\n yy.append(y[rng2])\r\n for h in range(1,20):\r\n print(k,i,h)\r\n magic2.fit(numpy.array(xx),yy,zz)\r\n print(magic2.old_cost)\r\n magic2.eta*=0.7122\r\n #tmp=magic2.eta\r\n #magic2.rebuild()\r\n #magic2.eta=1e-3\r\n for h in range(1,10):\r\n print(h)\r\n magic2.fit(numpy.array(x2),y,z)\r\n print(magic2.old_cost)\r\n\r\n #print(tmp)\r\n #magic2.rebuild()\r\n \"\"\"\r\n magic2.eta=1\r\n for h in range(1,10):\r\n print(h)\r\n magic2.fit(numpy.array(x2),y,z)\r\n print(magic2.old_cost)\r\n magic2.rebuild()\r\n magic2.eta=0.01\r\n for k in range(10):\r\n for h in range(1,200):\r\n print(h)\r\n magic2.fit(numpy.array(x2),y,z)\r\n print(magic2.old_cost)\r\n tmp=magic2.eta/2\r\n magic2.rebuild()\r\n magic2.eta=tmp\r\n \"\"\"\r\n \"\"\"\r\n magic2.rebuild()\r\n magic2.eta=1e-6\r\n for h in range(1,2):\r\n print(h)\r\n magic2.fit(numpy.array(x2),y,z)\r\n print(magic2.old_cost)\r\n \"\"\"\r\n f=codecs.open(sys.argv[6], 'w', 'Big5')\r\n f.write(\"id,label\\n\")\r\n sig=0\r\n sig2=0\r\n div=0\r\n print(magic2.w_)\r\n ve = list(csv.reader(codecs.open(sys.argv[5], 'r', 'Big5')))\r\n right=0\r\n #print(len(x2[0]))\r\n for i in range(len(x2)):\r\n ans=magic2.activation(x2[i])\r\n if ans>0.5:\r\n ans=1\r\n else:\r\n ans=0\r\n if ans == y[i]:\r\n right+=1\r\n print(right,len(x2),right/len(x2))\r\n right=0\r\n for i in range(1, len(ve)):\r\n qry = []\r\n for j in range(len(fun)):\r\n if math.fabs(magic.w_[j])<=cut:\r\n continue\r\n val=float(ve[i][fun[j]])\r\n val=((val-avg[j])/std[j])\r\n qry.append(val)\r\n \r\n for j in range(len(sq)):\r\n val=float(ve[i][sq[j]])*float(ve[i][sq[j]])\r\n val=((val-avg[j+len(fun)])/std[j+len(fun)])\r\n qry.append(val)\r\n \r\n for j in range(len(sq2)):\r\n val=float(ve[i][sq2[j]])*float(ve[i][sq2[j]])*float(ve[i][sq2[j]])\r\n val=((val-avg[j+len(fun)+len(sq)])/std[j+len(fun)+len(sq)])\r\n qry.append(val)\r\n\r\n for j in range(len(lq)):\r\n val=math.sqrt(float(ve[i][lq[j]]))\r\n val=((val-avg[j+len(fun)+len(sq)+len(sq2)])/std[j+len(fun)+len(sq)+len(sq2)])\r\n qry.append(val)\r\n\r\n for j in range(len(lq2)):\r\n val=math.exp(float(ve[i][lq2[j]]))\r\n val=((val-avg[j+len(fun)+len(sq)+len(sq2)+len(lq)])/std[j+len(fun)+len(sq)+len(sq2)+len(lq)])\r\n qry.append(val)\r\n\r\n for j in range(len(lq3)):\r\n val=float(ve[i][lq3[j]])*float(ve[i][lq4[j]])\r\n val=((val-avg[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)])/std[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)])\r\n qry.append(val)\r\n\r\n for j in range(len(lq5)):\r\n val=math.tanh(float(ve[i][lq5[j]]))\r\n 
val=((val-avg[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)])/std[j+len(fun)+len(sq)+len(sq2)+len(lq)+len(lq2)+len(lq3)])\r\n qry.append(val)\r\n\r\n qry.append(1)\r\n ans=magic2.activation(qry)\r\n #ans=round(ans)\r\n #if ans>(1-y_avg):\r\n if ans>0.5:\r\n f.write(str(int(i))+\",1\\n\")\r\n else :\r\n f.write(str(int(i))+\",0\\n\")\r\n print(right,(len(ve)-1),right/(len(ve)-1))\r\n f.close()\r\n if right/(len(ve)-1)>mini:\r\n mini=right/(len(ve)-1)\r\n mitst=tst\r\n print(tst,mitst)\r\nquit()\r\nwhile True:\r\n try:\r\n eval(input())\r\n except:\r\n pass","repo_name":"qazwsxedcrfvtg14/ML2017","sub_path":"hw2/hw2_logistic.py","file_name":"hw2_logistic.py","file_ext":"py","file_size_in_byte":11969,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3724825822","text":"import boto3\r\nimport csv\r\nimport datetime\r\nimport json\r\nfrom pathlib import Path\r\nimport os\r\nimport random\r\nimport re\r\nimport tweepy\r\n\r\n\r\ndef convert_filename(filename):\r\n \"\"\"\r\n Images were extracted from video files every 10 seconds\r\n The following filename pattern was used:\r\n\r\n \r\n S1E01_01.png\r\n S1E01_02.png\r\n ...\r\n S1E01_139.png\r\n S1E02_01.png\r\n ...\r\n\r\n \r\n Multiply the incrementing number after the underscore by 10\r\n to calculate an accurate timestamp (sum = # of seconds)\r\n \"\"\"\r\n \r\n # Get the number of seconds from the filename\r\n seconds = int(os.path.splitext(filename)[0].split(\"_\")[-1]) * 10\r\n\r\n # Add random deviation to prevent all timestamps from being multiples of 10\r\n seconds = seconds + random.randint(-5, 5)\r\n\r\n # Convert the number of seconds into minutes and seconds\r\n minutes, seconds = divmod(seconds, 60)\r\n minutes_str = str(minutes).zfill(2)\r\n seconds_str = str(seconds).zfill(2)\r\n\r\n # Get the episode from the filename\r\n episode_number = filename.split(\"_\")[0]\r\n\r\n # Format the minutes, seconds, and episode into a readable string\r\n readable_str = f\"{minutes_str}:{seconds_str} {episode_number}\"\r\n return readable_str\r\n\r\n\r\n\r\ndef extract_metatitle(title):\r\n # Two-part Seinfeld episodes share the same title,\r\n # resulting in nonstandard formats like \"The Trip (Part 1)\"\r\n \r\n # Regex to identify text inside parentheses\r\n match = re.search(r'(.*)\\((.*)\\)', title)\r\n\r\n # If the title contains parentheses\r\n if match:\r\n # Cut the parentheses and inner text into a separate string\r\n return match.group(1), \" (\"+match.group(2)+\")\"\r\n\r\n else:\r\n # If the title has no parentheses, just return it as is\r\n return title, \"\"\r\n\r\n\r\n\r\ndef get_random_tweet(tweets):\r\n # Create dictionary from tweets.csv with \"episode\" and \"title\" headers\r\n with open(tweets) as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n csv_dict = [{\"episode\": row[\"episode\"], \"title\": row[\"title\"]} for row in reader]\r\n\r\n return random.choice(csv_dict)\r\n\r\n\r\n\r\ndef lambda_handler(event, context):\r\n # Get environment variables from AWS Lambda configuration\r\n # These keys and tokens can be found in the Twitter developer portal\r\n api_key = os.getenv(\"API_KEY\")\r\n api_key_secret = os.getenv(\"API_KEY_SECRET\")\r\n access_token = os.getenv(\"ACCESS_TOKEN\")\r\n access_token_secret = os.getenv(\"ACCESS_TOKEN_SECRET\")\r\n\r\n # Authenticate Twitter API\r\n # The tweepy library is not native to AWS Lambda, a custom layer is required\r\n auth = tweepy.OAuthHandler(api_key, api_key_secret)\r\n 
auth.set_access_token(access_token, access_token_secret)\r\n api = tweepy.API(auth)\r\n \r\n\r\n # Get random episode strings from tweets.csv\r\n csv_row = get_random_tweet(\"tweets.csv\")\r\n episode = csv_row[\"episode\"]\r\n title = csv_row[\"title\"]\r\n title, metatitle = extract_metatitle(title)\r\n \r\n # Download the selected image from the 'seinfeldframes' S3 bucket\r\n path = \"/tmp/\" + episode\r\n s3 = boto3.resource('s3')\r\n s3.Bucket('seinfeldframes').download_file(episode, path)\r\n\r\n # Format the caption and tweet the image\r\n media = api.media_upload(filename=path)\r\n api.update_status(status=convert_filename(episode) + ' \"' + title + '\"' + metatitle + ' #Seinfeld', media_ids=[media.media_id])\r\n \r\n \r\n # Get all tweets containing \"seinfeld\" posted in the last hour\r\n now = datetime.datetime.now()\r\n past_hour = now - datetime.timedelta(hours=1)\r\n search_query = 'seinfeld since:' + past_hour.strftime('%Y-%m-%d_%H:%M:%S')\r\n search_results = tweepy.Cursor(api.search_tweets, q=search_query).items()\r\n \r\n # Like each tweet in the search results\r\n for tweet in search_results:\r\n if tweet.user.screen_name != \"nosoupforyoubot\":\r\n try:\r\n api.create_favorite(tweet.id)\r\n print(f\"Liked tweet with ID: {tweet.id}\")\r\n except Exception as e:\r\n print(f\"Error liking tweet with ID: {tweet.id} - {e}\")\r\n \r\n \r\n return {\"statusCode\": 200, \"episode\": episode}\r\n","repo_name":"billywojcicki/nosoupforyoubot","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21354714481","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n\n\n# In[2]:\n\n\napp = Flask(__name__,template_folder='templates')\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/predict',methods=['POST'])\ndef predict():\n '''\n For rendering results on HTML GUI\n '''\n features = [x for x in request.form.values()]\n final_features = [np.array(features)]\n column_names=['HR','O2Sat','Temp','SBP','MAP','DBP','Resp','EtCO2','BaseExcess','HCO3','FiO2','pH','PaCO2','SaO2','AST','BUN','Alkalinephos',\n 'Calcium','Chloride','Creatinine','Bilirubin_direct','Glucose','Lactate','Magnesium','Phosphate','Potassium','Bilirubin_total','TroponinI','Hct','Hgb','PTT','WBC','Fibrinogen','Platelets','Age','Gender','Unit1','Unit2','HospAdmTime','ICULOS']\n final_features=pd.DataFrame(final_features,columns=column_names)\n prediction= model.predict(final_features)\n a=\"\"\n if prediction[0]==0:\n a+=\"NO SEPSIS\"\n else:\n a+=\"SEPSIS\"\n return render_template('index.html', prediction_text='{}'.format(a))\n\n\nif __name__ == '__main__':\n\tapp.run()\n","repo_name":"ymeghana/Early-Detection-Of-Sepsis","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23426162551","text":"import sys\r\n\r\ndef cookieClickerAlpha():\r\n\tf = open(sys.argv[1])\r\n\tw = open('output.out', 'w')\r\n\tT = int(f.readline())\r\n\tfor case in range(1, T + 1):\r\n\t\tdata = f.readline().split()\r\n\t\tC = float(data[0])\r\n\t\tF = float(data[1])\r\n\t\tX = float(data[2])\r\n\t\trate = 2\r\n\t\ttime = 
0\r\n\t\twhile(isConvinientAFarm(C, F, X, rate)):\r\n\t\t\ttime = time + C / rate\r\n\t\t\trate = rate + F\r\n\t\ttime += X / rate\r\n\t\tw.write(\"Case #{0}: {1:.7f}\\n\".format(case, time))\r\n\r\ndef isConvinientAFarm(C, F, X, rate):\r\n\tif(X / rate > C / rate + X / (rate + F)):\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\r\ncookieClickerAlpha();","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2864.py","file_name":"2864.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23513019961","text":"import unittest\r\nimport re\r\nimport sys\r\nfrom unittest import TestCase\r\nfrom random import randint\r\n\r\n'''\r\n--+-\r\n+-++\r\n--++\r\n++++\r\n'''\r\nclass Solution():\r\n    def get_factor(self, guess):\r\n        for i in range(2, 10000):\r\n            if guess % i == 0:\r\n                return i\r\n        return None\r\n\r\n    def coin(self, n, m):\r\n        low = 1 << (n - 1)\r\n        high = 1 << n\r\n\r\n        ret = []\r\n        for retry in range(10000000):\r\n            guess = randint(low, high) * 2 + 1\r\n            guess = bin(guess)[2:]\r\n            factors = [guess]\r\n            for base in range(2, 11):\r\n                real_guess = int(guess, base)\r\n                factor = self.get_factor(real_guess)\r\n                if factor:\r\n                    factors.append(factor)\r\n                else:\r\n                    break\r\n            if len(factors) < 10:\r\n                continue\r\n            ret.append(factors)\r\n            if len(ret) >= m:\r\n                break\r\n        return ret\r\n\r\n    def solve(self, filename):\r\n        fout = open(filename + \"_output.txt\", 'w')\r\n        with open(filename, 'r') as fp:\r\n            n = int(fp.readline().strip())\r\n            nCase = 0\r\n            for line in fp:\r\n                nCase += 1\r\n                n, m = line.strip().split()\r\n                n = int(n) - 1\r\n                m = int(m)\r\n                ret = self.coin(n, m)\r\n                fout.write(\"Case #%s:\\n\"%nCase)\r\n                for row in ret:\r\n                    outline = \" \".join([str(i) for i in row]) + \"\\n\"\r\n                    fout.write(outline)\r\n        fout.close()\r\n\r\n'''\r\nselect Id from Weather where Temperature > (select Temperature from Weather where \r\n    DateDiff(day, DateTimCol, GetDate()) = 1)\r\n'''\r\nclass TestSolution(TestCase):\r\n    def test_maxprofit(self):\r\n        instance = Solution()\r\n        self.assertEqual(len(instance.coin(32, 500)), 500)\r\n        \r\nif __name__ == '__main__':\r\n    cmd = sys.argv[1]\r\n    if cmd == \"test\":\r\n        suite = unittest.TestLoader().loadTestsFromTestCase(TestSolution)\r\n        suite = unittest.TestSuite([suite]) \r\n        unittest.TextTestRunner().run(suite)\r\n    else:\r\n        sol = Solution()\r\n        sol.solve(cmd)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/982.py","file_name":"982.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"5880158733","text":"import numpy as np\nimport torch\nfrom torch import nn\n\nclass ClassificationHead(nn.Module):\n    def __init__(self):\n        super(ClassificationHead, self).__init__()\n        \n        self.dropout = nn.Dropout(0.2)\n        self.ff = nn.Linear(in_features=768, out_features=256)\n        self.activation = torch.nn.ReLU()\n        self.output = nn.Linear(in_features=256, out_features=1)\n        self.sigmoid = nn.Sigmoid()\n    \n    @staticmethod\n    def init_weights(m):\n        if type(m) == torch.nn.Linear:\n            y = m.in_features\n            m.weight.data.normal_(mean=0.0, std=1/np.sqrt(y))\n            m.bias.data.fill_(0)\n    \n    \n    def forward(self, X):\n        X = self.dropout(X)\n        X = self.ff(X)\n        X = self.activation(X)\n        X = self.output(X)\n        X = self.sigmoid(X)\n        \n        return X\n    \n    def inference(self, X):\n        predictions = self.forward(X)\n        pred_classes = 
self.get_class(predictions)\n return pred_classes\n \n def get_class(self, predictions):\n _, pred_classes = predictions.max(dim=1)\n return pred_classes\n \n \nclass AspectExtractor(nn.Module):\n def __init__(self, transformer):\n super(AspectExtractor, self).__init__()\n\n self.transformer = transformer\n self.head = ClassificationHead()\n self.criterion = nn.BCELoss(reduction = 'none')\n \n self.freeze()\n \n def forward (self, X, attn_mask):\n X = self.transformer(X, attn_mask)\n X = X[0]\n X = self.head(X)\n\n return X\n \n def fit(self, X, attn_mask, Y):\n \n logits = self.forward(X, attn_mask).squeeze()\n loss = self.criterion(logits, Y)\n \n weight = Y.sum(dim=1)/attn_mask.sum(dim=1)\n \n predictions = logits > 0.5\n ncorrect = (predictions == Y).all(dim=1).sum().item()\n \n return loss, ncorrect\n \n def freeze(self):\n for param in self.transformer.parameters():\n param.requires_grad = False","repo_name":"jgdula/distilBERT-for-ABSA","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73627176514","text":"from __future__ import division\n\nfrom builtins import str\nfrom past.utils import old_div\nimport logging\n\nimport numpy as np\n\nimport psysmon\nimport psysmon.core.preferences_manager as preferences_manager\n\nimport matplotlib as mpl\nimport matplotlib.patches\nimport pyproj\n\n\nclass LocalizeCircle(psysmon.core.plugins.CommandPlugin):\n ''' Run a localization using the circle method.\n\n '''\n nodeClass = 'GraphicLocalizationNode'\n\n\n def __init__(self):\n ''' Initialize the instance.\n\n '''\n psysmon.core.plugins.CommandPlugin.__init__(self,\n name = 'circle method',\n category = 'localize',\n tags = ['localize', 'circle']\n )\n\n # The logging logger instance.\n self.logger = psysmon.get_logger(self)\n\n self.icons['active'] = psysmon.artwork.icons.iconsBlack16.localize_graphical_icon_16\n\n # Add the plugin preferences.\n pref_page = self.pref_manager.add_page('Preferences')\n ps_group = pref_page.add_group('phase selection')\n vm_group = pref_page.add_group('velocity model')\n\n item = psysmon.core.preferences_manager.MultiChoicePrefItem(name = 'p_phases',\n label = 'P phases',\n value = [],\n limit = [],\n tool_tip = 'Select the P phases to use for the localization.')\n ps_group.add_item(item)\n\n item = psysmon.core.preferences_manager.MultiChoicePrefItem(name = 's_phases',\n label = 'S phases',\n value = [],\n limit = [],\n tool_tip = 'Select the S phases to use for the localization.')\n ps_group.add_item(item)\n\n item = preferences_manager.IntegerSpinPrefItem(name = 'p_velocity',\n label = 'P velocity [m/s]',\n value = 5000,\n limit = (1, 100000),\n tool_tip = 'The P-wave velocity in m/s.')\n vm_group.add_item(item)\n\n\n # The plotted circles.\n self.circles = []\n\n\n def getHooks(self):\n ''' The callback hooks.\n '''\n hooks = {}\n\n hooks['shared_information_added'] = self.on_shared_information_added\n return hooks\n\n\n def on_shared_information_added(self, origin_rid, name):\n ''' Hook that is called when a shared information was added by a plugin.\n '''\n rid = '/plugin/select_picks'\n if origin_rid.endswith(rid) and name == 'selected_pick_catalog':\n self.update_phases()\n\n\n def update_phases(self):\n ''' Update the available P- and S-phases.\n '''\n # TODO: Get the selected pick catalog. The pick catalog should contain\n # only the picks of the selected event. 
This should be handled by the\n # select_picks plugin.\n # Check for the shared pick catalog.\n pick_info = self.parent.get_shared_info(name = 'selected_pick_catalog')\n if pick_info:\n if len(pick_info) > 1:\n raise RuntimeError(\"More than one pick catalog info was returned. This shouldn't happen.\")\n pick_info = pick_info[0]\n catalog = pick_info.value['catalog']\n else:\n self.logger.error(\"No selected pick catalog available. Can't continue the localization.\")\n return\n\n labels = list(set([x.label for x in catalog.picks]))\n p_phases = [x for x in labels if x.lower().startswith('p')]\n s_phases = [x for x in labels if x.lower().startswith('s')]\n self.pref_manager.set_limit('p_phases', p_phases)\n self.pref_manager.set_limit('s_phases', s_phases)\n # TODO: Check if the currently selected values are part of the\n # available limits. This should be done by the pref_manager fields.\n\n\n def run(self):\n ''' Run the circle method localization.\n '''\n self.logger.info(\"Localizing using the circle method.\")\n\n # Check for an existing 2D map view.\n map_view_name = self.rid[:self.rid.rfind('/') + 1] + 'map_view'\n map_view = self.parent.viewport.get_node(name = map_view_name)\n if map_view:\n # Check for the shared selected event.\n selected_event_info = self.parent.get_shared_info(origin_rid = self.parent.collection_node.rid + '/plugin/select_event',\n name = 'selected_event')\n if selected_event_info:\n if len(selected_event_info) > 1:\n raise RuntimeError(\"More than one event info was returned. This shouldn't happen.\")\n selected_event_info = selected_event_info[0]\n event_id = selected_event_info.value['id']\n else:\n self.logger.error(\"No selected event available. Can't continue the localization.\")\n return\n\n # Check for the shared pick catalog.\n pick_info = self.parent.get_shared_info(name = 'selected_pick_catalog')\n if pick_info:\n if len(pick_info) > 1:\n raise RuntimeError(\"More than one pick catalog info was returned. This shouldn't happen.\")\n pick_info = pick_info[0]\n pick_catalog = pick_info.value['catalog']\n else:\n self.logger.error(\"No selected pick catalog available. Can't continue the localization.\")\n return\n\n p_phases = self.pref_manager.get_value('p_phases')\n s_phases = self.pref_manager.get_value('s_phases')\n\n # Compute the epidistances.\n epidist = self.compute_epidist(event_id, pick_catalog, p_phases, s_phases)\n\n # Plot the circles into the map view axes.\n self.plot_circles(epidist)\n\n\n\n\n def compute_epidist(self, event_id, pick_catalog, p_phases, s_phases, stations = None):\n ''' Compute the epidistances.\n '''\n if not p_phases:\n self.logger.error(\"No P phases available. Can't continue with localization.\")\n return\n\n if not s_phases:\n self.logger.error(\"No S phases available. Can't continue with localization.\")\n return\n\n # Get the stations for which to compute the epidistances.\n if not stations:\n stations = self.parent.project.geometry_inventory.get_station()\n\n used_picks = []\n vp = self.pref_manager.get_value('p_velocity')\n epidist = {}\n for cur_station in stations:\n epidist[cur_station.name] = {}\n for cur_p_phase in p_phases:\n cur_p = pick_catalog.get_pick(event_id = event_id,\n label = cur_p_phase,\n station = cur_station.name)\n if not cur_p:\n self.logger.info(\"No pick found for event_id %d, station %s and label %s. 
Can't compute the S-P difference.\", event_id, cur_station.name, cur_p_phase)\n continue\n\n if len(cur_p) > 1:\n raise RuntimeError(\"More than one phase was returned for station %s, event_id %d and label %s. This shouldn't happen.\" % (cur_station.name, event_id, cur_p_phase))\n cur_p = cur_p[0]\n\n for cur_s_phase in s_phases:\n cur_s = pick_catalog.get_pick(event_id = event_id,\n label = cur_s_phase,\n station = cur_station.name)\n if not cur_s:\n self.logger.info(\"No pick found for event_id %d, station %s and label %s. Can't compute the S-P difference.\", event_id, cur_station.name, cur_s_phase)\n continue\n if len(cur_s) > 1:\n raise RuntimeError(\"More than one phase was returned for station %s, event_id %d and label %s. This shouldn't happen.\" % (cur_station.name, event_id, cur_s_phase))\n cur_s = cur_s[0]\n\n sp_diff = cur_s.time - cur_p.time\n cur_epidist = old_div(sp_diff * vp, (np.sqrt(3) - 1))\n epidist[cur_station.name][(cur_p.label, cur_s.label)] = cur_epidist\n\n used_picks.append(cur_p)\n used_picks.append(cur_s)\n\n used_data = {}\n used_data['picks'] = used_picks\n used_data['event_id'] = event_id\n self.parent.add_shared_info(origin_rid = self.rid,\n name = 'used_data',\n value = used_data)\n\n return epidist\n\n\n def plot_circles(self, epidist):\n ''' Plot the circles in the map view.\n '''\n # Get all map views.\n map_view_name = self.rid[:self.rid.rfind('/') + 1] + 'map_view'\n map_view = self.parent.viewport.get_node(name = map_view_name)\n\n for cur_view in map_view:\n proj = pyproj.Proj(init = cur_view.map_config['epsg'])\n\n # Remove existing circles from the view.\n circles_to_delete = [x for x in cur_view.axes.patches if x.get_gid() == self.rid]\n for cur_circle in circles_to_delete:\n cur_view.axes.patches.remove(cur_circle)\n\n for cur_station_name in epidist.keys():\n station = self.parent.project.geometry_inventory.get_station(name = cur_station_name)\n\n if not station:\n continue\n\n station = station[0]\n stat_lon, stat_lat = station.get_lon_lat()\n stat_x, stat_y = proj(stat_lon, stat_lat)\n for cur_key, cur_epidist in epidist[cur_station_name].items():\n label = cur_station_name + ':' + cur_key[0] + ':' + cur_key[1]\n circle = mpl.patches.Circle((stat_x, stat_y), radius = cur_epidist,\n fill = False, gid = self.rid, label = label)\n cur_view.axes.add_patch(circle)\n self.circles.append(circle)\n\n # Remove existing text from the axes.\n text_2_delete = [x for x in cur_view.axes.texts if x.get_gid() == self.rid]\n for cur_text in text_2_delete:\n cur_view.axes.texts.remove(cur_text)\n # Add the used velocity to the map.\n cur_view.axes.text(0.98, 0.02, 'circle vp = ' + str(self.pref_manager.get_value('p_velocity')) + ' m/s',\n ha = 'right', transform = cur_view.axes.transAxes, gid = self.rid)\n\n #cur_view.axes.autoscale(True)\n cur_view.draw()\n\n","repo_name":"stefanmaar/psysmon","sub_path":"lib/psysmon/packages/localize/plugins_localize_circle.py","file_name":"plugins_localize_circle.py","file_ext":"py","file_size_in_byte":11141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27444247773","text":"with open(0) as f:\n content = f.read()\n lines = content.splitlines()\n\ngrid = set()\nmx = 0\nfor line in lines:\n coords = line.split(\" -> \")\n a,b = map(int, coords[0].split(\",\"))\n mx = max(mx,b)\n for coord in coords[1:]:\n x,y = map(int, coord.split(\",\"))\n xx,yy = x, y\n if xx != a:\n if a > xx:\n a, xx = xx, a\n for i in range(a, xx+1):\n grid.add((i,b))\n elif yy != b:\n if b > 
yy:\n b, yy = yy, b\n for i in range(b, yy+1):\n grid.add((a,i))\n\n a = x\n b = y\n mx = max(mx,y)\n\nfor i in range(1000):\n grid.add((i,mx+2))\nans = 0\nfor i in range(100000):\n x = 500\n y = 0\n for _ in range(1000):\n\n if (x,y+1) in grid:\n if (x-1, y+1) not in grid:\n x -= 1\n y += 1\n elif (x+1, y+1) in grid:\n if (x,y) not in grid:\n ans += 1\n grid.add((x,y))\n break\n else:\n x += 1\n y += 1\n else:\n y += 1\n\nprint(ans)","repo_name":"allEyezOnCode/adventofcode","sub_path":"2022/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14623024448","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nfrom pathlib import Path\n\nimport eelbrain\nfrom matplotlib import pyplot\nimport mne\n\n\n# Data locations\nDATA_ROOT = Path(\"~\").expanduser() / 'Data' / 'Alice'\nSTIMULUS_DIR = DATA_ROOT / 'stimuli'\nSUBJECT = 'S01'\nSTIMULUS = '1'\n# Crop the data for this demonstration \nTSTOP = 5.001 # Python indexing is exclusive of the specified stop sample \n\n# Where to save the figure\nDST = DATA_ROOT / 'figures'\nDST.mkdir(exist_ok=True)\n\n# Configure the matplotlib figure style\nFONT = 'Arial'\nFONT_SIZE = 8\nRC = {\n 'figure.dpi': 100,\n 'savefig.dpi': 300,\n 'savefig.transparent': True,\n # Font\n 'font.family': 'sans-serif',\n 'font.sans-serif': FONT,\n 'font.size': FONT_SIZE,\n}\npyplot.rcParams.update(RC)\n# -\n\n# Create a raw object for the EEG data - does not yet load the data itself \nraw = mne.io.read_raw_fif(DATA_ROOT / 'eeg' / SUBJECT / f'{SUBJECT}_alice-raw.fif')\n# Extract only the events form the EEG data, and show the events table\nevents = eelbrain.load.fiff.events(raw)\nevents.head()\n\n# For this illustration, pick only the first stimulus\nevents = events.sub(f\"event == {STIMULUS!r}\")\n# Load the EEG data corresponding to this event; load only the first 5 seconds of data here\neeg = eelbrain.load.fiff.epochs(events, tmin=0, tstop=TSTOP)\n# Filter the data and resample it to 100 Hz\neeg = eelbrain.filter_data(eeg, 1, 20)\neeg = eelbrain.resample(eeg, 100)\n\n# +\n# Load the stimulus wave file\nwave = eelbrain.load.wav(STIMULUS_DIR / f'{STIMULUS}.wav')\n# Load the high-resolution gammatone spectrogram from disk (run predictors/make_gammatone.py to generate this file)\ngammatone = eelbrain.load.unpickle(STIMULUS_DIR / f'{STIMULUS}-gammatone.pickle')\n# Remove artifacts and apply a log transform to simulate compression in the auditory systems\ngammatone = gammatone.clip(0)\ngammatone = (gammatone + 1).log()\n\n# Crop the data\nwave = wave.sub(time=(0, TSTOP))\ngammatone = gammatone.sub(time=(0, TSTOP))\n# -\n\n# For discrete events, load the word table\nword_table = eelbrain.load.tsv(STIMULUS_DIR / 'AliceChapterOne-EEG.csv')\n# Restrict to the stimulus\nword_table = word_table.sub(f\"Segment == {STIMULUS}\")\n# As with the sounds, remove words that occured after TSTOP \nword_table = word_table.sub(f\"onset < {TSTOP}\")\nword_table.head()\n\n# +\n# Setup the figure layout\nfig, axes = pyplot.subplots(9, figsize=(7.5, 7), sharex=True, subplot_kw=dict(frame_on=False))\n\n# Plot the EEG data\neelbrain.plot.Butterfly(eeg, axes=axes[0], ylabel='EEG')\n\n# plot the sound wave; downsample it first to make plotting faster\nwave_rs 
= eelbrain.resample(wave, 1000)\neelbrain.plot.UTS(wave_rs, colors='k', axes=axes[1], ylabel='Sound\\nWave')\n\n# plot the full gammatone spectrogram\neelbrain.plot.Array(gammatone, axes=axes[2], ylabel='Gammatone\\nSpectrogram', vmax=25)\n\n# Generate an 8-band version of the spectrogram by averaging in frequency bins\ngammatone_8 = gammatone.bin(dim='frequency', nbins=8)\n# Resample it to match the EEG sampling rate\ngammatone_8 = eelbrain.resample(gammatone_8, 100)\neelbrain.plot.Array(gammatone_8, axes=axes[3], ylabel='Gammatone\\n8 Bands', interpolation='none', vmax=25)\n\n# Generate an envelope representation by summing across all frequency bands\ngammatone_envelope = gammatone.sum('frequency')\ngammatone_envelope = eelbrain.resample(gammatone_envelope, 100)\neelbrain.plot.UTS(gammatone_envelope, colors='red', axes=axes[4], ylabel='Gammatone\\nEnvelope')\n\n# Use an edge detector model to extract acoustic onsets from the gammatone spectrogram\ngammatone_onsets = eelbrain.edge_detector(gammatone, 30)\neelbrain.plot.Array(gammatone_onsets, axes=axes[5], ylabel='Acoustic\\nOnsets', interpolation='none')\n\n# Generate an impulse at every word onset. Use a time dimension with the same properties as the EEG data.\neeg_time = eelbrain.UTS(0, 1/100, 501)\nwords = eelbrain.NDVar.zeros(eeg_time)\nfor time in word_table['onset']:\n words[time] = 1\neelbrain.plot.UTS(words, stem=True, top=1.5, bottom=-0.5, colors='blue', axes=axes[6], ylabel='Words')\n\n# For illustration, add the words to the plot\nfor time, word in word_table.zip('onset', 'Word'):\n axes[6].text(time, -0.1, word, va='top', fontsize=8)\n\n# Generate an impulse at every word onset, scaled by a variable\nngram_surprisal = eelbrain.NDVar.zeros(eeg_time)\nfor time, value in word_table.zip('onset', 'NGRAM'):\n ngram_surprisal[time] = value\neelbrain.plot.UTS(ngram_surprisal, stem=True, colors='blue', axes=axes[7], ylabel='N-Gram')\n\n# Generate an alternative impulse, only at content word onsets, and scaled by a different variable\ncfg_surprisal = eelbrain.NDVar.zeros(eeg_time)\nfor time, value, is_lexical in word_table.zip('onset', 'NGRAM', 'IsLexical'):\n if is_lexical:\n cfg_surprisal[time] = value\neelbrain.plot.UTS(cfg_surprisal, stem=True, colors='blue', axes=axes[8], ylabel='N-Gram\\nLexical')\n\n# Fine-tune layout\nLAST = 8\nfor i, ax in enumerate(axes):\n if i == LAST:\n pass\n else:\n ax.set_xlabel(None)\n ax.set_yticks(())\n ax.tick_params(bottom=False)\n\nfig.tight_layout()\nfig.savefig(DST / 'Time series.pdf')\nfig.savefig(DST / 'Time series.png')\n","repo_name":"Eelbrain/Alice","sub_path":"figures/Time-series.py","file_name":"Time-series.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"72317653955","text":"from nnsim.module import Module\nfrom nnsim.module import ModuleList as ModuleL\nfrom nnsim.nnsimChannel import NoLatencyChannel as NLCh\nfrom nnsim.nnsimChannel import Channel as Ch\nfrom nnsim.nnsimLogicalManager import nnsimLogicalManager as LM\nfrom nnsim.nnsimRAM import SRAM\nimport sys\n\nRD = True\nWR = False\n\nclass nnsimSmartBuffer(Module):\n def instantiate(self, setup):\n self.class_name = 'nnsimSmartBuffer'\n self.debug = setup['debug']\n # ----------------------------------------------------------------\n # input channels\n # ----------------------------------------------------------------\n self.fill_data_ichns = setup['fill_data_ichns']\n self.update_data_ichns = setup['update_data_ichns']\\\n if 
'update_data_ichns' in setup\\\n else None\n self.drain_enable_ichns = setup['drain_enable_ichns']\\\n if 'drain_enable_ichns' in setup\\\n else None\n # ----------------------------------------------------------------\n # output channels\n # ----------------------------------------------------------------\n self.drain_data_ochns = setup['drain_data_ochns']\n # ----------------------------------------------------------------\n # subcomponents\n # ---------------------------------------------------------------- \n self.logical_managers = ModuleL()\n self.memory = SRAM(setup['SRAM'])\n self.memory_width = setup['SRAM']['width']\n # ----------------------------------------------------------------\n # internal channels\n # ----------------------------------------------------------------\n self.ack_internal_chns = ModuleL()\n self.lm_fill_addr_internal_chns = ModuleL()\n self.lm_drain_addr_internal_chns = ModuleL()\n self.lm_update_addr_internal_chns = ModuleL()\n # ----------------------------------------------------------------\n # setup logical managers\n # ---------------------------------------------------------------- \n self.num_logical_managers = setup['num_logical_managers']\n for logical_unit_idx in range(self.num_logical_managers):\n lm_setup = {'debug': self.debug + ' (LM:'+ str(logical_unit_idx) + ')'}\n lm_setup['memory_width'] = self.memory_width\n lm_setup.update(setup['LMs'][logical_unit_idx].copy())\n # ------ >> ack channel\n self.ack_internal_chns.append(Ch())\n lm_setup['ack_ichn'] = self.ack_internal_chns[logical_unit_idx]\n # ------ >> fill address channel\n self.lm_fill_addr_internal_chns.append(Ch())\n lm_setup['fill_addr_ochn'] = self.lm_fill_addr_internal_chns[logical_unit_idx]\n # ------ >> drain address channel\n self.lm_drain_addr_internal_chns.append(Ch())\n lm_setup['drain_addr_ochn'] = self.lm_drain_addr_internal_chns[logical_unit_idx]\n # ------ >> update address channel\n if 'update' in setup['LMs'][logical_unit_idx]['AGs']:\n self.lm_update_addr_internal_chns.append(Ch())\n lm_setup['update_addr_ochn'] = self.lm_update_addr_internal_chns[logical_unit_idx]\n else:\n self.lm_update_addr_internal_chns.append(None)\n # ------ >> instantiate logical manager\n self.logical_managers.append(LM(lm_setup))\n # ------ >> assign unique class name for each logical manager\n self.logical_managers[logical_unit_idx].class_name = \\\n 'LM'+ str(logical_unit_idx) + '_' + self.logical_managers[logical_unit_idx].class_name \n \n # ----------------------------------------------------------------\n # internal records containers\n # ----------------------------------------------------------------\n # >> record for processed reads\n # used for retrieving read data information \n self.last_read = ModuleL() \n for sram_port_idx in range(self.memory.nports):\n self.last_read.append(NLCh(depth = 1))\n \n # used for detecting drains that can be forwarded\n self.approved_write_data_in_cycle = []\n for lm_idx in range(self.num_logical_managers):\n self.approved_write_data_in_cycle.append({'fill': None, 'update': None})\n \n # -----------------------------------------------------------------\n # Static and Runtime information\n # -----------------------------------------------------------------\n # definitin of compound actions\n self.setup_access_info()\n \n attrs_dict = setup['SRAM'].copy()\n attrs_dict.pop('port_type')\n self.attrs.update(attrs_dict)\n \n self.component_with_action = True\n \n def configure(self, config):\n for logical_unit_idx in range(self.num_logical_managers):\n 
self.logical_managers[logical_unit_idx].configure(config['LM'][logical_unit_idx])\n # ----------------------------------------------------------------\n # internal records configuration\n # -------------- -------------------------------------------------- \n # >> record for logical manager and sram ports correspondance\n self.lm_sram_map = config['lm_sram_map']\n \n def tick(self):\n self.ack_packet = []\n for i in range(self.num_logical_managers):\n self.ack_packet.append([])\n # ------------------------------------------------------------------\n # Check for unprocessed reads\n # ------------------------------------------------------------------\n for sram_port_idx in range(self.memory.nports):\n if self.last_read[sram_port_idx].valid():\n read_request_info = self.last_read[sram_port_idx].peek()\n# if 'IFmapGLB' in self.debug:\n# print(self.debug, read_request_info, 'ochn vacancy:', self.drain_data_ochns[read_request_info['lm_idx']].vacancy())\n if self.drain_data_ochns[read_request_info['lm_idx']].vacancy():\n if read_request_info['enabled']:\n if not read_request_info['forwarded']:\n read_data = [d for d in self.memory.response(port = sram_port_idx)] \n# if 'PsumGLB' in self.debug:\n# print('DDDDD ' ,self.debug, 'read from memory:', read_data, 'from ', read_request_info['addr'])\n else:\n read_data = read_request_info['forwarded_data']\n# if 'PsumGLB' in self.debug:\n# print('DDDDD ' ,self.debug, 'forwared read:', read_data, 'from ', read_request_info['addr'])\n else:\n read_data = [0] * self.memory.width\n# if 'PsumGLB' in self.debug:\n# print('DDDDD ' ,self.debug, 'gated data:', read_data, 'from ', read_request_info['addr'])\n self.last_read[sram_port_idx].pop()\n# if 'PsumGLB' in self.debug:\n# print(self.debug, 'port ', sram_port_idx, 'pop')\n self.drain_data_ochns[read_request_info['lm_idx']].push(read_data)\n\n # ------------------------------------------------------------------\n # Check for ready requests \n # ------------------------------------------------------------------ \n for lm_idx in range(self.num_logical_managers):\n self.check_for_update_request(lm_idx)\n self.check_for_fill_request(lm_idx)\n self.check_for_drain_request(lm_idx)\n if self.logical_managers[lm_idx].fill_round_done.rd()\\\n and self.logical_managers[lm_idx].drain_round_done.rd():\n self.logical_managers[lm_idx].reset_book_keeping_pointers()\n# print('round done')\n \n def check_for_drain_request(self, lm_idx): \n# if not self.lm_drain_addr_internal_chns[lm_idx].valid():\n# if 'IFmapGLB' in self.debug:\n# print('!!!!!', self.debug,' no drain request')\n# else:\n# if 'IFmapGLB' in self.debug:\n# print('!!!!!', self.debug,'wait to process request',self.lm_drain_addr_internal_chns[lm_idx].peek() )\n if self.lm_drain_addr_internal_chns[lm_idx].valid():\n \n request_info = self.logical_managers[lm_idx].check_request('drain')\n# print(request_info)\n# if 'IFmapGLB' in self.debug:\n# print(self.debug, request_info)\n\n if request_info['addr'] is not None:\n enable_signal_ready = True\n enabled = True\n enabled_buffer = False\n if self.drain_enable_ichns is not None and \\\n self.drain_enable_ichns[lm_idx] is not None:\n enable_signal_ready = self.drain_enable_ichns[lm_idx].valid()\n enabled_buffer = True\n if enable_signal_ready:\n enabled = self.drain_enable_ichns[lm_idx].peek()[0]\n# if 'IFmapGLB' in self.debug:\n# print(self.debug, request_info, 'enable signal:', enable_signal_ready) \n if enable_signal_ready:\n sram_rd_port = self.lm_sram_map[lm_idx]['drain']\n if self.last_read[sram_rd_port].vacancy() and 
not self.memory.port_in_use(sram_rd_port):\n# if 'PsumGLB' in self.debug:\n# print(self.debug, 'there is space and available prot for issuing read request')\n if enabled_buffer:\n self.drain_enable_ichns[lm_idx].pop()\n \n address = request_info['addr']\n self.lm_drain_addr_internal_chns[lm_idx].pop()\n \n if enabled and not request_info['forwarded']:\n self.memory.request(RD, address, port = sram_rd_port)\n \n if enabled and request_info['forwarded']:\n# print('forwarding data')\n if request_info['prereq'] == 'fill':\n forwarded_data = self.approved_write_data_in_cycle[lm_idx]['fill']\n if forwarded_data is None:\n print(self.debug, 'there is no fill data available for forwarding')\n sys.exit(0)\n elif request_info['prereq'] == 'update':\n forwarded_data = self.approved_write_data_in_cycle[lm_idx]['update']\n if forwarded_data is None:\n print(self.debug, 'there is no update data available for forwarding')\n sys.exit(0)\n else:\n print(self.debug, 'nowhere to find data being forwarded')\n sys.exit(0)\n else:\n forwarded_data = None\n \n self.last_read[sram_rd_port].push({'lm_idx': lm_idx,\\\n 'enabled': enabled,\\\n 'addr': address,\\\n 'forwarded': request_info['forwarded'],\\\n 'forwarded_data': forwarded_data})\n \n ack_packet = {'type': 'drain', 'addr': address, 'shrink': request_info['shrink'],\\\n 'prereq': request_info['prereq'], 'forwarded': request_info['forwarded'],\\\n 'reset_phy_head': request_info['reset_phy_head'] }\n# if 'IFmapGLB' in self.debug:\n# print('########## DRAIN #################', self.debug, request_info)\n self.logical_managers[lm_idx].update_book_keeping(ack_packet) \n\n # -------------------------------------------------\n # record access\n # ------------------------------------------------- \n if enabled : \n last_addr = self.last_read_addr[lm_idx]\n if request_info['forwarded']:\n arg_name = 'lm' + str(lm_idx) + '_nforwarded_drain'\n self.cycle_access[arg_name] += 1\n elif last_addr is not None and last_addr == address:\n arg_name = 'lm' + str(lm_idx) + '_nrepeated_drain'\n self.cycle_access[arg_name] +=1\n else:\n arg_name = 'lm' + str(lm_idx) + '_ndrain'\n self.last_read_addr[lm_idx] = address\n self.cycle_access[arg_name] +=1\n else:\n arg_name = 'lm' + str(lm_idx) + '_ngated_drain'\n self.cycle_access[arg_name] +=1\n \n# else:\n# if 'PsumGLB' in self.debug:\n# print(self.debug, 'rd_port:', sram_rd_port, 'last_read_vacant:', self.last_read[sram_rd_port].vacancy(), \\\n# 'port_in_use:', self.memory.port_in_use(sram_rd_port))\n \n\n def check_for_update_request(self, lm_idx):\n if self.logical_managers[lm_idx].update_AG is None:\n return\n# if 'PsumGLB' in self.debug:\n# stop = 1\n if (self.update_data_ichns[lm_idx].valid() and self.lm_update_addr_internal_chns[lm_idx].valid()) or \\\n (self.lm_update_addr_internal_chns[lm_idx].valid() and self.lm_update_addr_internal_chns[lm_idx].peek()['addr'] == 'reset_phead' ):\n request_info = self.logical_managers[lm_idx].check_request('update')\n# if 'PsumGLB' in self.debug:\n# print(self.debug, request_info)\n if request_info['addr'] is not None:\n sram_wr_port = self.lm_sram_map[lm_idx]['update']\n if not self.memory.port_in_use(sram_wr_port):\n address = request_info['addr']\n data = self.update_data_ichns[lm_idx].pop()\n# if 'PsumGLB' in self.debug:\n# print(self.debug, request_info, 'data for update:', data)\n self.approved_write_data_in_cycle[lm_idx]['update'] = data\n self.lm_update_addr_internal_chns[lm_idx].pop()\n self.memory.request(WR, address, data, port = sram_wr_port)\n ack_packet = {'type': 
'update', 'addr': address, 'reset_phy_head': request_info['reset_phy_head']}\n self.logical_managers[lm_idx].update_book_keeping(ack_packet) \n # ---------------------------------------\n # record access\n # --------------------------------------- \n if not data == self.last_write_data[lm_idx]:\n arg_name = 'lm' + str(lm_idx) + '_nupdate'\n else:\n arg_name = 'lm' + str(lm_idx) + '_nrepeated_data_update'\n \n self.cycle_access[arg_name] += 1 \n# self.curr_write_addr[lm_idx] = address\n self.last_write_data[lm_idx] = data\n \n if self.traces_stats == 'show':\n self.recorder.record(self.debug + '_update.txt', data[0])\n else:\n # if the write port is not avaiable, overwrite the logical manager's information\n self.logical_managers[lm_idx].approved_write_addr_in_cycle['update'] = None\n# else:\n# if 'PsumGLB' in self.debug:\n# print('update_data:', self.update_data_ichns[lm_idx].valid())\n\n \n def check_for_fill_request(self, lm_idx):\n if (self.fill_data_ichns[lm_idx].valid() and self.lm_fill_addr_internal_chns[lm_idx].valid()) or \\\n (self.lm_fill_addr_internal_chns[lm_idx].valid() and self.lm_fill_addr_internal_chns[lm_idx].peek()['addr'] == 'reset_phead' ):\n request_info = self.logical_managers[lm_idx].check_request('fill')\n if request_info['addr'] is not None:\n sram_wr_port = self.lm_sram_map[lm_idx]['fill']\n if not self.memory.port_in_use(sram_wr_port):\n address = request_info['addr']\n data = self.fill_data_ichns[lm_idx].pop()\n self.approved_write_data_in_cycle[lm_idx]['fill'] = data\n self.lm_fill_addr_internal_chns[lm_idx].pop()\n# print(self.debug, data)\n self.memory.request(WR, address, data, port = sram_wr_port)\n ack_packet = {'type': 'fill', 'addr': address, 'reset_phy_head':request_info['reset_phy_head']}\n self.logical_managers[lm_idx].update_book_keeping(ack_packet)\n# if 'IFmapGLB' in self.debug:\n# print(self.debug, request_info)\n \n # ---------------------------------------\n # record access\n # --------------------------------------- \n if not data == self.last_write_data[lm_idx]:\n arg_name = 'lm' + str(lm_idx) + '_nfill'\n else:\n arg_name = 'lm' + str(lm_idx) + '_nrepeated_data_fill'\n self.cycle_access[arg_name] += 1\n \n# self.curr_write_addr[lm_idx] = address\n self.last_write_data[lm_idx] = data\n \n # -------------------------------------------\n # record filled data\n # -------------------------------------------\n if self.traces_stats == 'show':\n self.recorder.record(self.debug + '_fill.txt', data[0])\n else:\n # if the write port is not avaiable, overwrite the logical manager's information\n self.logical_managers[lm_idx].approved_write_addr_in_cycle['fill'] = None\n \n def setup_access_info(self):\n \n # =====================================================================\n # construct action description of the compound action: buffer access\n # ===================================================================== \n self.customized_access = True\n self.arg_lst = [] # top level action attributes \n total_nread = [] # all drain related counts: aggregated for memory\n total_nwrite = [] # all fill related counts: aggregated for memory\n total_nrepeated_read = [] # all repeated drain related counts: aggregated for memory\n total_nrepeated_data_write = [] # all repeated data write counts\n subcomp_class_actions = {} # all related subcomponent class actions \n \n # ---------------------------------------------------------------------\n # Collect related information from each logical manager\n # 
--------------------------------------------------------------------- \n for lm_idx in range(self.num_logical_managers):\n \n lm_ndrain = 'lm' + str(lm_idx) + '_ndrain'\n lm_nfill = 'lm' + str(lm_idx) + '_nfill'\n lm_nupdate = 'lm' + str(lm_idx) + '_nupdate'\n lm_nrepeated_data_fill = 'lm' + str(lm_idx) + '_nrepeated_data_fill'\n lm_nrepeated_data_update = 'lm' + str(lm_idx) + '_nrepeated_data_update'\n lm_nrepeated_drain = 'lm' + str(lm_idx) + '_nrepeated_drain'\n lm_nforwarded_drain = 'lm' + str(lm_idx) + '_nforwarded_drain'\n lm_ngated_drain = 'lm' + str(lm_idx) + '_ngated_drain'\n \n self.arg_lst.append(lm_ndrain)\n self.arg_lst.append(lm_nfill)\n self.arg_lst.append(lm_nupdate)\n self.arg_lst.append(lm_nrepeated_data_fill)\n self.arg_lst.append(lm_nrepeated_data_update)\n self.arg_lst.append(lm_nrepeated_drain)\n self.arg_lst.append(lm_nforwarded_drain)\n self.arg_lst.append(lm_ngated_drain)\n \n # sum up the access that will point into RAM\n # gated and forwarded drain does not invovle actual RAM accesses\n total_nread.append(lm_ndrain)\n total_nwrite.append(lm_nfill)\n total_nwrite.append(lm_nupdate)\n total_nrepeated_read.append(lm_nrepeated_drain)\n total_nrepeated_data_write.append(lm_nrepeated_data_fill)\n total_nrepeated_data_write.append(lm_nrepeated_data_update)\n \n # ---------------------------------------------------------------------------\n # Collect related information for the address generators, embedded in LMs\n # --------------------------------------------------------------------------- \n subcomp_class_actions.update({self.logical_managers[lm_idx].class_name:\\\n {'action_name': 'generate',\\\n 'arguments': [lm_ndrain, lm_ngated_drain, lm_nfill, lm_nupdate, lm_nrepeated_data_update, lm_nrepeated_data_fill ],\\\n 'repeat': {'sum':[lm_ndrain, lm_ngated_drain, lm_nfill, lm_nupdate, lm_nrepeated_data_update, lm_nrepeated_data_fill]}}\\\n })\n \n # ---------------------------------------------------------------------\n # Collect related information for the channel subcomponent (move to top level/serializer/deserializer)\n # --------------------------------------------------------------------- \n self.fill_data_ichns[lm_idx].base_class_name = 'channel'\n self.fill_data_ichns[lm_idx].component_class_as_subclass = 'show'\n \n self.drain_data_ochns[lm_idx].base_class_name = 'channel'\n self.drain_data_ochns[lm_idx].component_class_as_subclass = 'show'\n \n self.fill_data_ichns[lm_idx].class_name = 'lm_' + str(lm_idx) + '_fill_chn' \n self.drain_data_ochns[lm_idx].class_name = 'lm_' + str(lm_idx) + '_drain_chn' \n \n\n \n \n subcomp_class_actions.update({self.fill_data_ichns[lm_idx].class_name: {'action_name': 'access',\\\n 'repeat': lm_nfill}})\n subcomp_class_actions.update({self.drain_data_ochns[lm_idx].class_name: {'action_name': 'access',\\\n 'repeat': {'sum':[lm_ndrain, lm_nrepeated_drain]}}}) \n \n if self.update_data_ichns is not None and self.update_data_ichns[lm_idx] is not None:\n \n self.update_data_ichns[lm_idx].base_class_name = 'channel'\n self.update_data_ichns[lm_idx].component_class_as_subclass = 'show'\n \n self.update_data_ichns[lm_idx].class_name = 'lm_' + str(lm_idx) + '_update_chn' \n \n subcomp_class_actions.update({self.update_data_ichns[lm_idx].class_name: {'action_name': 'access',\\\n 'repeat': lm_nupdate}}) \n \n \n \n # ---------------------------------------------------------------------\n # Collect related information for the memory subcomponent\n # --------------------------------------------------------------------- \n 
subcomp_class_actions.update({self.memory.class_name: {'action_name': 'RAM_access',\\\n 'arguments': [{'sum': total_nread}, \\\n {'sum': total_nwrite}, \\\n {'sum': total_nrepeated_read},\\\n {'sum': total_nrepeated_data_write}],\\\n 'repeat': 1}})\n # ---------------------------------------------------------------------\n # Define all of the actions using collected info\n # ---------------------------------------------------------------------\n \n smartbuffer_access_action_def = {'arguments': self.arg_lst,\\\n 'subcomponent_class_actions': subcomp_class_actions}\n \n idle_access_action_def = {'subcomponent_class_actions': \\\n {self.memory.class_name: {'action_name': 'idle','repeat': 1}}} \n \n self.actions = {'idle': idle_access_action_def,\\\n 'buffer_access': smartbuffer_access_action_def}\n \n # =====================================================================\n # construct containers for recording access counts\n # ===================================================================== \n self.last_read_addr = [None] * self.num_logical_managers\n self.last_write_addr = [None] * self.num_logical_managers\n self.last_write_data = [None] * self.num_logical_managers\n self.curr_write_addr = [None] * self.num_logical_managers\n self.access_stats = {'buffer_access':[], 'idle':{'count': 0}}\n self.cycle_access = {}\n self.raw_access_stats = {'buffer_access':{}}\n self.reset_cycle_access()\n \n def reset_cycle_access(self):\n for arg in self.arg_lst:\n self.cycle_access.update({arg:0})\n \n \n def __ntick__(self):\n \n Module.__ntick__(self)\n \n idle = True\n for arg , value in self.cycle_access.items():\n if value > 0:\n idle = False\n break\n if idle:\n self.access_stats['idle']['count'] += 1\n else:\n cycle_access_lst = []\n for arg in self.arg_lst:\n cycle_access_lst.append(self.cycle_access[arg])\n cycle_access_tuple = tuple(cycle_access_lst)\n if cycle_access_tuple[6] == 1:\n self.access_stats['idle']['count'] += 1\n if cycle_access_tuple in self.raw_access_stats['buffer_access']:\n self.raw_access_stats['buffer_access'][cycle_access_tuple] += 1\n else:\n self.raw_access_stats['buffer_access'][cycle_access_tuple] = 1\n self.reset_cycle_access()\n \n for lm_idx in range(self.num_logical_managers):\n self.last_write_addr[lm_idx] = self.curr_write_addr[lm_idx]\n \n self.logical_managers[lm_idx].approved_write_addr_in_cycle['fill'] = None \n self.logical_managers[lm_idx].approved_write_addr_in_cycle['update'] = None \n \n self.approved_write_data_in_cycle[lm_idx]['fill'] = None\n self.approved_write_data_in_cycle[lm_idx]['update'] = None\n \n def summerize_access_stats(self):\n for access_info_tuple, count in self.raw_access_stats['buffer_access'].items():\n arg_dict = {}\n arg_idx = 0\n for arg_name in self.arg_lst:\n arg_dict[arg_name] = access_info_tuple[arg_idx]\n arg_idx += 1\n access_info_dict = {'arguments': arg_dict, 'count': count}\n self.access_stats['buffer_access'].append(access_info_dict.copy())\n \n \n \n \n \n \n \n \n \n \n ","repo_name":"emma-mens/dl-hardware-project","sub_path":"tools/nnsim/nnsim/nnsimSmartBuffer.py","file_name":"nnsimSmartBuffer.py","file_ext":"py","file_size_in_byte":29493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17624198393","text":"import logging\nfrom cgi import escape\nfrom collections import OrderedDict\nfrom itertools import islice\nfrom AccessControl import Unauthorized\nfrom Products.ZenUtils.Ext import DirectResponse\nfrom Products.ZenUtils.Utils import 
getDisplayType\nfrom Products.ZenUtils.jsonutils import unjson\nfrom Products import Zuul\nfrom Products.ZenModel.Device import Device\nfrom Products.ZenModel.ZenossSecurity import ZEN_CHANGE_DEVICE_PRODSTATE, ZEN_MANAGE_DMD, \\\n ZEN_ADMIN_DEVICE, ZEN_MANAGE_DEVICE, ZEN_DELETE_DEVICE\nfrom Products.Zuul import filterUidsByPermission\nfrom Products.Zuul.facades import ObjectNotFoundException\nfrom Products.Zuul.routers import TreeRouter\nfrom Products.Zuul.exceptions import DatapointNameConfict\nfrom Products.Zuul.catalog.events import IndexingEvent\nfrom Products.Zuul.form.interfaces import IFormBuilder\nfrom Products.Zuul.decorators import require, contextRequire, serviceConnectionError\nfrom Products.ZenUtils.guid.interfaces import IGlobalIdentifier, IGUIDManager\nfrom Products.ZenUtils.Utils import getPasswordFields, maskSecureProperties\nfrom Products.ZenMessaging.audit import audit\nfrom zope.event import notify\n\nlog = logging.getLogger('zen.Zuul')\n\nclass DeviceRouter(TreeRouter):\n \"\"\"\n A JSON/ExtDirect interface to operations on devices\n \"\"\"\n\n @serviceConnectionError\n @contextRequire(\"Manage DMD\", 'contextUid')\n def addDeviceClassNode(self, type, contextUid, id, description=None, connectionInfo=None):\n \"\"\"\n Adds a new device class organizer specified by the parameter id to\n the parent organizer specified by contextUid.\n\n contextUid must be a path to a DeviceClass.\n\n @type type: string\n @param type: Node type (always 'organizer' in this case)\n @type contextUid: string\n @param contextUid: Path to the device class organizer that will\n be the new node's parent (ex. /zport/dmd/Devices/)\n @type id: string\n @param id: The identifier of the new node\n @type description: string\n @param description: (optional) Describes the new device class\n @type connectionInfo: list\n @param connectionInfo: (optional) List of zproperties that constitute credentials for this device class\n @rtype: dictionary\n @return: B{Properties}:\n - success: (bool) Success of node creation\n - nodeConfig: (dictionary) The new device class's properties\n \"\"\"\n facade = self._getFacade()\n organizer = facade.addDeviceClass(contextUid,\n id,\n description,\n connectionInfo)\n uid = organizer.uid\n\n treeNode = facade.getTree(uid)\n audit('UI.DeviceClass.Add', uid, description=description, connectionInfo=connectionInfo)\n return DirectResponse.succeed(\"Device Class Added\", nodeConfig=Zuul.marshal(treeNode))\n\n\n @serviceConnectionError\n @contextRequire(\"Manage DMD\", 'contextUid')\n def addLocationNode(self, type, contextUid, id,\n description=None, address=None):\n \"\"\"\n Adds a new location organizer specified by the parameter id to\n the parent organizer specified by contextUid.\n\n contextUid must be a path to a Location.\n\n @type type: string\n @param type: Node type (always 'organizer' in this case)\n @type contextUid: string\n @param contextUid: Path to the location organizer that will\n be the new node's parent (ex. 
/zport/dmd/Devices/Locations)\n @type id: string\n @param id: The identifier of the new node\n @type description: string\n @param description: (optional) Describes the new location\n @type address: string\n @param address: (optional) Physical address of the new location\n @rtype: dictionary\n @return: B{Properties}:\n - success: (bool) Success of node creation\n - nodeConfig: (dictionary) The new location's properties\n \"\"\"\n facade = self._getFacade()\n organizer = facade.addLocationOrganizer(contextUid,\n id,\n description,\n address)\n uid = organizer.uid\n\n treeNode = facade.getTree(uid)\n audit('UI.Location.Add', uid, description=description, address=address)\n return DirectResponse.succeed(\"Location added\", nodeConfig=Zuul.marshal(treeNode))\n\n def _getFacade(self):\n return Zuul.getFacade('device', self.context)\n\n @serviceConnectionError\n def getTree(self, id):\n \"\"\"\n Returns the tree structure of an organizer hierarchy where\n the root node is the organizer identified by the id parameter.\n\n @type id: string\n @param id: Id of the root node of the tree to be returned\n @rtype: [dictionary]\n @return: Object representing the tree\n \"\"\"\n facade = self._getFacade()\n tree = facade.getTree(id)\n data = Zuul.marshal(tree)\n return [data]\n\n @serviceConnectionError\n def getComponents(self, uid=None, meta_type=None, keys=None, start=0,\n limit=50, page=0, sort='name', dir='ASC', name=None):\n \"\"\"\n Retrieves all of the components at a given UID. This method\n allows for pagination.\n\n @type uid: string\n @param uid: Unique identifier of the device whose components are\n being retrieved\n @type meta_type: string\n @param meta_type: (optional) The meta type of the components to be\n retrieved (default: None)\n @type keys: list\n @param keys: (optional) List of keys to include in the returned\n dictionary. 
If None then all keys will be returned\n (default: None)\n @type start: integer\n @param start: (optional) Offset to return the results from; used in\n pagination (default: 0)\n @type limit: integer\n @param limit: (optional) Number of items to return; used in pagination\n (default: 50)\n @type sort: string\n @param sort: (optional) Key on which to sort the return results;\n (default: 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type name: regex\n @param name: (optional) Used to filter the results (default: None)\n @rtype: DirectResponse\n @return: B{Properties}:\n - data: (dictionary) The components returned\n - totalCount: (integer) Number of items returned\n - hash: (string) Hashcheck of the current component state (to check\n whether components have changed since last query)\n \"\"\"\n facade = self._getFacade()\n if name:\n # Load every component if we have a filter\n limit = None\n comps = facade.getComponents(uid, meta_type=meta_type, start=start,\n limit=limit, sort=sort, dir=dir, name=name, keys=keys)\n total = comps.total\n hash = comps.hash_\n\n data = Zuul.marshal(comps, keys=keys)\n return DirectResponse(data=data, totalCount=total,\n hash=hash)\n\n def getComponentTree(self, uid=None, id=None, sorting_dict=None):\n \"\"\"\n Retrieves all of the components set up to be used in a\n tree.\n\n @type uid: string\n @param uid: Unique identifier of the root of the tree to retrieve\n @type id: string\n @param id: not used\n @rtype: [dictionary]\n @return: Component properties in tree form\n \"\"\"\n if id:\n uid = id\n facade = self._getFacade()\n data = facade.getComponentTree(uid)\n sevs = [c[0].lower() for c in\n self.context.ZenEventManager.severityConversions]\n data.sort(cmp=lambda a, b: cmp(sevs.index(a['severity']),\n sevs.index(b['severity'])))\n result = []\n for datum in data:\n result.append(dict(\n id=datum['type'],\n path='Components/%s' % datum['type'],\n text={\n 'text': datum['type'],\n 'count': datum['count'],\n 'description': 'components'},\n iconCls='tree-severity-icon-small-' + datum['severity'],\n leaf=True))\n if sorting_dict:\n sorting_keys_list = [key for key in sorting_dict.iterkeys()]\n def cmp_items(first, second):\n # Resolving keys from a dictionary of given names convention\n x = str(first['text']['text'])\n y = str(second['text']['text'])\n if x in sorting_keys_list:\n x = sorting_dict[x][0]\n if y in sorting_keys_list:\n y = sorting_dict[y][0]\n if x < y:\n return -1\n elif x > y:\n return 1\n else:\n return 0\n result.sort(cmp=cmp_items)\n return result\n\n def findComponentIndex(self, componentUid, uid=None, meta_type=None,\n sort='name', dir='ASC', name=None, **kwargs):\n \"\"\"\n Given a component uid and the component search criteria, this retrieves\n the position of the component in the results.\n\n @type componentUid: string\n @param componentUid: Unique identifier of the component whose index\n to return\n @type uid: string\n @param uid: Unique identifier of the device queried for components\n @type meta_type: string\n @param meta_type: (optional) The meta type of the components to retrieve\n (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return results (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type name: regex\n @param name: (optional) Used to filter the results (default: None)\n @rtype: DirectResponse\n @return: B{Properties}:\n - index: 
(integer) Index of the component\n \"\"\"\n facade = self._getFacade()\n i = facade.findComponentIndex(componentUid, uid,\n meta_type, sort, dir, name)\n return DirectResponse(index=i)\n\n @serviceConnectionError\n def getForm(self, uid):\n \"\"\"\n Given an object identifier, this returns all of the editable fields\n on that object as well as their ExtJs xtype that one would\n use on a client side form.\n\n @type uid: string\n @param uid: Unique identifier of an object\n @rtype: DirectResponse\n @return: B{Properties}\n - form: (dictionary) form fields for the object\n \"\"\"\n info = self._getFacade().getInfo(uid)\n form = IFormBuilder(info).render(fieldsets=False)\n form = Zuul.marshal(form)\n return DirectResponse(form=form)\n\n @serviceConnectionError\n def getInfo(self, uid, keys=None):\n \"\"\"\n Get the properties of a device or device organizer\n\n @type uid: string\n @param uid: Unique identifier of an object\n @type keys: list\n @param keys: (optional) List of keys to include in the returned\n dictionary. If None then all keys will be returned\n (default: None)\n @rtype: DirectResponse\n @return: B{Properties}\n - data: (dictionary) Object properties\n - disabled: (bool) If current user doesn't have permission to use setInfo\n \"\"\"\n facade = self._getFacade()\n process = facade.getInfo(uid)\n secure_properties = getPasswordFields(process)\n data = Zuul.marshal(process, keys)\n maskSecureProperties(data, secure_properties)\n disabled = not Zuul.checkPermission('Manage DMD', self.context)\n return DirectResponse(data=data, disabled=disabled)\n\n @serviceConnectionError\n def setInfo(self, **data):\n \"\"\"\n Set attributes on a device or device organizer.\n This method accepts any keyword argument for the property that you wish\n to set. 
The only required property is \"uid\".\n\n @type uid: string\n @keyword uid: Unique identifier of an object\n @rtype: DirectResponse\n \"\"\"\n facade = self._getFacade()\n if not (\n Zuul.checkPermission(ZEN_MANAGE_DEVICE, self.context)\n or (\n Zuul.checkPermission(ZEN_CHANGE_DEVICE_PRODSTATE, self.context)\n and sorted(data.keys()) == ['productionState', 'uid']\n )\n or Zuul.checkAdministeredObjectPermission(\n data.get('uid'), ZEN_MANAGE_DMD, self.context\n )\n ):\n raise Exception('You do not have permission to save changes.')\n\n the_uid = data['uid'] # saved now; it gets deleted from data later\n process = facade.getInfo(the_uid)\n oldData = self._getInfoData(process, data.keys())\n Zuul.unmarshal(data, process)\n newData = self._getInfoData(process, data.keys())\n # reindex the object if necessary\n if hasattr(process._object, 'index_object'):\n process._object.index_object()\n\n # Ex: ('UI.Device.Edit', uid, data_={'productionState': 'High'})\n # Ex: ('UI.Location.Edit', uid, description='Blah', old_description='Foo')\n if 'name' in oldData:\n oldData['device_name'] = oldData['name'] # we call it this now\n del oldData['name']\n if 'name' in newData:\n del newData['name'] # it gets printed automatically\n if isinstance(process._object, Device):\n # ZEN-2837, ZEN-247: Audit names instead of numbers\n dmd = self.context\n if 'productionState' in oldData:\n oldData['productionState'] = dmd.convertProdState(oldData['productionState'])\n if 'productionState' in newData:\n newData['productionState'] = dmd.convertProdState(newData['productionState'])\n if 'priority' in oldData:\n oldData['priority'] = dmd.convertPriority(oldData['priority'])\n if 'priority' in newData:\n newData['priority'] = dmd.convertPriority(newData['priority'])\n audit(['UI', getDisplayType(process._object), 'Edit'], the_uid,\n data_=newData, oldData_=oldData, skipFields_='uid')\n return DirectResponse.succeed()\n\n def _getInfoData(self, info, keys):\n # TODO: generalize this code for all object types, if possible.\n values = {}\n for key in keys:\n val = getattr(info, key, None)\n if val is not None:\n values[key] = str(val) # immutable copy\n return values\n\n @require('Manage Device')\n def resumeCollection(self, id):\n # argument 1 is actually uid but is passed as the \"id\" keyword\n return self._getFacade().resumeCollection(id)\n\n @require('Manage Device')\n def setProductInfo(self, uid, **data):\n \"\"\"\n Sets the ProductInfo on a device. This method has the following valid\n keyword arguments:\n\n @type uid: string\n @keyword uid: Unique identifier of a device\n @type hwManufacturer: string\n @keyword hwManufacturer: Hardware manufacturer\n @type hwProductName: string\n @keyword hwProductName: Hardware product name\n @type osManufacturer: string\n @keyword osManufacturer: Operating system manufacturer\n @type osProductName: string\n @keyword osProductName: Operating system product name\n @rtype: DirectResponse\n \"\"\"\n facade = self._getFacade()\n facade.setProductInfo(uid, **data)\n audit('UI.Device.Edit', uid, data_=data)\n return DirectResponse()\n\n def getDeviceUuidsByName(self, query=\"\", start=0, limit=25, page=1, uuid=None):\n \"\"\"\n Retrieves a list of device uuids. 
For use in combos.\n If uuid is set, ensures that it is included in the returned list.\n \"\"\"\n facade = self._getFacade()\n query = \"*\" if not query else query\n devices = facade.getDevices(params={'name':query}) # TODO: pass start=start, limit=limit\n result = [{'name':escape(dev.name),\n 'uuid':IGlobalIdentifier(dev._object).getGUID()}\n for dev in devices]\n\n if uuid and uuid not in (device['uuid'] for device in result):\n guidManager = IGUIDManager(self.context.dmd)\n device = guidManager.getObject(uuid)\n if device:\n result.append({'name':escape(device.name()), 'uuid':uuid})\n\n return DirectResponse.succeed(data=result)\n\n def uuidExists(self, uuid):\n \"\"\"\n Return boolean for existence of object with provided uuid.\n Note that object type is not validated.\n\n @type uuid: string\n @param uuid: UUID to check for existence\n @rtype: DirectResponse\n \"\"\"\n if IGUIDManager(self.context.dmd).getObject(uuid):\n return DirectResponse.succeed()\n else:\n return DirectResponse.fail()\n\n def getDeviceUids(self, uid):\n \"\"\"\n Return a list of device uids underneath an organizer. This includes\n all the devices belonging to any child organizers.\n\n @type uid: string\n @param uid: Unique identifier of the organizer to get devices from\n @rtype: DirectResponse\n @return: B{Properties}:\n - devices: (list) device uids\n \"\"\"\n facade = self._getFacade()\n uids = facade.getDeviceUids(uid)\n return DirectResponse.succeed(devices=uids)\n\n @serviceConnectionError\n def getDevices(self, uid=None, start=0, params=None, limit=50, sort='name',\n page=None,\n dir='ASC', keys=None):\n \"\"\"\n Retrieves a list of devices. This method supports pagination.\n\n @type uid: string\n @param uid: Unique identifier of the organizer to get devices from\n @type start: integer\n @param start: (optional) Offset to return the results from; used in\n pagination (default: 0)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type limit: integer\n @param limit: (optional) Number of items to return; used in pagination\n (default: 50)\n @type sort: string\n @param sort: (optional) Key on which to sort the return results (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: B{Properties}:\n - devices: (list) Dictionaries of device properties\n - totalCount: (integer) Number of devices returned\n - hash: (string) Hashcheck of the current device state (to check\n whether devices have changed since last query)\n \"\"\"\n facade = self._getFacade()\n if isinstance(params, basestring):\n params = unjson(params)\n\n if params:\n # strip any leading asterisk wildcards,\n # the most common filter-input issue\n params = {key:(value.lstrip('*') if isinstance(value, str) else value)\n for key, value in params.iteritems()}\n\n devices = facade.getDevices(uid, start, limit, sort, dir, params)\n allKeys = ['name', 'ipAddress', 'productionState', 'events',\n 'ipAddressString', 'serialNumber', 'hwManufacturer',\n 'hwModel', 'osModel', 'osManufacturer', 'collector',\n 'priority', 'systems', 'groups', 'location',\n 'pythonClass', 'tagNumber']\n usedKeys = keys or allKeys\n if 'uid' not in usedKeys:\n usedKeys.append('uid')\n\n data = Zuul.marshal(devices.results, usedKeys)\n\n return DirectResponse(devices=data, totalCount=devices.total,\n hash=devices.hash_)\n\n def 
renameDevice(self, uid, newId, retainGraphData=False):\n \"\"\"\n Set the device specified by \"uid\" to have the id \"newId\".\n This will raise an exception if it fails.\n\n @type uid: string\n @param uid: The unique id of the device we are renaming\n @type newId: string\n @param newId: The new id for the device\n \"\"\"\n facade = self._getFacade()\n newUid = facade.renameDevice(uid, newId, retainGraphData)\n return DirectResponse.succeed(uid=newUid)\n\n def doesMoveRequireRemodel(self, uid, target):\n \"\"\"\n Determine if the device will need to be remodeled if it is moved.\n\n @type uid: string\n @param uid: Uid of device in current location\n @type target: string\n @param target: Uid of the organizer to move the device to\n \"\"\"\n facade = self._getFacade()\n remodelRequired = facade.doesMoveRequireRemodel(uid, target)\n return DirectResponse.succeed(remodelRequired=remodelRequired)\n\n def moveDevices(self, uids, target, hashcheck=None, ranges=(), uid=None,\n params=None, sort='name', dir='ASC', asynchronous=True):\n \"\"\"\n Moves the devices specified by uids to the organizer specified by 'target'.\n\n @type uids: [string]\n @param uids: List of device uids to move\n @type target: string\n @param target: Uid of the organizer to move the devices to\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: B{Properties}:\n - tree: ([dictionary]) Object representing the new device tree\n - exports: (integer) Number of devices moved\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n uids = filterUidsByPermission(self.context.dmd, ZEN_MANAGE_DEVICE, uids)\n facade = self._getFacade()\n\n # In order to display the device name and old location/device class,\n # we must audit first. 
This means it's possible we can audit a change\n # then the command fails, unfortunately.\n # example: audit('UI.Device.ChangeLocation', uid, location=..., old_location=...)\n targetType = getDisplayType(facade._getObject(target))\n autoRemovalTypes = ('DeviceClass', 'Location')\n action = ('Change' if targetType in autoRemovalTypes else 'AddTo') + targetType\n for uid in uids:\n oldData = {}\n if targetType == 'Location': # get old location\n location = facade._getObject(uid).location()\n locationPath = location.getPrimaryId() if location else ''\n oldData[targetType] = locationPath\n elif targetType == 'DeviceClass':\n deviceClass = facade._getObject(uid).deviceClass()\n deviceClassPath = deviceClass.getPrimaryId() if deviceClass else ''\n oldData[targetType] = deviceClassPath\n audit(['UI.Device', action], uid,\n data_={targetType:target}, oldData_=oldData)\n try:\n targetObj = facade._getObject(target)\n if Zuul.checkPermission(ZEN_ADMIN_DEVICE, targetObj):\n result = facade.moveDevices(uids, target, asynchronous=asynchronous)\n else:\n return DirectResponse.fail(msg='User does not have permissions to move devices to {0}'.format(target))\n except Exception as e:\n log.exception(\"Failed to move devices\")\n return DirectResponse.exception(e, 'Failed to move devices.')\n if asynchronous:\n return DirectResponse.succeed(new_jobs=Zuul.marshal([result],\n keys=('uuid', 'description', 'started')))\n else:\n return DirectResponse.succeed(exports=result)\n\n @require('Manage Device')\n def pushChanges(self, uids, hashcheck, ranges=(), uid=None, params=None,\n sort='name', dir='ASC'):\n \"\"\"\n Push changes on device(s) configuration to collectors.\n\n @type uids: [string]\n @param uids: List of device uids to push changes\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n\n facade = self._getFacade()\n facade.pushChanges(uids)\n for uid in uids:\n audit('UI.Device.PushChanges', uid)\n return DirectResponse.succeed('Changes pushed to collectors.')\n\n def lockDevices(self, uids, hashcheck, ranges=(), updates=False,\n deletion=False, sendEvent=False, uid=None, params=None,\n sort='name', dir='ASC'):\n \"\"\"\n Lock device(s) from changes.\n\n @type uids: [string]\n @param uids: List of device uids to lock\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type updates: boolean\n @param updates: (optional) True to lock device from updates (default: False)\n @type deletion: boolean\n @param deletion: (optional) True to lock device from deletion\n 
(default: False)\n @type sendEvent: boolean\n @param sendEvent: (optional) True to send an event when an action is\n blocked by locking (default: False)\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n uids = filterUidsByPermission(self.context.dmd, ZEN_MANAGE_DMD, uids)\n try:\n facade.setLockState(uids, deletion=deletion, updates=updates,\n sendEvent=sendEvent)\n if not deletion and not updates:\n message = \"Unlocked %s devices.\" % len(uids)\n else:\n actions = []\n if deletion:\n actions.append('deletion')\n if updates:\n actions.append('updates')\n message = \"Locked %s devices from %s.\" % (len(uids),\n ' and '.join(actions))\n for uid in uids:\n audit('UI.Device.EditLocks', uid,\n deletion=deletion, updates=updates, sendEvent=sendEvent)\n return DirectResponse.succeed(message)\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to lock devices.')\n\n\n def resetIp(self, uids, hashcheck, uid=None, ranges=(), params=None,\n sort='name', dir='ASC', ip=''):\n \"\"\"\n Reset IP address(es) of device(s) to the results of a DNS lookup or\n a manually set address\n\n @type uids: [string]\n @param uids: List of device uids with IP's to reset\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type ip: string\n @param ip: (optional) IP to set device to. Empty string causes DNS\n lookup (default: '')\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n uids = filterUidsByPermission(self.context.dmd, ZEN_ADMIN_DEVICE, uids)\n try:\n for uid in uids:\n info = facade.getInfo(uid)\n info.ipAddress = ip # Set to empty causes DNS lookup\n audit('UI.Device.ResetIP', uid, ip=ip)\n return DirectResponse('Reset %s IP addresses.' 
% len(uids))\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to reset IP addresses.')\n\n @require('Manage Device')\n def resetCommunity(self, uids, hashcheck, uid=None, ranges=(), params=None,\n sort='name', dir='ASC'):\n \"\"\"\n Reset SNMP community string(s) on device(s)\n\n @type uids: [string]\n @param uids: List of device uids to reset\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n try:\n for uid in uids:\n facade.resetCommunityString(uid)\n audit('UI.Device.ResetCommunity', uid)\n return DirectResponse('Reset %s community strings.' % len(uids))\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to reset community strings.')\n\n def setProductionState(self, uids, prodState, hashcheck, uid=None,\n ranges=(), params=None, sort='name', dir='ASC'):\n \"\"\"\n Set the production state of device(s).\n\n @type uids: [string]\n @param uids: List of device uids to set\n @type prodState: integer\n @param prodState: Production state to set device(s) to.\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n uids = filterUidsByPermission(self.context.dmd, ZEN_CHANGE_DEVICE_PRODSTATE,\n uids)\n try:\n oldStates = {}\n uids = (uids,) if isinstance(uids, basestring) else uids\n for uid in uids:\n device = facade._getObject(uid)\n if isinstance(device, Device):\n oldStates[uid] = self.context.convertProdState(device.getProductionState())\n\n prodStateName = self.context.convertProdState(prodState)\n\n auditData = {'productionState': prodStateName}\n for uid in uids:\n oldAuditData = {'productionState': oldStates[uid]}\n audit('UI.Device.Edit', uid, oldData_=oldAuditData, data_=auditData)\n facade.setProductionState(uids, prodState, 
asynchronous=True)\n return DirectResponse('Set %s devices to %s.' % (\n len(uids), prodStateName))\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to change production state.')\n\n def setPriority(self, uids, priority, hashcheck, uid=None, ranges=(),\n params=None, sort='name', dir='ASC'):\n \"\"\"\n Set device(s) priority.\n\n @type uids: [string]\n @param uids: List of device uids to set\n @type priority: integer\n @param priority: Priority to set device(s) to.\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n uids = filterUidsByPermission(self.context.dmd, ZEN_MANAGE_DEVICE, uids)\n try:\n for uid in uids:\n info = facade.getInfo(uid)\n oldPriorityLabel = info.priorityLabel\n info.priority = priority\n notify(IndexingEvent(info._object))\n audit('UI.Device.Edit', uid,\n priority=info.priorityLabel,\n oldData_={'priority':oldPriorityLabel})\n return DirectResponse('Set %s devices to %s priority.' 
% (\n len(uids), info.priorityLabel))\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to change priority.')\n\n def moveCollectorDevices(self, srcCollectors, dstCollector, hashcheck, uid=None, ranges=(),\n params=None, sort='name', dir='ASC', moveData=False,\n asynchronous=True):\n \"\"\"\n Move all devices under one or more collectors to another collector\n\n The signature is exactly the same as setCollector(), except that the\n 'uids' parameter is replaced with 'srcCollectors'\n\n @type srcCollectors: list of strings\n @param srcCollectors: The collectors to move all devices from\n \"\"\"\n monitorFacade = Zuul.getFacade('monitors', self.context)\n if isinstance(srcCollectors, basestring):\n srcCollectorObjs = monitorFacade.get(srcCollectors)\n else:\n srcCollectorObjs = []\n for collector in srcCollectors:\n srcCollectorObjs.append(monitorFacade.get(collector))\n deviceUids = []\n for collector in srcCollectorObjs:\n deviceUids.extend([ dev.getPrimaryId() for dev in collector.getDevices() ])\n return self.setCollector(deviceUids, dstCollector, hashcheck, uid, ranges,\n params, sort, dir, moveData, asynchronous)\n\n def setCollector(self, uids, collector, hashcheck, uid=None, ranges=(),\n params=None, sort='name', dir='ASC', moveData=False,\n asynchronous=True):\n \"\"\"\n Set device(s) collector.\n\n @type uids: [string]\n @param uids: List of device uids to set\n @type collector: string\n @param collector: Collector to set devices to\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n uids = filterUidsByPermission(self.context.dmd, ZEN_ADMIN_DEVICE, uids)\n try:\n # iterate through uids so that logging works as expected\n result = facade.setCollector(uids, collector, asynchronous)\n for devUid in uids:\n audit('UI.Device.ChangeCollector', devUid, collector=collector)\n if asynchronous and result:\n return DirectResponse.succeed(new_jobs=Zuul.marshal(result,\n keys=('uuid', 'description', 'started')))\n else:\n return DirectResponse.succeed('Changed collector to %s for %s devices.' 
%\n (collector, len(uids)))\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to change the collector.')\n\n def setComponentsMonitored(self, uids, hashcheck, monitor=False, uid=None,\n ranges=(), meta_type=None, keys=None,\n start=0, limit=50, sort='name', dir='ASC',\n name=None):\n \"\"\"\n Set the monitoring flag for component(s)\n\n @type uids: [string]\n @param uids: List of component uids to set\n @type hashcheck: string\n @param hashcheck: Hashcheck for the components (from getComponents())\n @type monitor: boolean\n @param monitor: (optional) True to monitor component (default: False)\n @type uid: string\n @param uid: (optional) Device to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type meta_type: string\n @param meta_type: (optional) The meta type of the components to retrieve\n (default: None)\n @type keys: [string]\n @param keys: not used\n @type start: integer\n @param start: (optional) Offset to return the results from; used in\n pagination (default: 0)\n @type limit: integer\n @param limit: (optional) Number of items to return; used in pagination\n (default: 50)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type name: string\n @param name: (optional) Component name to search for when loading ranges\n (default: None)\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadComponentRanges(ranges, hashcheck, uid, (),\n meta_type, start, limit, sort,\n dir, name)\n facade = self._getFacade()\n facade.setMonitor(uids, monitor)\n action = 'SetMonitored' if monitor else 'SetUnmonitored'\n for uid in uids:\n audit(['UI.Component', action], uid)\n return DirectResponse.succeed(('Set monitoring to %s for %s'\n ' components.') % (monitor, len(uids)))\n\n def lockComponents(self, uids, hashcheck, uid=None, ranges=(),\n updates=False, deletion=False, sendEvent=False,\n meta_type=None, keys=None, start=0, limit=50,\n sort='name', dir='ASC', name=None):\n \"\"\"\n Lock component(s) from changes.\n\n @type uids: [string]\n @param uids: List of component uids to lock\n @type hashcheck: string\n @param hashcheck: Hashcheck for the components (from getComponents())\n @type uid: string\n @param uid: (optional) Device to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type updates: boolean\n @param updates: (optional) True to lock component from updates (default: False)\n @type deletion: boolean\n @param deletion: (optional) True to lock component from deletion\n (default: False)\n @type sendEvent: boolean\n @param sendEvent: (optional) True to send an event when an action is\n blocked by locking (default: False)\n @type meta_type: string\n @param meta_type: (optional) The meta type of the components to retrieve\n (default: None)\n @type keys: [string]\n @param keys: not used\n @type start: integer\n @param start: (optional) Offset to return the results from; used in\n pagination (default: 0)\n @type limit: integer\n @param limit: (optional) Number of items to return; used in pagination\n (default: 50)\n @type 
sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type name: string\n @param name: (optional) Component name to search for when loading ranges\n (default: None)\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadComponentRanges(ranges, hashcheck, uid, (),\n meta_type, start, limit, sort,\n dir, name)\n facade = self._getFacade()\n try:\n facade.setLockState(uids, deletion=deletion, updates=updates,\n sendEvent=sendEvent)\n if not deletion and not updates:\n message = \"Unlocked %d components.\" % len(uids)\n else:\n actions = []\n if deletion:\n actions.append('deletion')\n if updates:\n actions.append('updates')\n actions = ' and '.join(actions)\n message = \"Locked %d components from %s.\" % (len(uids), actions)\n for uid in uids:\n audit('UI.Component.EditLocks', uid,\n deletion=deletion, updates=updates, sendEvents=sendEvent)\n return DirectResponse.succeed(message)\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to lock components.')\n\n def deleteComponents(self, uids, hashcheck, uid=None, ranges=(),\n meta_type=None, keys=None, start=0, limit=50,\n sort='name', dir='ASC', name=None):\n \"\"\"\n Delete device component(s).\n\n @type uids: [string]\n @param uids: List of component uids to delete\n @type hashcheck: string\n @param hashcheck: Hashcheck for the components (from getComponents())\n @type uid: string\n @param uid: (optional) Device to use when using ranges to get\n additional uids (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type meta_type: string\n @param meta_type: (optional) The meta type of the components to retrieve\n (default: None)\n @type keys: [string]\n @param keys: not used\n @type start: integer\n @param start: (optional) Offset to return the results from; used in\n pagination (default: 0)\n @type limit: integer\n @param limit: (optional) Number of items to return; used in pagination\n (default: 50)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type name: string\n @param name: (optional) Component name to search for when loading ranges\n (default: None)\n @rtype: DirectResponse\n @return: Success or failure message\n \"\"\"\n if ranges:\n uids += self.loadComponentRanges(ranges, hashcheck, uid, (),\n meta_type, start, limit, sort,\n dir, name)\n facade = self._getFacade()\n try:\n facade.deleteComponents(uids)\n for uid in uids:\n audit('UI.Component.Delete', uid)\n return DirectResponse.succeed('Components deleted.')\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to delete components.')\n\n def removeDevices(self, uids, hashcheck, action=\"remove\", uid=None,\n ranges=(), params=None, sort='name', dir='ASC',\n deleteEvents=False, deletePerf=False\n ):\n \"\"\"\n Remove/delete device(s).\n\n @type uids: [string]\n @param uids: List of device uids to remove\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type action: string\n @param action: Action to take. 
'remove' to remove devices from organizer\n uid, and 'delete' to delete the device from Zenoss.\n @type uid: string\n @param uid: (optional) Organizer to use when using ranges to get\n additional uids and/or to remove device (default: None)\n @type ranges: [integer]\n @param ranges: (optional) List of two integers that are the min/max\n values of a range of uids to include (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type deleteEvents: bool\n @param deleteEvents: will remove all the events for the devices as well\n @type deletePerf: bool\n @param deletePerf: will remove all the perf data for the devices\n @rtype: DirectResponse\n @return: B{Properties}:\n - devtree: ([dictionary]) Object representing the new device tree\n - grptree: ([dictionary]) Object representing the new group tree\n - systree: ([dictionary]) Object representing the new system tree\n - loctree: ([dictionary]) Object representing the new location tree\n \"\"\"\n if ranges:\n uids += self.loadRanges(ranges, hashcheck, uid, params, sort, dir)\n facade = self._getFacade()\n removedUids = tuple()\n uids = filterUidsByPermission(self.context.dmd, ZEN_DELETE_DEVICE, uids)\n try:\n if action == \"remove\":\n removed = facade.removeDevices(uids, organizer=uid)\n\n # uid could be an object or string.\n organizer = facade._getObject(uid) if isinstance(uid, basestring) else uid\n organizerType = organizer.meta_type\n action = 'RemoveFrom' + organizerType # Ex: RemoveFromLocation\n removedUids = map(lambda x: x.uid, removed)\n for devuid in removedUids:\n # Ex: ('UI.Device.RemoveFromLocation', deviceUid, location=...)\n audit('UI.Device.%s' % action, devuid, data_={organizerType:uid})\n notRemovedUids = list(set(uids) - set(removedUids))\n return DirectResponse.succeed(\n removedUids=removedUids,\n notRemovedUids=notRemovedUids)\n elif action == \"delete\":\n for devuid in uids:\n audit('UI.Device.Delete', devuid,\n deleteEvents=deleteEvents,\n deletePerf=deletePerf)\n facade.deleteDevices(uids,\n deleteEvents=deleteEvents,\n deletePerf=deletePerf)\n return DirectResponse.succeed()\n except Exception as e:\n log.exception(e)\n return DirectResponse.exception(e, 'Failed to remove devices.')\n\n @serviceConnectionError\n def getGraphDefs(self, uid, drange=None):\n \"\"\"\n Returns the url and title for each graph\n for the object passed in.\n @type uid: string\n @param uid: unique identifier of an object\n \"\"\"\n facade = self._getFacade()\n data = facade.getGraphDefs(uid, drange)\n return DirectResponse(data=Zuul.marshal(data))\n\n def loadRanges(self, ranges, hashcheck, uid=None, params=None,\n sort='name', dir='ASC'):\n \"\"\"\n Get a range of device uids.\n\n @type ranges: [integer]\n @param ranges: List of two integers that are the min/max values of a\n range of uids\n @type hashcheck: string\n @param hashcheck: Hashcheck for the devices (from getDevices())\n @type uid: string\n @param uid: (optional) Organizer to use to get uids (default: None)\n @type params: dictionary\n @param params: (optional) Key-value pair of filters for this search.\n Can be one of the following: name, ipAddress,\n deviceClass, or productionState (default: None)\n @type sort: 
string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @rtype: [string]\n @return: A list of device uids\n \"\"\"\n facade = self._getFacade()\n if isinstance(params, basestring):\n params = unjson(params)\n devs = facade.getDeviceBrains(uid, limit=None, sort=sort, dir=dir,\n params=params, hashcheck=hashcheck)\n uids = []\n for start, stop in sorted(ranges):\n uids.extend(b.getPath() for b in islice(devs, start, stop + 1))\n return uids\n\n def loadComponentRanges(self, ranges, hashcheck, uid=None, types=(),\n meta_type=(), start=0, limit=None, sort='name',\n dir='ASC', name=None):\n \"\"\"\n Get a range of component uids.\n\n @type ranges: [integer]\n @param ranges: List of two integers that are the min/max values of a\n range of uids\n @type hashcheck: string\n @param hashcheck: not used\n @type uid: string\n @param uid: (optional) Device to use to get uids (default: None)\n @type types: [string]\n @param types: (optional) The types of components to retrieve (default: None)\n @type meta_type: string\n @param meta_type: (optional) The meta type of the components to retrieve\n (default: None)\n @type start: integer\n @param start: (optional) Offset to return the results from; used in\n pagination (default: 0)\n @type limit: integer\n @param limit: (optional) Number of items to return; used in pagination\n (default: None)\n @type sort: string\n @param sort: (optional) Key on which to sort the return result (default:\n 'name')\n @type dir: string\n @param dir: (optional) Sort order; can be either 'ASC' or 'DESC'\n (default: 'ASC')\n @type name: string\n @param name: (optional) Component name to search for when loading ranges\n (default: None)\n @rtype: [string]\n @return: A list of component uids\n \"\"\"\n if uid is None:\n uid = \"/\".join(self.context.getPhysicalPath())\n facade = self._getFacade()\n comps = facade.getComponents(uid, types, meta_type, start, limit, sort,\n dir, name)\n uids = []\n for start, stop in sorted(ranges):\n uids.extend(b.uid for b in islice(comps, start, stop))\n return uids\n\n @serviceConnectionError\n def getUserCommands(self, uid):\n \"\"\"\n Get a list of user commands for a device uid.\n\n @type uid: string\n @param uid: Device to use to get user commands\n @rtype: [dictionary]\n @return: List of objects representing user commands\n \"\"\"\n facade = self._getFacade()\n cmds = facade.getUserCommands(uid)\n return Zuul.marshal(cmds, ['id', 'description'])\n\n def getProductionStates(self, **kwargs):\n \"\"\"\n Get a list of available production states.\n\n @rtype: [dictionary]\n @return: List of name/value pairs of available production states\n \"\"\"\n return DirectResponse(data=[dict(name=s.split(':')[0],\n value=int(s.split(':')[1])) for s in\n self.context.dmd.prodStateConversions])\n\n def getAllCredentialsProps(self, **kwargs):\n \"\"\"\n Get a list of available credentials props\n\n @rtype: DirectResponse\n @return: List of credentials props\n \"\"\"\n props = self._getFacade().getAllCredentialsProps()\n # zSnmpCommunity is always on the form, don't include it\n return DirectResponse(data=[prop for prop in props if prop != 'zSnmpCommunity'])\n\n def getCredentialsProps(self, deviceClass):\n \"\"\"\n Get a dictionary of the creds props and default values for this device class\n\n @rtype: DirectResponse\n @return: List of credentials props\n \"\"\"\n organizerUid = '/zport/dmd/Devices' + deviceClass\n try:\n 
connInfo = self._getFacade().getConnectionInfo(organizerUid)\n except ObjectNotFoundException:\n connInfo = {}\n\n props = OrderedDict([(item['id'], item.get('valueAsString', '')) for item in connInfo])\n if props.get('zSnmpCommunity'):\n del props['zSnmpCommunity'] # it's always on the form\n return DirectResponse(data=props)\n\n def getPriorities(self, **kwargs):\n \"\"\"\n Get a list of available device priorities.\n\n @rtype: [dictionary]\n @return: List of name/value pairs of available device priorities\n \"\"\"\n return DirectResponse(data=[dict(name=s.split(':')[0],\n value=int(s.split(':')[1])) for s in\n self.context.dmd.priorityConversions])\n\n def getCollectors(self):\n \"\"\"\n Get a list of available collectors.\n\n @rtype: [string]\n @return: List of collectors\n \"\"\"\n return self.context.dmd.Monitors.getPerformanceMonitorNames()\n\n def getDeviceClassesToAdd(self, **data):\n \"\"\"\n Get a list of device classes that don't require special case add jobs\n\n @rtype: DirectResponse\n @return: B{Properties}:\n - deviceClasses: ([dictionary]) List of device classes\n - totalCount: (integer) Total number of device classes\n \"\"\"\n facade = self._getFacade()\n deviceClasses = ['']\n deviceClasses.extend(facade.getDeviceClasses(allClasses=False))\n result = [{'name': name} for name in deviceClasses]\n return DirectResponse(deviceClasses=result, totalCount=len(result))\n\n def getDeviceClasses(self, **data):\n \"\"\"\n Get a list of all device classes.\n\n @rtype: DirectResponse\n @return: B{Properties}:\n - deviceClasses: ([dictionary]) List of device classes\n - totalCount: (integer) Total number of device classes\n \"\"\"\n facade = self._getFacade()\n deviceClasses = ['']\n deviceClasses.extend(facade.getDeviceClasses())\n result = [{'name': name} for name in deviceClasses]\n return DirectResponse(deviceClasses=result, totalCount=len(result))\n\n def getSystems(self, **data):\n \"\"\"\n Get a list of all systems.\n\n @rtype: DirectResponse\n @return: B{Properties}:\n - systems: ([dictionary]) List of systems\n - totalCount: (integer) Total number of systems\n \"\"\"\n systems = self.context.dmd.Systems.getOrganizerNames()\n result = [{'name': name} for name in systems if name != '/']\n return DirectResponse(systems=result, totalCount=len(result))\n\n def getGroups(self, **data):\n \"\"\"\n Get a list of all groups.\n\n @rtype: DirectResponse\n @return: B{Properties}:\n - groups: ([dictionary]) List of groups\n - totalCount: (integer) Total number of groups\n \"\"\"\n groups = self.context.dmd.Groups.getOrganizerNames()\n result = [{'name': name} for name in groups if name != '/']\n return DirectResponse(groups=result, totalCount=len(result))\n\n def getLocations(self, **data):\n \"\"\"\n Get a list of all locations.\n\n @rtype: DirectResponse\n @return: B{Properties}:\n - locations: ([dictionary]) List of locations\n - totalCount: (integer) Total number of locations\n \"\"\"\n locations = self.context.dmd.Locations.getOrganizerNames()\n result = [{'name': name} for name in locations if name != '/']\n return DirectResponse(locations=result, totalCount=len(result))\n\n def getManufacturerNames(self, **data):\n \"\"\"\n Get a list of all manufacturer names.\n\n @rtype: DirectResponse\n @return: B{Properties}:\n - manufacturers: ([dictionary]) List of manufacturer names\n - totalCount: (integer) Total number of manufacturer names\n \"\"\"\n names = self.context.dmd.Manufacturers.getManufacturerNames()\n result = [{'name': name} for name in names]\n return 
DirectResponse(manufacturers=result, totalCount=len(result))\n\n def getHardwareProductNames(self, manufacturer='', **data):\n \"\"\"\n Get a list of all hardware product names from a manufacturer.\n\n @type manufacturer: string\n @param manufacturer: Manufacturer name\n @rtype: DirectResponse\n @return: B{Properties}:\n - productNames: ([dictionary]) List of hardware product names\n - totalCount: (integer) Total number of hardware product names\n \"\"\"\n manufacturers = self.context.dmd.Manufacturers\n names = manufacturers.getProductNames(manufacturer, 'HardwareClass')\n result = [{'name': name} for name in names]\n return DirectResponse(productNames=result, totalCount=len(result))\n\n def getOSProductNames(self, manufacturer='', **data):\n \"\"\"\n Get a list of all OS product names from a manufacturer.\n\n @type manufacturer: string\n @param manufacturer: Manufacturer name\n @rtype: DirectResponse\n @return: B{Properties}:\n - productNames: ([dictionary]) List of OS product names\n - totalCount: (integer) Total number of OS product names\n \"\"\"\n manufacturers = self.context.dmd.Manufacturers\n names = manufacturers.getProductNames(manufacturer, 'OS')\n result = [{'name': name} for name in names]\n return DirectResponse(productNames=result, totalCount=len(result))\n\n def addDevice(self, deviceName, deviceClass, title=None,\n snmpCommunity=\"\", snmpPort=161, manageIp=\"\",\n model=False, collector='localhost', rackSlot=0,\n locationPath=\"\", systemPaths=[], groupPaths=[],\n productionState=1000, comments=\"\", hwManufacturer=\"\",\n hwProductName=\"\", osManufacturer=\"\", osProductName=\"\",\n priority=3, tag=\"\", serialNumber=\"\", zCommandUsername=\"\",\n zCommandPassword=\"\", zWinUser=\"\", zWinPassword=\"\",\n zProperties={}, cProperties={},):\n\n \"\"\"\n Add a device.\n\n @type deviceName: string\n @param deviceName: Name or IP of the new device\n @type deviceClass: string\n @param deviceClass: The device class to add new device to\n @type title: string\n @param title: (optional) The title of the new device (default: '')\n @type snmpCommunity: string\n @param snmpCommunity: (optional) A specific community string to use for\n this device. 
(default: '')\n @type snmpPort: integer\n @param snmpPort: (optional) SNMP port on new device (default: 161)\n @type manageIp: string\n @param manageIp: (optional) Management IP address on new device (default:\n empty/derive from DNS)\n @type locationPath: string\n @param locationPath: (optional) Organizer path of the location for this device\n @type systemPaths: List (strings)\n @param systemPaths: (optional) List of organizer paths for the device\n @type groupPaths: List (strings)\n @param groupPaths: (optional) List of organizer paths for the device\n @type model: boolean\n @param model: (optional) True to model device at add time (default: False)\n @type collector: string\n @param collector: (optional) Collector to use for new device (default:\n localhost)\n @type rackSlot: string\n @param rackSlot: (optional) Rack slot description (default: '')\n @type productionState: integer\n @param productionState: (optional) Production state of the new device\n (default: 1000)\n @type comments: string\n @param comments: (optional) Comments on this device (default: '')\n @type hwManufacturer: string\n @param hwManufacturer: (optional) Hardware manufacturer name (default: '')\n @type hwProductName: string\n @param hwProductName: (optional) Hardware product name (default: '')\n @type osManufacturer: string\n @param osManufacturer: (optional) OS manufacturer name (default: '')\n @type osProductName: string\n @param osProductName: (optional) OS product name (default: '')\n @type priority: integer\n @param priority: (optional) Priority of this device (default: 3)\n @type tag: string\n @param tag: (optional) Tag number of this device (default: '')\n @type serialNumber: string\n @param serialNumber: (optional) Serial number of this device (default: '')\n @type zCommandUsername: string\n @param zCommandUsername: (optional) Username for SSH commands (default: '')\n @type zCommandPassword: string\n @param zCommandPassword: (optional) Password for SSH commands (default: '')\n @type zWinUser: string\n @param zWinUser: (optional) Username for WMI (default: '')\n @type zWinPassword: string\n @param zWinPassword: (optional) Password for WMI (default: '')\n @rtype: DirectResponse\n @return: B{Properties}:\n - jobId: (string) ID of the add device job\n \"\"\"\n # check for permission in the device organizer to which we are\n # adding the device\n facade = self._getFacade()\n organizerUid = '/zport/dmd/Devices' + deviceClass\n organizer = facade._getObject(organizerUid)\n if not Zuul.checkPermission(\"Manage Device\", organizer):\n raise Unauthorized('Calling AddDevice requires ' +\n 'Manage Device permission on %s' % deviceClass)\n\n if title is None:\n title = deviceName\n\n # the device name is used as part of the URL, so any unicode characters\n # will be stripped before saving. Preempt this and make the device name\n # safe prior to the uniqueness check.\n safeDeviceName = organizer.prepId(deviceName)\n\n foundDevice = None\n if organizer.getZ('zUsesManageIp', False):\n foundDevice = facade.getDeviceByIpAddress(safeDeviceName, collector, manageIp)\n if not foundDevice:\n foundDevice = facade.getDeviceByName(safeDeviceName)\n if foundDevice and foundDevice.getDeviceClassName() == deviceClass:\n primaryId = foundDevice.getPrimaryId()\n return DirectResponse.fail(\n deviceUid=primaryId,\n msg=\"Device %s already exists. 
Go to the device\" % (deviceName, primaryId)\n )\n\n if isinstance(systemPaths, basestring):\n systemPaths = [systemPaths]\n if isinstance(groupPaths, basestring):\n groupPaths = [groupPaths]\n\n jobrecords = self._getFacade().addDevice(deviceName,\n deviceClass,\n title,\n snmpCommunity,\n snmpPort,\n manageIp,\n model,\n collector,\n rackSlot,\n productionState,\n comments,\n hwManufacturer,\n hwProductName,\n osManufacturer,\n osProductName,\n priority,\n tag,\n serialNumber,\n locationPath,\n zCommandUsername,\n zCommandPassword,\n zWinUser,\n zWinPassword,\n systemPaths,\n groupPaths,\n zProperties,\n cProperties,\n )\n\n deviceUid = '/'.join([organizerUid, 'devices', deviceName])\n # Zero groups or systems sends as [''] so exclude that case.\n hasGroups = len(groupPaths) > 1 or (groupPaths and groupPaths[0])\n hasSystems = len(systemPaths) > 1 or (systemPaths and systemPaths[0])\n auditData = {\n 'deviceClass': '/Devices' + deviceClass,\n 'location': '/Locations' + locationPath if locationPath else None,\n 'deviceGroups': ['/Groups' + x for x in groupPaths] if hasGroups else None,\n 'systems': ['/Systems' + x for x in systemPaths] if hasSystems else None,\n 'device_name': title if title else deviceName, # see Trac #30109\n 'collector': collector,\n 'model': str(model), # show value even if False\n 'productionState': self.context.convertProdState(productionState),\n 'priority': self.context.convertPriority(priority),\n }\n audit('UI.Device.Add', deviceUid, data_=auditData)\n return DirectResponse.succeed(new_jobs=Zuul.marshal(jobrecords, keys=('uuid', 'description')))\n\n def remodel_device_permissions(self, deviceUid, collectPlugins='', background=True):\n ctx = self.context if deviceUid is None else self._getFacade()._getObject(deviceUid)\n return Zuul.checkPermission(ZEN_MANAGE_DEVICE, ctx)\n\n @require(remodel_device_permissions)\n def remodel(self, deviceUid, collectPlugins='', background=True):\n \"\"\"\n Submit a job to have a device remodeled.\n\n @type deviceUid: string\n @param deviceUid: Device uid to remodel\n @type collectPlugins: string\n @param collectPlugins: (optional) Modeler plugins to use.\n Takes a regular expression (default: '')\n @type background: boolean\n @param background: (optional) False to not schedule a job\n (default: True)\n @rtype: DirectResponse\n @return: B{Properties}:\n - status: (string) ID of the add device job or command exit status\n \"\"\"\n status = self._getFacade().remodel(deviceUid, collectPlugins=collectPlugins, background=background)\n audit('UI.Device.Remodel', deviceUid)\n if background:\n response = DirectResponse.succeed(jobId=status.id)\n else:\n #returned value is exit status of a command\n #if command end up successfully None is returned\n #make from None a zero value to make it clear\n status = status or 0\n response = DirectResponse.succeed(exitStatus=status)\n return response\n\n @require('Edit Local Templates')\n def addLocalTemplate(self, deviceUid, templateId):\n \"\"\"\n Adds a local template on a device.\n\n @type deviceUid: string\n @param deviceUid: Device uid to have local template\n @type templateId: string\n @param templateId: Name of the new template\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n facade = self._getFacade()\n facade.addLocalTemplate(deviceUid, templateId)\n audit('UI.Device.AddLocalTemplate', deviceUid, template=templateId)\n return DirectResponse.succeed()\n\n @require('Edit Local Templates')\n def removeLocalTemplate(self, deviceUid, templateUid):\n \"\"\"\n Removes a locally 
defined template on a device.\n\n @type deviceUid: string\n @param deviceUid: Device uid that has local template\n @type templateUid: string\n @param templateUid: Name of the template to remove\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n facade = self._getFacade()\n facade.removeLocalTemplate(deviceUid, templateUid)\n audit('UI.Device.RemoveLocalTemplate', deviceUid, template=templateUid)\n return DirectResponse.succeed()\n\n def getLocalTemplates(self, query, uid):\n \"\"\"\n Get a list of locally defined templates on a device.\n\n @type query: string\n @param query: not used\n @type uid: string\n @param uid: Device uid to query for templates\n @rtype: DirectResponse\n @return: B{Properties}:\n - data: ([dictionary]) List of objects representing local templates\n \"\"\"\n facade = self._getFacade()\n templates = facade.getLocalTemplates(uid)\n data = []\n for template in templates:\n data.append(dict(label=template['text'], uid=template['uid']))\n return DirectResponse.succeed(data=data)\n\n @serviceConnectionError\n def getTemplates(self, id):\n \"\"\"\n Get a list of available templates for a device.\n\n @type id: string\n @param id: Device uid to query for templates\n @rtype: DirectResponse\n @return: B{Properties}:\n - data: ([dictionary]) List of objects representing templates\n \"\"\"\n facade = self._getFacade()\n templates = facade.getTemplates(id)\n return Zuul.marshal(templates)\n\n @serviceConnectionError\n def getUnboundTemplates(self, uid):\n \"\"\"\n Get a list of unbound templates for a device.\n\n @type uid: string\n @param uid: Device uid to query for templates\n @rtype: DirectResponse\n @return: B{Properties}:\n - data: ([dictionary]) List of objects representing templates\n \"\"\"\n facade = self._getFacade()\n templates = facade.getUnboundTemplates(uid)\n data = []\n for template in templates:\n label = '%s (%s)' % (template.titleOrId(), template.getUIPath())\n data.append([template.id, label])\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n @serviceConnectionError\n def getBoundTemplates(self, uid):\n \"\"\"\n Get a list of bound templates for a device.\n\n @type uid: string\n @param uid: Device uid to query for templates\n @rtype: DirectResponse\n @return: B{Properties}:\n - data: ([dictionary]) List of objects representing templates\n \"\"\"\n facade = self._getFacade()\n templates = facade.getBoundTemplates(uid)\n data = []\n for template in templates:\n label = '%s (%s)' % (template.titleOrId(), template.getUIPath())\n data.append([template.id, label])\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n @require('Edit Local Templates')\n def setBoundTemplates(self, uid, templateIds):\n \"\"\"\n Set a list of templates as bound to a device.\n\n @type uid: string\n @param uid: Device uid to bind templates to\n @type templateIds: [string]\n @param templateIds: List of template uids to bind to device\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n facade = self._getFacade()\n try:\n old_templateIds = [t.id for t in facade.getBoundTemplates(uid)]\n facade.setBoundTemplates(uid, templateIds)\n except DatapointNameConfict as e:\n log.info(\"Failed to bind templates for %s: %s\", uid, e)\n return DirectResponse.exception(e, 'Failed to bind templates.')\n audit('UI.Device.BindTemplates', uid,new_bound_templates=templateIds, old_bound_templates=old_templateIds )\n return DirectResponse.succeed()\n\n @require('Edit Local Templates')\n def resetBoundTemplates(self, uid):\n \"\"\"\n Remove all bound templates from a 
device.\n\n @type uid: string\n @param uid: Device uid to remove bound templates from\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n facade = self._getFacade()\n facade.resetBoundTemplates(uid)\n audit('UI.Device.ResetBoundTemplates', uid)\n return DirectResponse.succeed()\n\n @require('Edit Local Templates')\n def bindOrUnbindTemplate(self, uid, templateUid):\n \"\"\"\n Bind an unbound template or unbind a bound template from a device.\n\n @type uid: string\n @param uid: Device uid to bind/unbind template\n @type templateUid: string\n @param templateUid: Template uid to bind/unbind\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n facade = self._getFacade()\n template = facade._getObject(templateUid)\n templateIds = [t.id for t in facade.getBoundTemplates(uid)]\n # not bound\n if not template.id in templateIds:\n self.setBoundTemplates(uid, templateIds + [template.id])\n audit('UI.Device.BindTemplate', uid, template=templateUid)\n else:\n # already bound so unbind it\n templateIds = [t for t in templateIds if t != template.id]\n self.setBoundTemplates(uid, templateIds)\n audit('UI.Device.UnbindTemplate', uid, template=templateUid)\n return DirectResponse.succeed()\n\n def getOverridableTemplates(self, query, uid):\n \"\"\"\n Get a list of available templates on a device that can be overridden.\n\n @type query: string\n @param query: not used\n @type uid: string\n @param uid: Device to query for overridable templates\n @rtype: DirectResponse\n @return: B{Properties}:\n - data: ([dictionary]) List of objects representing templates\n \"\"\"\n facade = self._getFacade()\n templates = facade.getOverridableTemplates(uid)\n # we just need the text and the id (for our combobox)\n data = []\n for template in templates:\n label = '%s (%s)' % (template.text, template.getUIPath())\n data.append(dict(label=label, uid=template.uid))\n return DirectResponse.succeed(data=data)\n\n @require('Manage DMD')\n def clearGeocodeCache(self):\n \"\"\"\n Clear the Google Maps geocode cache.\n\n @rtype: DirectResponse\n @return: Success message\n \"\"\"\n facade = self._getFacade()\n facade.clearGeocodeCache()\n audit('UI.GeocodeCache.Clear')\n return DirectResponse.succeed()\n\n def getConnectionInfo(self, uid):\n \"\"\"\n Returns the zproperty information about those zproperties which comprise\n the credentials\n @rtype: List of Dictionaries\n @return: B{Properties}:\n - path: (string) where the property is defined\n - type: (string) type of zproperty it is\n - options: (Array) available options for the zproperty\n - value (Array) value of the zproperty\n - valueAsString (string)\n \"\"\"\n facade = self._getFacade()\n data = facade.getConnectionInfo(uid)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n @serviceConnectionError\n def getModelerPluginDocStrings(self, uid):\n \"\"\"\n Given a uid returns the documentation for all the modeler plugins.\n \"\"\"\n facade = self._getFacade()\n data = facade.getModelerPluginDocStrings(uid)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def addIpRouteEntry(self, uid, dest='', routemask='', nexthopid='', interface='',\n routeproto='', routetype='', userCreated=True):\n \"\"\"\n Adds an Ip Route Entry to this device\n \"\"\"\n facade = self._getFacade()\n data = facade.addIpRouteEntry(uid, dest, routemask, nexthopid, interface,\n routeproto, routetype, userCreated)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def addIpInterface(self, uid, newId, userCreated=True):\n \"\"\"\n Adds an Ip Interface\n 
\"\"\"\n facade = self._getFacade()\n data = facade.addIpInterface(uid, newId, userCreated)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def addOSProcess(self, uid, newClassName, example, userCreated=True):\n \"\"\"\n Adds an os processes\n \"\"\"\n facade = self._getFacade()\n data = facade.addOSProcess(uid, newClassName, example, userCreated)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def addFileSystem(self, uid, newId, userCreated=True):\n \"\"\"\n Adds an Ip Interface\n \"\"\"\n facade = self._getFacade()\n data = facade.addFileSystem(uid, newId, userCreated)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def addIpService(self, uid, newClassName, protocol, userCreated=True):\n \"\"\"\n Adds an Ip Service\n \"\"\"\n facade = self._getFacade()\n data = facade.addIpService(uid, newClassName, protocol, userCreated)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n\n def addWinService(self, uid, newClassName, userCreated=True):\n \"\"\"\n Adds an Ip Service\n \"\"\"\n facade = self._getFacade()\n data = facade.addWinService(uid, newClassName, userCreated)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def getSoftware(self, uid, keys=None):\n\n facade = self._getFacade()\n software = facade.getSoftware(uid)\n return DirectResponse(data=Zuul.marshal(software, keys))\n\n def getOverriddenObjectsList(self, uid, propname, relName):\n \"\"\"\n returns a list of Overridden Objects and properties for this context\n \"\"\"\n facade = self._getFacade()\n data = facade.getOverriddenObjectsList(uid, propname, relName)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def getOverriddenObjectsParent(self, uid, propname=''):\n \"\"\"\n returns the base of the Overridden Objects\n \"\"\"\n facade = self._getFacade()\n data = facade.getOverriddenObjectsParent(uid, propname)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def getOverriddenZprops(self, uid, all=True, pfilt=''):\n \"\"\"\n returns a list of zProperty values for the overridden objects\n \"\"\"\n facade = self._getFacade()\n data = facade.getOverriddenZprops(uid, all)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def getGraphDefintionsForComponents(self, uid):\n facade = self._getFacade()\n data = facade.getGraphDefinitionsForComponent(uid)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def getComponentGraphs(self, uid, meta_type, graphId, allOnSame=False):\n \"\"\"\n Returns the graph denoted by graphId for every component in\n device (uid) with the meta_type meta_type\n \"\"\"\n facade = self._getFacade()\n data = facade.getComponentGraphs(uid, meta_type, graphId, allOnSame=allOnSame)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n def getDevTypes(self, uid, filter=None):\n \"\"\"\n Returns a list of devtypes for the wizard\n \"\"\"\n facade = self._getFacade()\n data = facade.getDevTypes(uid)\n return DirectResponse.succeed(data=Zuul.marshal(data))\n\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/Zuul/routers/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":88057,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"39500806191","text":"# https://leetcode.com/problems/subarrays-with-k-different-integers/\n# https://leetcode.com/problems/subarrays-with-k-different-integers/solution/\n# other idea: convert K to at most K - at most (K-1)\n# Time: O(N)\n# Space: O(1)\n#\n# for each r, [l1,l2] are be a contiguous 
interval.\n# The pointers r, l1 and l2 are all monotonically increasing.\n# -> use a sliding window\n\nimport collections\n\nclass Window(object):\n def __init__(self):\n self.count = collections.Counter()\n self.unique = 0\n def add(self, x):\n if self.count[x] == 0:\n self.unique += 1\n self.count[x] += 1\n def remove(self, x):\n self.count[x] -= 1\n if self.count[x] == 0:\n self.unique -= 1\n\nclass Solution(object):\n def subarraysWithKDistinct(self, A, K):\n \"\"\"\n :type A: List[int]\n :type K: int\n :rtype: int\n \"\"\"\n w1 = Window()\n w2 = Window()\n res = l1 = l2 = 0\n for x in A:\n w1.add(x)\n w2.add(x)\n while w1.unique > K:\n w1.remove(A[l1])\n l1 += 1\n while w2.unique >= K:\n w2.remove(A[l2])\n l2 += 1\n res += l2 - l1\n return res\n","repo_name":"jwyx3/practices","sub_path":"leetcode/two-pointers/subarrays-with-k-different-integers.py","file_name":"subarrays-with-k-different-integers.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"73635424195","text":"\n\"\"\"Language modeling dataset\"\"\"\nimport io\nimport torchtext\n\n\nclass LMDataset(torchtext.data.Dataset):\n \"\"\"Define a dataset class.\"\"\"\n\n def __init__(self, fields, filename, truncate=0):\n sents = []\n with io.open(filename, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n for line in f:\n line = line.strip().split(\" \")\n if truncate:\n line = line[:truncate]\n sents += [line]\n fields = [(k, fields[k]) for k in fields]\n examples = [torchtext.data.Example.fromlist([sent], fields) for sent in sents]\n super(LMDataset, self).__init__(examples, fields)\n\n def sort_key(self, ex):\n \"\"\"Sort by sentence length.\"\"\"\n return len(ex.sent)\n","repo_name":"ntunlp/daga","sub_path":"lstm-lm/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"61"}
{"seq_id":"34626405199","text":"import tensorflow as tf\n\nfrom easy_rec.python.layers import dnn\nfrom easy_rec.python.model.rank_model import RankModel\n\nfrom easy_rec.python.protos.dcn_pb2 import DCN as DCNConfig # NOQA\n\nif tf.__version__ >= '2.0':\n tf = tf.compat.v1\n\n\nclass DCN(RankModel):\n\n def __init__(self,\n model_config,\n feature_configs,\n features,\n labels=None,\n is_training=False):\n super(DCN, self).__init__(model_config, feature_configs, features, labels,\n is_training)\n assert self._model_config.WhichOneof('model') == 'dcn', \\\n 'invalid model config: %s' % self._model_config.WhichOneof('model')\n self._model_config = self._model_config.dcn\n assert isinstance(self._model_config, DCNConfig)\n\n self._features, _ = self._input_layer(self._feature_dict, 'all')\n\n def _cross_net(self, tensor, num_cross_layers):\n x = x0 = tensor\n input_dim = tensor.shape[-1]\n for i in range(num_cross_layers):\n name = 'cross_layer_%s' % i\n w = tf.get_variable(\n name=name + '_w',\n dtype=tf.float32,\n shape=(input_dim),\n )\n b = tf.get_variable(name=name + '_b', dtype=tf.float32, shape=(input_dim))\n xw = tf.reduce_sum(x * w, axis=1, keepdims=True) # (B, 1)\n x = tf.math.add(tf.math.add(x0 * xw, b), x)\n return x\n\n def build_predict_graph(self):\n tower_fea_arr = []\n # deep tower\n deep_tower_config = self._model_config.deep_tower\n\n dnn_layer = dnn.DNN(deep_tower_config.dnn, self._l2_reg, 'dnn',\n self._is_training)\n deep_tensor = dnn_layer(self._features)\n tower_fea_arr.append(deep_tensor)\n # cross tower\n cross_tower_config = 
self._model_config.cross_tower\n num_cross_layers = cross_tower_config.cross_num\n cross_tensor = self._cross_net(self._features, num_cross_layers)\n tower_fea_arr.append(cross_tensor)\n # final tower\n all_fea = tf.concat(tower_fea_arr, axis=1)\n final_dnn_layer = dnn.DNN(self._model_config.final_dnn, self._l2_reg,\n 'final_dnn', self._is_training)\n all_fea = final_dnn_layer(all_fea)\n output = tf.layers.dense(all_fea, self._num_class, name='output')\n\n self._add_to_prediction_dict(output)\n\n return self._prediction_dict\n","repo_name":"alibaba/EasyRec","sub_path":"easy_rec/python/model/dcn.py","file_name":"dcn.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":1284,"dataset":"github-code","pt":"61"} +{"seq_id":"5679535716","text":"# 달이랑 날짜 입력.\nm , d = map(int,input().split())\n\n#1월 1일이 월요일\nmonths = [31,28,31,30,31,30,31,31,30,31,30,31]\nweek = [\"SUN\",\"MON\",\"TUE\",\"WED\",\"THU\",\"FRI\",\"SAT\"]\nday = 0\n\nfor i in range(m-1):\n day += months[i]\n\nday = (day+d) % 7\n\nprint(week[day])","repo_name":"wnstjd369/BaekJoon","sub_path":"1924.py","file_name":"1924.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74629292673","text":"import os\nimport re\nfrom datetime import datetime, timedelta\n\nfrom django.core.files import File\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.template.loader import render_to_string\nfrom django.test import TransactionTestCase, override_settings\nfrom django.utils import timezone\n\nimport freezegun\nimport pytest\n\nfrom aleksis.core.models import PDFFile, Person\nfrom aleksis.core.util.pdf import clean_up_expired_pdf_files\n\npytestmark = pytest.mark.django_db\n\n\n@pytest.mark.usefixtures(\"celery_worker\")\n@override_settings(CELERY_BROKER_URL=\"memory://localhost//\")\nclass PDFFIleTest(TransactionTestCase):\n serialized_rollback = True\n\n _test_pdf = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"test.pdf\")\n\n def _get_test_html(self):\n return ContentFile(render_to_string(\"core/pages/test_pdf.html\"), name=\"source.html\")\n\n def test_pdf_file(self):\n dummy_person = Person.objects.create(first_name=\"Jane\", last_name=\"Doe\")\n\n html = self._get_test_html()\n file_object = PDFFile.objects.create(person=dummy_person, html_file=html)\n assert isinstance(file_object.expires_at, datetime)\n assert file_object.expires_at > timezone.now()\n assert not bool(file_object.file)\n\n with open(self._test_pdf, \"rb\") as f:\n file_object.file.save(\"print.pdf\", File(f))\n file_object.save()\n re_base = r\"pdfs/print_[a-zA-Z0-9]+\\.pdf\"\n assert re.match(re_base, file_object.file.name)\n\n def test_delete_signal(self):\n dummy_person = Person.objects.create(first_name=\"Jane\", last_name=\"Doe\")\n file_object = PDFFile.objects.create(person=dummy_person, html_file=self._get_test_html())\n with open(self._test_pdf, \"rb\") as f:\n file_object.file.save(\"print.pdf\", File(f))\n file_object.save()\n\n file_path = file_object.file.path\n\n assert default_storage.exists(file_path)\n file_object.delete()\n assert not default_storage.exists(file_path)\n\n def test_delete_expired_files(self):\n # Create test instances\n dummy_person = Person.objects.create(first_name=\"Jane\", last_name=\"Doe\")\n file_object = PDFFile.objects.create(person=dummy_person, html_file=self._get_test_html())\n file_object2 = 
PDFFile.objects.create(\n person=dummy_person,\n html_file=self._get_test_html(),\n expires_at=timezone.now() + timedelta(minutes=10),\n )\n with open(self._test_pdf, \"rb\") as f:\n file_object.file.save(\"print.pdf\", File(f))\n file_object2.file.save(\"print.pdf\", File(f))\n file_object.save()\n file_object2.save()\n\n clean_up_expired_pdf_files()\n assert PDFFile.objects.get(pk=file_object.pk)\n assert PDFFile.objects.get(pk=file_object2.pk)\n\n # Prepare times\n test_time_before = timezone.now() + timedelta(minutes=2.5)\n test_time_between = timezone.now() + timedelta(minutes=4)\n test_time_after = timezone.now() + timedelta(minutes=15)\n\n # None of the files are expired\n with freezegun.freeze_time(test_time_before):\n clean_up_expired_pdf_files()\n assert PDFFile.objects.get(pk=file_object.pk)\n assert PDFFile.objects.get(pk=file_object2.pk)\n\n # One file is expired\n with freezegun.freeze_time(test_time_between):\n clean_up_expired_pdf_files()\n with pytest.raises(PDFFile.DoesNotExist):\n PDFFile.objects.get(pk=file_object.pk)\n assert PDFFile.objects.get(pk=file_object2.pk)\n\n # Both files are expired\n with freezegun.freeze_time(test_time_after):\n clean_up_expired_pdf_files()\n with pytest.raises(PDFFile.DoesNotExist):\n PDFFile.objects.get(pk=file_object.pk)\n with pytest.raises(PDFFile.DoesNotExist):\n PDFFile.objects.get(pk=file_object2.pk)\n","repo_name":"deepanshumehtaa/AlekSIS-Core","sub_path":"aleksis/core/tests/models/test_pdffile.py","file_name":"test_pdffile.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41518758173","text":"import json\nimport xml.etree.ElementTree as ET\n\nwith open('manual_data_file.json') as f:\n data = json.load(f)\n\ntop_level = ET.Element('osmChange',{'version': '0.6', 'generator': 'rova-to-osm'})\ncreate_level = ET.SubElement(top_level, 'create')\n\nidcounter = 0\n\nfor item in data:\n idcounter = idcounter -1\n # skip items that do not have a FacilityType to avoid errors below\n if not item.get('FacilityType'):\n continue\n if item['FacilityType']['Id'] == 14:\n # print(item['Address'])\n lat = item['MapLocation']['latitude']\n lon = item['MapLocation']['longitude']\n title = item['Title']\n guid = item['Guid']\n attrib_dict = {'user': 'Pozoman', 'lat': str(lat), 'lon': str(lon), 'visible': 'true', 'version':'1', 'id': str(idcounter)}\n new_node = ET.SubElement(create_level, 'node',attrib_dict)\n ET.SubElement(new_node, 'tag', {'k': 'access', 'v': 'private'})\n ET.SubElement(new_node, 'tag', {'k': 'operator', 'v': 'ROVA'})\n ET.SubElement(new_node, 'tag', {'k': 'amenity', 'v': 'waste_disposal'})\n ET.SubElement(new_node, 'tag', {'k': 'waste', 'v': 'trash'})\n\ntree = ET.ElementTree(top_level)\ntree.write('trash.osc')\n","repo_name":"jcjveraa/rova-osm-bridge","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34977079046","text":"#!/usr/bin/env python\n\n# **Check available lumisections** \n# \n# Run with `python check_dataset_plot.py -h` for a list of available options. 
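The rova-to-osm snippet above assembles an osmChange document purely with the stdlib xml.etree.ElementTree module, staging new nodes under a single create element with negative ids. A minimal read-back check of its output could look like the sketch below; this is illustrative only and not part of that repo, and it assumes the trash.osc file written by the script exists in the working directory.

import xml.etree.ElementTree as ET

tree = ET.parse('trash.osc')
root = tree.getroot()  # the <osmChange version="0.6" generator="rova-to-osm"> element
nodes = root.findall('./create/node')
# new OSM objects are staged with negative ids, matching the decrementing counter above
assert all(int(n.get('id')) < 0 for n in nodes)
tags = {t.get('k') for n in nodes for t in n.findall('tag')}
print(len(nodes), 'nodes staged; tag keys:', sorted(tags))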
\n\n### imports\nimport sys\nimport os\nimport json\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport argparse\nsys.path.append(os.path.abspath('../../utils'))\nimport plot_utils as pu\n\n\ndef find_duplicates(runsls):\n duplicates = []\n for i in range(len(runsls)):\n for j in range(i+1,len(runsls)):\n if runsls[i]==runsls[j]:\n #print('{} {} {}'.format(i,j,runsls[i]))\n duplicates.append(runsls[i])\n return duplicates\n\ndef divide_elements(list1, list2):\n both = []\n only1 = []\n only2 = []\n for el in list1:\n if el in list2:\n both.append(el)\n else:\n only1.append(el)\n for el in list2:\n if el in both: continue\n only2.append(el)\n return (both,only1,only2)\n\n\nif __name__=='__main__':\n\n # read arguments\n parser = argparse.ArgumentParser(description='Check available lumis')\n parser.add_argument('--inputfiles', required=True, nargs='+',\n help='Input json file(s), space separated,'\n +' may contain shell expandable wildcards.')\n parser.add_argument('--key1', default='dqmiodas',\n help='Key in the input json file(s) of first element of comparison')\n parser.add_argument('--label1', default='DQMIO',\n help='Label for the first element of comparison')\n parser.add_argument('--key2', default='rawdas',\n help='Key in the input json file(s) of second element of comparison')\n parser.add_argument('--label2', default='RAW',\n help='Label for the second element of comparison')\n parser.add_argument('--labels', nargs='+',\n help='Label(s) for plot legends (if specified, must be same length'\n +' and in same order as the input files).')\n parser.add_argument('--condlabel', default=None,\n help='Conditions label (e.g. \"2022 (13.6 TeV)\")'\n +' (if it contains spaces, wrap it in single quotes).')\n args = parser.parse_args()\n\n # print arguments\n print('Running with following configuration:')\n for arg in vars(args):\n print(' - {}: {}'.format(arg,getattr(args,arg)))\n\n # check arguments\n if args.labels is not None:\n if args.labels==['auto']:\n args.inputfiles = sorted(args.inputfiles)\n args.labels = []\n for inputfile in args.inputfiles:\n label = inputfile.split('_',2)[2]\n label = ' '.join( label.split('-')[:2] )\n if 'Run' in label: label = label.replace('Run','')\n args.labels.append(label)\n if len(args.labels)!=len(args.inputfiles):\n msg = 'ERROR: number of input files and labels must be equal.'\n raise Exception(msg)\n else:\n args.inputfiles = sorted(args.inputfiles)\n args.labels = [None]*len(args.inputfiles)\n\n # loop over input files\n data = {}\n for inputfile in args.inputfiles:\n \n # read input file\n print('Reading input file {}...'.format(inputfile))\n with open(inputfile,'r') as f:\n info = json.load(f)\n runsls = info[args.key1]\n rawrunsls = info[args.key2]\n\n # check for duplicates\n do_check_duplicates = False\n if do_check_duplicates:\n print('Checking for duplicates...')\n dup = find_duplicates(runsls)\n if len(dup)>0:\n raise Exception('ERROR: found duplicate lumisections.')\n rawdup = find_duplicates(rawrunsls)\n if len(rawdup)>0:\n raise Exception('ERROR: found duplicate lumisections.')\n\n # add lumisections to collection\n print('Checking lumisection overlap...')\n (both,onlyraw,onlydqmio) = divide_elements(rawrunsls, runsls)\n\n # also consider runs\n print('Checking run overlap...')\n runs = sorted(list(set([el[0] for el in runsls])))\n rawruns = sorted(list(set([el[0] for el in rawrunsls])))\n (runsboth,runsonlyraw,runsonlydqmio) = divide_elements(rawruns, runs)\n\n # check lumisection overlap for runs in both\n print('Checking 
lumisection overlap in common runs...')\n selectedrunsls = [runlumi for runlumi in runsls if runlumi[0] in runsboth]\n selectedrawrunsls = [runlumi for runlumi in rawrunsls if runlumi[0] in runsboth]\n (selectedboth,selectedonlyraw,selectedonlydqmio) = divide_elements(\n selectedrawrunsls, selectedrunsls)\n\n # add summary to data for plotting\n completion_num = len(selectedboth)+len(selectedonlydqmio)\n completion_denom = completion_num + len(selectedonlyraw)\n completion = 0.\n if completion_denom>0:\n completion = float(completion_num) / (completion_num + len(selectedonlyraw))\n data[inputfile] = ({ 'present': completion_num, \n 'missing': len(selectedonlyraw),\n 'completion': completion })\n\n # print summary\n print('Summary for {}:'.format(inputfile))\n print(' Runs:')\n print(' - runs in both {} and {}: {}'.format(args.label2, args.label1, len(runsboth)))\n print(' - runs in {} only: {}'.format(args.label2, len(runsonlyraw)))\n print(' - runs in {} only: {}'.format(args.label1, len(runsonlydqmio)))\n print(' Lumis:')\n print(' - lumis in both {} and {}: {}'.format(args.label2, args.label1, len(both)))\n print(' - lumis in {} only: {}'.format(args.label2, len(onlyraw)))\n print(' - lumis in {} only: {}'.format(args.label1, len(onlydqmio)))\n print(' Lumis for runs in both:')\n print(' - lumis in both {} and {}: {}'.format(args.label2, args.label1, len(selectedboth)))\n print(' - lumis in {} only: {}'.format(args.label2, len(selectedonlyraw)))\n print(' - lumis in {} only: {}'.format(args.label1, len(selectedonlydqmio)))\n\n # print runs in RAW only\n #print(sorted(list(runsonlyraw)))\n \n # print lumisections in RAW only in common runs\n print(selectedonlyraw)\n\n # print lumisections in DQMIO only in common runs\n #print(selectedonlydqmio)\n\n # define colors\n norm = mpl.colors.Normalize(vmin=0,vmax=len(data))\n cobject = mpl.cm.ScalarMappable(norm=norm, cmap=mpl.cm.cool)\n cobject.set_array([]) # ad-hoc bug fix\n colorlist = [cobject.to_rgba(i) for i in range(len(data))]\n\n # make a summary plot\n fig,ax = plt.subplots(figsize=(6.4+0.4*(len(data)-1),4.8))\n # set the x-axis tick labels\n ax.set_xticks(range(len(args.labels)))\n if not None in args.labels:\n ax.set_xticklabels(args.labels, rotation=45, horizontalalignment='right')\n fig.subplots_adjust(bottom=0.3)\n # draw the lines\n for i, f in enumerate(args.inputfiles):\n completion = data[f]['completion']\n ax.plot( [i-0.5, i+0.5], [completion, completion], \n color=colorlist[i], linewidth=3 )\n text = '{} / {}'.format(data[f]['present'], data[f]['present']+data[f]['missing'])\n ax.text(i, completion-0.01, text, fontsize=12,\n horizontalalignment='center', verticalalignment='top')\n # layout\n ax.grid(visible=True)\n ax.set_ylim((ax.get_ylim()[0]*0.8,1.2))\n pu.add_cms_label( ax, extratext='Preliminary', pos=(0.05,0.93),\n fontsize=12, background_alpha=1. 
)\n ax.set_xlabel('Dataset and era', fontsize=15)\n ax.set_ylabel('Lumisection completion', fontsize=15)\n if args.condlabel is not None:\n pu.add_text(ax, args.condlabel, (1.0,1.01), horizontalalignment='right')\n fig.savefig('test.png')\n","repo_name":"LukaLambrecht/ML4DQMDC-PixelAE","sub_path":"dqmio/commissioning/check_dataset_lumis_plot.py","file_name":"check_dataset_lumis_plot.py","file_ext":"py","file_size_in_byte":7347,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"13619845501","text":"import os\nfrom pathlib import Path\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import KFold, GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom tqdm import tqdm\nimport numpy as np\nfrom config import paths\nfrom common import classification_report_utils\nfrom integration import utils\nfrom sklearn import metrics\nfrom common.classification_metrics import METRICS_skl, top_metric_skl\nfrom imblearn.pipeline import Pipeline\n\nfrom integration.utils import get_patient_kfold_split\n\n\ndef __get_classifier(method_name, params):\n \"\"\"\n Description: Get classifier method.\n :param method_name: method name.\n :param params: parameters.\n :returns: classifier, grid.\n \"\"\"\n if method_name == 'linearsvc':\n classifier = ('linearsvc', LinearSVC(max_iter=params['linearsvc']['max_iter'], random_state=params['general']['random_state']))\n grid = [{'linearsvc__C' : [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]}] # C\n else:\n classifier = ('sgdclassifier', SGDClassifier(max_iter=params['sgdclassifier']['max_iter'], random_state=params['general']['random_state']))\n grid = [{'sgdclassifier__alpha': [1.e-01, 1.e-02, 1.e-03, 1.e-04, 1.e-05, 1.e-06]}] # alpha\n return classifier, grid\n\n\ndef shallow_classifier(args, params, data_path):\n \"\"\"\n Description: Train and test shallow classifier, then show results.\n :param args: arguments.\n :param params: configuration parameters.\n :param data_path: data path.\n \"\"\"\n # classification pipeline with scaler, SMOTE and classifier:\n classifier, param_grid = __get_classifier(args.classification_method, params)\n pipe = Pipeline([('standardscaler', StandardScaler()), ('smote', SMOTE(random_state=params['general']['random_state'])), classifier])\n\n # get data\n X_train, y_train, X_test, y_test = utils.get_data(data_path)\n\n\n test_outer_results = []\n train_outer_results = []\n best_hyperparams = []\n\n metric = top_metric_skl\n\n # cv_outer = KFold(n_splits=params['cv']['n_outer_splits'], shuffle=True, random_state=params['general']['random_state'])\n splits, _ = get_patient_kfold_split(\n X_train,\n y_train,\n data_info_path=data_path / 'info_train.csv',\n n_splits=params['cv']['n_outer_splits'])\n\n # nested cross validation for unbiased error estimation:\n print(\">> Nested cross validation for unbiased error estimation...\")\n for train_ix, test_ix in tqdm(splits):\n\n # split data in k_outer folds (one is test, the rest is trainval) for outer loop\n X_train_cv, X_test_cv = X_train[train_ix, :], X_train[test_ix, :]\n y_train_cv, y_test_cv = y_train[train_ix], y_train[test_ix]\n\n # inner cross validation procedure for grid search of best hyperparameters:\n # trainval will be split in k_inner folds (one is val, the rest is train)\n # use train and val to find best model\n cv_inner = KFold(n_splits=params['cv']['n_inner_splits'], shuffle=True,\n 
random_state=params['general']['random_state'])\n search = GridSearchCV(estimator=pipe, param_grid=param_grid, cv=cv_inner, n_jobs=-1,\n scoring='recall', refit=True, verbose=2)\n\n # outer cross validation procedure to evaluate the performance of the best estimator:\n # fit the best model on the whole trainval\n search.fit(X_train_cv, y_train_cv)\n best_model = search.best_estimator_\n best_score = search.best_score_\n best_param = search.best_params_\n\n # evaluate the performance of the model on test\n y_test_pred = best_model.predict(X_test_cv)\n test_score = metric(y_test_cv, y_test_pred)\n y_train_pred = search.predict(X_train_cv)\n train_score = metric(y_train_cv, y_train_pred)\n\n test_outer_results.append(test_score)\n train_outer_results.append(train_score)\n best_hyperparams.append(search.best_params_)\n\n print(f\"Inner cv: \")\n print(f\" Best {metric.__name__} = {best_score}, best hyperparam = {best_param}\")\n print(f\"Outer cv: \")\n print(f\" Val {metric.__name__} = {test_score}) - \", end='')\n print(f\"Val precision = {metrics.precision_score(y_test_cv, y_test_pred)}) - \", end='')\n print(f\"Val accuracy = {metrics.accuracy_score(y_test_cv, y_test_pred)} - \", end='')\n print(f\"Val matthews_corrcoef = {metrics.matthews_corrcoef(y_test_cv, y_test_pred)}) - \", end='')\n print(f\"Train {metric.__name__} = {train_score}) - \", end='')\n print(f\"Train precision = {metrics.precision_score(y_train_cv, y_train_pred)}) - \", end='')\n print(f\"Train matthews_corrcoef = {metrics.matthews_corrcoef(y_train_cv, y_train_pred)})\")\n\n # calculate the mean score over all K outer folds, and report as the generalization error\n global_test_score = np.mean(test_outer_results)\n global_test_std = np.std(test_outer_results)\n global_train_score = np.mean(train_outer_results)\n global_train_std = np.std(train_outer_results)\n print()\n print(f\"Global validation {metric.__name__} = {str(global_test_score)} ({str(global_test_std)})\")\n print(f\"Global training {metric.__name__} = {str(global_train_score)} ({str(global_train_std)})\")\n print(\"List of best hyperparameters to check stability: \")\n print(best_hyperparams)\n print()\n\n # simple cross validation to find the best model:\n print('>> Simple cross validation to find the best model...')\n _, groups = get_patient_kfold_split(\n X_train,\n y_train,\n data_info_path=data_path / 'info_train.csv',\n n_splits=params['cv']['n_inner_splits'])\n cv = KFold(n_splits=params['cv']['n_inner_splits'], shuffle=True, random_state=params['general']['random_state'])\n search = GridSearchCV(estimator=pipe, param_grid=param_grid, cv=cv, n_jobs=-1, scoring='recall',\n refit=True, verbose=2)\n\n search.fit(X_train, y_train, groups=groups)\n best_model = search.best_estimator_\n best_hyperparam = search.best_params_\n best_score = search.best_score_\n\n print('>> Predicting on test dataset...')\n y_pred = best_model.predict(X_test)\n final_test_score = metric(y_test, y_pred)\n print(f\"Final test {metric.__name__} = {final_test_score}\")\n\n test_scores = {}\n for metr in METRICS_skl:\n test_scores[metr.__name__] = metr(y_test, y_pred)\n\n # path to save results:\n experiment_descr = f\"{os.path.split(data_path)[1]}\"\n experiment_descr += f\"_{params['general']['use_features_images_only']}\"\n experiment_descr += f\"_{args.classification_method}\"\n experiment_descr += f\"_smote\"\n results_path = Path(paths.integration_classification_results_dir) / experiment_descr\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n\n # generate 
classification report:\n experiment_info = {}\n experiment_info['Data folder'] = str(data_path)\n experiment_info['Selected features'] = f'Image features, no gene features' if params['general']['use_features_images_only'] else 'All'\n experiment_info['PCA'] = f\"n. components={params['general']['num_principal_components']}\" if params['general']['num_principal_components'] else 'No'\n experiment_info['Classification method'] = str(args.classification_method)\n experiment_info['Class balancing method'] = 'SMOTE'\n experiment_info['Error estimation:'] = '---------------------------------------'\n experiment_info['Global validation score'] = f\"{metric.__name__}={str(global_test_score)} ({str(global_test_std)})\"\n experiment_info['Global train score'] = f\"{metric.__name__}={str(global_train_score)} ({str(global_train_std)})\"\n experiment_info['Test results:'] = '---------------------------------------'\n experiment_info['Best cv hyperparameter'] = f\"{'C' if args.classification_method == 'linearsvc' else 'alpha'}={best_hyperparam}\"\n experiment_info['Best cv score'] = f\"{metric.__name__}={best_score}\"\n experiment_info['Final test score'] = f\"{metric.__name__}={final_test_score}\"\n test_data_info_path = data_path / 'info_test.csv'\n classification_report_utils.generate_classification_report(results_path, y_test, y_pred, test_scores, experiment_info,\n test_data_info_path=test_data_info_path)\n\n # generate plots:\n classification_report_utils.generate_classification_plots(results_path, best_model, X_test, y_test, X_train, y_train)\n print('>> Done')","repo_name":"Sylelil/Bioinformatics_project","sub_path":"src/integration/classification_methods/shallow_classification.py","file_name":"shallow_classification.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"30191886338","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n groups = {}\n if len(strs) == 1:\n return [strs]\n for s in strs:\n key = \"\".join(sorted(s))\n if key not in groups:\n groups[key] = [s]\n else:\n groups[key].append(s)\n return list(groups.values())\n","repo_name":"cindyjn46/leetcode","sub_path":"Group Anagrams.py","file_name":"Group Anagrams.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73443772993","text":"from confluent_kafka import Producer\nimport time\nimport random\nfrom faker import Faker\nfrom datetime import datetime\n\nfake = Faker()\n\n# Kafka broker connection settings\nbootstrap_servers = 'localhost:9092' # set the Kafka broker host and port here\n\n# first producer config (sends data to the fakelog1 topic)\nproducer_config1 = {\n 'bootstrap.servers': bootstrap_servers,\n 'queue.buffering.max.messages': 10000000, # batch size (message buffering)\n 'queue.buffering.max.ms': 10, # maximum wait before a batch is sent (milliseconds)\n}\n\nproducer1 = Producer(producer_config1)\n\n# second producer config (sends data to the fakelog2 topic)\n# producer_config2 = {\n# 'bootstrap.servers': bootstrap_servers,\n# 'queue.buffering.max.messages': 10000000, # batch size (message buffering)\n# 'queue.buffering.max.ms': 10, # maximum wait before a batch is sent (milliseconds)\n# }\n\n# producer2 = Producer(producer_config2)\n\n# first topic name\ntopic_name1 = 'fakelog1'\n\n# second topic name\n# topic_name2 = 'fakelog2'\n\n# initialize variables for TPS measurement\nstart_time = 
time.time()\nmessage_count1 = 0\n# message_count2 = 0\n\ndef generate_log_data():\n current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n log_level = random.choice([\"INFO\", \"WARNING\", \"ERROR\"])\n log_source = random.choice([\"Administrator\", \"Client\", \"Host\"])\n random_integer = random.randint(10, 99)\n log_message = fake.sentence()\n return f\"[{current_time}][{log_level}][{log_source}][{random_integer}] {log_message}\"\n\n\ndef produce_data1():\n global message_count1\n for _ in range(1): # for 120 seconds\n for i in range(3600000): \n log_data = generate_log_data()\n producer1.produce(topic_name1, key=str(i), value=log_data)\n message_count1 += 1\n\nfor _ in range(10):\n log_data = generate_log_data()\n print(log_data)\n# def produce_data2():\n# global message_count2\n# for _ in range(1): # for 120 seconds\n# for i in range(1800, 3600): # range that does not overlap with the first producer\n# log_data = generate_log_data()\n# producer2.produce(topic_name2, key=str(i), value=log_data)\n# message_count2 += 1\n\n# call the producer function(s)\nproduce_data1()\n# produce_data2()\n\n# send all messages and wait for delivery\nproducer1.flush()\n# producer2.flush()\n\n# measure TPS\nend_time = time.time()\nelapsed_time = end_time - start_time\ntps1 = message_count1 / elapsed_time\n# tps2 = message_count2 / elapsed_time\n\nprint(f\"Total Messages Produced (Producer 1): {message_count1}\")\n# print(f\"Total Messages Produced (Producer 2): {message_count2}\")\nprint(f\"Total Time Taken: {elapsed_time} seconds\")\nprint(f\"TPS (Producer 1): {tps1} messages/second\")\n# print(f\"TPS (Producer 2): {tps2} messages/second\")","repo_name":"pjh2328/playground","sub_path":"VM_test/k8s-cluster/test/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14247244896","text":"import traceback\nfrom random import randrange\n\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models import Q\n\nfrom iget.settings import EMAIL_FROM\nfrom users.models import EmailVerification\n\n\ndef create_random_code(code_length):\n code_source = '2134567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'\n code = ''\n for i in range(code_length):\n e = code_source[randrange(0, len(code_source))]\n code += e\n return code\n\n\ndef send_email_code(email, verify_type):\n \"\"\"Send an email verification code. Security issue: the code can easily be forged....\n Open question: in the HTML-format emails sent here, text_content never appears in the message body; how should text_content be handled?\n \"\"\"\n try:\n code = create_random_code(randrange(5, 9))\n subject, from_email, to_list = '', '', []\n text_content, html_content = '', ''\n if verify_type == -1:\n return False\n elif verify_type == 0:\n # send an HTML-format email\n mail_list = [email, ]\n subject, from_email, to_list = '账号激活', EMAIL_FROM, mail_list\n text_content = '请点击激活链接完成账号注册:'\n html_content = '
请点击激活链接完成账号注册,链接在5分钟后或者完成相关操作后失效:' \\n '激活账号'\n elif verify_type == 1:\n mail_list = [email, ]\n subject, from_email, to_list = '重置密码', EMAIL_FROM, mail_list\n text_content = '请点击链接以重置密码:'\n html_content = '请点击链接以重置密码,链接在5分钟后或者完成相关操作后失效:' \\n '重置密码'\n elif verify_type == 2:\n mail_list = [email, ]\n subject, from_email, to_list = '修改密码', EMAIL_FROM, mail_list\n text_content = '您好,请查收您的验证码:'\n html_content = '这是您本次操作的验证码,验证码将在5分钟后或者完成相关操作后失效:' + code\n elif verify_type == 3:\n mail_list = [email, ]\n subject, from_email, to_list = '换绑邮箱', EMAIL_FROM, mail_list\n text_content = '您好,请查收您的验证码:'\n html_content = '这是您本次操作的验证码,验证码将在5分钟后或者完成相关操作后失效:' + code + '
'\n else:\n return False\n\n # the first four arguments are required\n html_email = EmailMultiAlternatives(subject, text_content, from_email, to_list)\n html_email.attach_alternative(html_content, 'text/html')\n result = html_email.send()\n\n if result == 1:\n verify = EmailVerification()\n verify.to_email = email\n verify.verify_type = verify_type\n verify.code = code\n verify.save()\n _invalidate_code(email=email, code_type=verify_type, except_code=code)\n else:\n raise Exception\n except Exception:\n traceback.print_exc()\n return False\n else:\n return True\n\n # sending a plain-text email instead:\n # send_mail(\n # subject=subject,\n # message=message,\n # from_email=EMAIL_FROM,\n # recipient_list=mail_list,\n # fail_silently=False,\n # auth_user=EMAIL_HOST_USER,\n # auth_password=EMAIL_HOST_PASSWORD,\n # connection=None,\n # )\n\n\ndef _invalidate_code(email, code_type, except_code):\n codes = EmailVerification.objects.filter(\n Q(to_email=email) & Q(verify_type=code_type) & Q(is_valid=True) & ~Q(code=except_code)\n )\n for code in codes:\n code.is_valid = False\n code.save()\n","repo_name":"log-e-e/iget","sub_path":"tools/send_email_tool.py","file_name":"send_email_tool.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"16454798907","text":"import pymel.core as pm\nimport metarig\nimport miscutil\n\ndef create_core_metanode_graph(root_joint, type='character', name = None, object_id = None, base_dir = None, source_file_path = None):\n #check root joint\n #check for current meta_root\n meta_root = metarig.MetaRoot.create(root_joint, type, name, object_id, base_dir, source_file_path)\n #check for metaanimrender\n metarender = metarig.MetaAnimRender.create(meta_root)\n metarender.set_metanode_parent(meta_root)\n \n return meta_root\n\ndef get_metaroot_from_root_joint(root_joint):\n return root_joint.listConnections(s=1, d=0, type='network')[0]\n\ndef get_metaroot(node):\n metaroot = None\n metanode = None\n if not node and not pm.ls(sl=1):\n pm.error('get_metaroot: select a node on the object hierarchy.')\n elif not node:\n node = pm.ls(sl=1)[0]\n \n if node.hasAttr('meta_type'):\n if node.meta_type.get() == 'MetaRoot':\n return node\n else:\n metanode = node\n \n if metanode is None:\n all_parents = pm.listConnections(node, type='network')\n for parent in all_parents:\n if parent.hasAttr('meta_type') and parent.meta_type.get() == 'MetaRoot':\n return parent\n meta_parent = get_meta_parent(parent)\n if meta_parent != None:\n metanode = meta_parent\n\n if metanode:\n if metanode.hasAttr('meta_type') and metanode.meta_type.get() == 'MetaRoot':\n return metanode\n \n count = 0\n type = metanode.meta_type.get()\n parent = get_meta_parent(metanode)\n while (type != 'MetaRoot') and (not parent) and (count < 100):\n if (metanode.meta_type.get() == 'MetaRoot'):\n return metanode\n parent = get_meta_parent(parent)\n if not parent:\n return None\n metanode = parent\n count += 1\n if (parent.meta_type.get() == 'MetaRoot'):\n return parent\n else:\n return None\n return metaroot\n \ndef get_all_metaroots():\n metaroots = []\n nodes = pm.ls(type='network')\n for node in nodes:\n if node.hasAttr('meta_type'):\n if node.meta_type.get() == 'MetaRoot':\n metaroots.append(node)\n return metaroots\n \ndef get_meta_parent(node):\n if not pm.objExists(node):\n pm.error('get_meta_parent: no such node {0}'.format(node))\n if not node.hasAttr('meta_parent'):\n return None\n \n meta_parent = pm.listConnections(node+'.meta_parent', s=1, d=0)[0]\n if not 
meta_parent.hasAttr('meta_type'):\n return None\n \n return meta_parent\n \n \ndef find_pv_location(start_joint, middle_joint, end_joint):\n '''finds the pole vector position based from three joints'''\n start_joint = pm.PyNode(start_joint)\n middle_joint = pm.PyNode(middle_joint)\n end_joint = pm.PyNode(end_joint)\n \n start_pos = pm.datatypes.Vector(start_joint.getTranslation(space = 'world'))\n mid_pos = pm.datatypes.Vector(middle_joint.getTranslation(space = 'world'))\n end_pos = pm.datatypes.Vector(end_joint.getTranslation(space = 'world'))\t\n \n a_vector = end_pos - start_pos\n b_vector = mid_pos - start_pos\n a_magnitude = a_vector.length()\n b_magnitude = b_vector.length()\n dot_product = a_vector.dot(b_vector)\n \n projection_point = (dot_product / a_magnitude) * (a_vector / a_magnitude)\n projection_point = projection_point + start_pos\n \n ortho_vector = mid_pos - projection_point\n ortho_magnitude = ortho_vector.length()\n away_scale = 1.5 \n final_scale= (max(a_magnitude, b_magnitude) * away_scale / ortho_magnitude)\n final_point = ortho_vector * final_scale + projection_point\n return pm.datatypes.Point(final_point)\n\ndef find_pv_location_through_mid(start_joint, middle_joint, end_joint):\n '''finds the pole vector position based from three joints'''\n start_pos = pm.datatypes.Vector(pm.xform(start_joint, q=True, ws=True, t=True))\n mid_pos\t = pm.datatypes.Vector(pm.xform(middle_joint, q=True, ws=True, t=True))\n end_pos\t = pm.datatypes.Vector(pm.xform(end_joint, q=True, ws=True, t=True))\n\n mid_vector\t= (start_pos - end_pos) / 2\n pv_mid_pos\t= start_pos - mid_vector\n pv_vector\t= mid_pos - pv_mid_pos\n\n scale_value = ((mid_pos - start_pos).length() + (end_pos - mid_pos).length()) * 1.3\n pv_loc = pv_mid_pos + pv_vector + pv_vector.normal() * scale_value\n return pm.datatypes.Point(pv_loc)\n\n \ndef create_locator_at_point(point, label=None):\n point = pm.datatypes.Point(point)\n locator = pm.spaceLocator(p=[0,0,0])\n if label:\n locator.rename(label)\n \n locator.translate.set(point)\n return locator\t\n \ndef create_locator_at_object(object, label = None):\n if object == None or not pm.objExists(object):\n locator = pm.spaceLocator(p = [0, 0, 0])\n if label:\n locator.rename(label)\n else:\n object = pm.PyNode(object)\n locator = pm.spaceLocator(p = [0, 0, 0])\n \n if label:\n locator.rename(label)\n \n else:\n locator.rename(object.name() + \"_loc\")\n \n if object.hasAttr('rotateOrder'):\n locator.rotateOrder.set(object.rotateOrder.get())\n miscutil.align(object, locator)\n return locator\n \ndef create_line_between(object1, object2):\n '''draws a referenced line between two objects'''\n object1 = pm.PyNode(object1)\n object2 = pm.PyNode(object2)\n pos1 = object1.getTranslation(space = 'world')\n pos2 = object2.getTranslation(space = 'world')\n line = pm.curve(d = 1, p = [pos1, pos2], k = [0, 1])#linear curve\n obj1_cluster = pm.cluster(line.cv[0])[1]\n obj2_cluster = pm.cluster(line.cv[1])[1]\n pm.parent([obj1_cluster, obj2_cluster], line)\n line.inheritsTransform.set(0)\n obj1_cluster.inheritsTransform.set(0)\n obj2_cluster.inheritsTransform.set(0)\n obj1_cluster.hide()\n obj2_cluster.hide()\n pm.pointConstraint(object1, obj1_cluster, w = 1, mo = 1)\n pm.pointConstraint(object2, obj2_cluster, w = 1, mo = 1)\n line_shape = pm.PyNode(line).getShape()\n line_shape.overrideEnabled.set(1)\n line_shape.overrideDisplayType.set(1)#template\n line_shape.overrideColor.set(3) #set to grey color\n return line\n \ndef add_zero_transform(node):\n node = pm.PyNode(node)\n 
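The two pole-vector helpers above are plain vector algebra: project the mid joint onto the start-end line, then push out along the orthogonal component. A PyMEL-free sketch of the same projection math follows; it is illustrative only (the function name pv_location and the sample joint positions are made up, not from this repo), but it can run anywhere Python does.

# Same math as find_pv_location, written against plain (x, y, z) tuples.
def pv_location(start, mid, end, away_scale=1.5):
    sub = lambda a, b: tuple(x - y for x, y in zip(a, b))
    add = lambda a, b: tuple(x + y for x, y in zip(a, b))
    mul = lambda a, s: tuple(x * s for x in a)
    dot = lambda a, b: sum(x * y for x, y in zip(a, b))
    norm = lambda a: dot(a, a) ** 0.5

    a_vec = sub(end, start)            # start -> end
    b_vec = sub(mid, start)            # start -> mid
    # point on the start-end line closest to the mid joint
    proj = add(start, mul(a_vec, dot(a_vec, b_vec) / dot(a_vec, a_vec)))
    ortho = sub(mid, proj)             # points from the line toward the knee/elbow
    factor = max(norm(a_vec), norm(b_vec)) * away_scale / norm(ortho)
    return add(proj, mul(ortho, factor))

# a leg-like chain bending slightly forward at the knee:
print(pv_location((0, 10, 0), (0, 5, 1), (0, 0, 0)))  # -> (0.0, 5.0, 15.0)

The worked example confirms the behavior: the pole vector lands level with the knee, pushed well out in front of the chain.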
result = None\n if has_zero_transform(node):\n result = get_zero_transform(node)\n else:\n transform = pm.group(empty = True, n = miscutil.get_name_root(node.name()) + \"_zero_transform\")\n transform.rotateOrder.set(node.rotateOrder.get())\n miscutil.align(node, transform)\n node_parent = node.getParent()\n if node_parent:\n transform.setParent(node_parent)\n node.setParent(transform)\n if isinstance(node, pm.nt.Joint):\n node.jointOrient.set([0,0,0])\n node.addAttr('zero_transform', at = 'message')\n transform.addAttr('zero_object', dt = 'string')\n transform.zero_object >> node.zero_transform\n result = transform\n return result\n \ndef has_zero_transform(node):\n node = pm.PyNode(node)\n result = False\n if node.hasAttr('zero_transform'):\n if node.zero_transform.listConnections():\n result = True\n else:\n raise StandardError('has_zero_transform: ' + str(node) + ' has zero_transform attr with no connection.')\n return result\n \ndef get_zero_transform(node):\n node = pm.PyNode(node)\n if has_zero_transform(node):\n return node.zero_transform.listConnections()[0]\n return None\n\ndef create_box_at_object(object, width = 1, ratio = None, down_axis = None,):\n object = pm.PyNode(object)\n \n if not down_axis:\n down_axis = get_down_axis(object)\n \n #create base cube\n cube = pm.polyCube(w=1, h=1, d=1, sx=1, sy=1, sz=1, ax=(0,1,0), cuv=4, ch=0)[0]#default down axis of cube is \"y\"\n cube.rotateOrder.set(object.rotateOrder.get())\n cube.translateY.set(0.5)\n pm.xform(cube, worldSpace=True, pivots=(0, 0, 0))\n pm.makeIdentity(cube, apply=True, t=1, r=1, s=1, n=0)\n #rename cube\n name_root = pm.PyNode(object).stripNamespace()\n if name_root[:2] == \"b_\":\tname_root = name_root[2:] # ###a-jjones $HACK seems hacky, special case handling on low level cmd\n cube = pm.rename(cube, (name_root + '_box'))\n #align cube\n miscutil.align(object, cube)\n cube.setParent(object)\n #make \"y\" down axis of cube, match down axis of object\n if down_axis == 'y': \tpass\n if down_axis == 'x':\tcube.rotate.set(0, 0, -90)\n if down_axis == 'z':\tcube.rotate.set(90, 0, 0)\n if down_axis == '-y':\tcube.rotate.set(180, 0, 0)\n if down_axis == '-x':\tcube.rotate.set(0, 0, 90)\n if down_axis == '-z':\tcube.rotate.set(-90, 0, 0)\n #scale cube\n children = object.getChildren(type = \"joint\")\n dist = miscutil.distance_between(object, children[0])\n cube.scale.set(width, dist, width)\n if ratio:\n cube.scale.set(dist*ratio, dist, dist*ratio)\n #finalize cube\n pm.makeIdentity(cube, apply=True, r=1, s=1)\n pm.parent(cube, world=True)\n \n return cube\n \ndef get_down_axis(node):\n node = pm.PyNode(node)\n children = node.getChildren()\n best_axis = None\n best_axis_dist = 0\n axes = [\"x\", \"y\", \"z\"]\n for child in children:\n if isinstance(child, pm.nodetypes.Transform):\n trans = child.translate.get()\n for inc, axis in enumerate(axes):\n if abs(trans[inc]) > abs(best_axis_dist):\n best_axis = axis\n best_axis_dist = trans[inc]\n \n if not best_axis:\n best_axis = \"x\"\n \n if best_axis_dist < 0:\n best_axis = \"-\" + best_axis\n \n return best_axis\n \ndef inherit_fbik_attrs(from_node, to_node):\n\tfrom_node = pm.PyNode(from_node)\n\tto_node = pm.PyNode(to_node)\n\tside = to_node.setAttr('side', from_node.getAttr('side'))\n\ttype = to_node.setAttr('type', from_node.getAttr('type'))\n\tother_type = to_node.setAttr('otherType', 
from_node.getAttr('otherType'))","repo_name":"deathglitch/metarigging","sub_path":"python/metautil/rigutil.py","file_name":"rigutil.py","file_ext":"py","file_size_in_byte":10111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39005770993","text":"# Initialize the pygame libriry\nimport pygame\nfrom pygame import mixer\nimport math\nimport os, os.path\npygame.init()\n\nclock = pygame.time.Clock()\n\n# Background music\nmixer.music.load(os.path.join('Sound', 'background_music.wav'))\nmixer.music.play(-1)\n\nxCoordinate = 0\nyCoordinate = 330\nplayerWidth = 64\nplayerHeight = 64\nvelocity = 6\nisJump = False\njumpCount = 10\nleft = False\nright = True\nwalkCount = 0\nscreenLength = 800\nscreenBreadth = 600\nbulletPos = 0\nshoot = False\ndirection = True\nenemy = False\nenemyWalk = 0\nenemyRelease = False\nenemyX = 0\nsign = 1\npoint = 0\nman = 0\ngameOver = False\ncall = 5\nfont = pygame.font.Font('freesansbold.ttf', 25)\n\nmanHealth = enemyHealth = 100\n\nscreen = pygame.display.set_mode((screenLength, screenBreadth))\npygame.display.set_caption('Fighter Game')\n\n\n\nenemyRight = [pygame.image.load(os.path.join('Images', 'enemy', 'ER1.png')), pygame.image.load(os.path.join('Images', 'enemy', 'ER2.png')), pygame.image.load(os.path.join('Images', 'enemy', 'ER3.png')), pygame.image.load(os.path.join('Images', 'enemy', 'ER4.png')), pygame.image.load(os.path.join('Images', 'enemy', 'ER5.png')), pygame.image.load(os.path.join('Images', 'enemy', 'ER6.png')), pygame.image.load(os.path.join('Images', 'enemy', 'ER7.png'))]\nenemyLeft = [pygame.image.load(os.path.join('Images', 'enemy', 'EL1.png')), pygame.image.load(os.path.join('Images', 'enemy', 'EL2.png')), pygame.image.load(os.path.join('Images', 'enemy', 'EL3.png')), pygame.image.load(os.path.join('Images', 'enemy', 'EL4.png')), pygame.image.load(os.path.join('Images', 'enemy', 'EL5.png')), pygame.image.load(os.path.join('Images', 'enemy', 'EL6.png')), pygame.image.load(os.path.join('Images', 'enemy', 'EL7.png'))]\nwalkRight = [pygame.image.load(os.path.join('Images', 'player', 'R0.png')), pygame.image.load(os.path.join('Images', 'player', 'R1.png')), pygame.image.load(os.path.join('Images', 'player', 'R2.png')), pygame.image.load(os.path.join('Images', 'player', 'R3.png')), pygame.image.load(os.path.join('Images', 'player', 'R4.png')), pygame.image.load(os.path.join('Images', 'player', 'R5.png')), pygame.image.load(os.path.join('Images', 'player', 'R6.png')), pygame.image.load(os.path.join('Images', 'player', 'R7.png')), pygame.image.load(os.path.join('Images', 'player', 'R8.png')), pygame.image.load(os.path.join('Images', 'player', 'R9.png')), pygame.image.load(os.path.join('Images', 'player', 'R10.png'))]\nwalkLeft = [pygame.image.load(os.path.join('Images', 'player', 'L0.png')), pygame.image.load(os.path.join('Images', 'player', 'L1.png')), pygame.image.load(os.path.join('Images', 'player', 'L2.png')), pygame.image.load(os.path.join('Images', 'player', 'L3.png')), pygame.image.load(os.path.join('Images', 'player', 'L4.png')), pygame.image.load(os.path.join('Images', 'player', 'L5.png')), pygame.image.load(os.path.join('Images', 'player', 'L6.png')), pygame.image.load(os.path.join('Images', 'player', 'L7.png')), pygame.image.load(os.path.join('Images', 'player', 'L8.png')), pygame.image.load(os.path.join('Images', 'player', 'L9.png')), pygame.image.load(os.path.join('Images', 'player', 'L10.png'))]\nbackground = pygame.image.load(os.path.join('Images', 'background', 
'background.png'))\narrowR = pygame.image.load(os.path.join('Images', 'arrow', 'AR.png')) \narrowL = pygame.image.load(os.path.join('Images', 'arrow', 'AL.png'))\n\ndef arrowShoot(direction):\n\n    global bulletPos, shoot\n    if bulletPos < screenLength and bulletPos > 0: \n        if direction == False:\n            var = -1\n            arrow = arrowL\n        if direction == True:\n            var = 1\n            arrow = arrowR\n        bulletPos += 35 * var\n        screen.blit(arrow, (bulletPos, 350)) \n\n    else:\n        shoot = False\n        return\n    \n\ndef enemy(sign):\n    global enemyWalk \n    global enemyX\n    enemyWalk += 1\n    enemyX = enemyX - (5 * sign)\n    if enemyWalk == 21:\n        attackSound = mixer.Sound(os.path.join('Sound', 'enemy_attack.wav'))\n        attackSound.play()\n        enemyWalk = 0\n    if sign == 1:\n        screen.blit(enemyLeft[enemyWalk // 3], (enemyX, 270))\n    else:\n        screen.blit(enemyRight[enemyWalk // 3], (enemyX, 270))\n\n\ndef isArrowCollision(bulletPos, enemyX):\n    # print('bullet ' + str(bulletPos))\n    # print('enemy ' + str(enemyX))\n    arrowDistance = int(math.sqrt((math.pow(bulletPos - enemyX, 2)) + (math.pow(350 - 270, 2))))\n    # print('arrowDistance ' + str(arrowDistance))\n    if arrowDistance < 100 and arrowDistance > 90:\n        return True\n\n\ndef isEnemyCollision(xCoordinate, enemyX):\n    enemyDistance = int(math.sqrt((math.pow(enemyX - xCoordinate, 2)) + (math.pow(350 - 270, 2))))\n    # print(enemyDistance)\n    # print(xCoordinate)\n    if enemyDistance > 90 and enemyDistance <= 92 and yCoordinate == 330:\n        return True\n    \ndef scoreDisplay(manHealth, enemyHealth):\n    if manHealth < 0:\n        manHealth = 0\n    elif enemyHealth < 0:\n        enemyHealth = 0\n    manScore = font.render('PLAYER health: ' + str(manHealth), True, (255, 0, 0))\n    enemyScore = font.render('ENEMY health: ' + str(enemyHealth), True, (0, 0, 0))\n    screen.blit(manScore, (10, 30))\n    screen.blit(enemyScore, (550, 30))\n\n\ndef gameOverText(call):\n    # call == 1 then enemy health is zero\n    # call == 0 then man's health is zero\n    if call == 1:\n        fontNew = pygame.font.Font('freesansbold.ttf', 50)\n        over = fontNew.render('GAME OVER', True, (0, 0, 0))\n        screen.blit(over, (240, 130))\n        fontMan = pygame.font.Font('freesansbold.ttf', 30)\n        manWins = fontMan.render('YOU WON', True, (0, 0, 0))\n        screen.blit(manWins, (335, 200))\n        screen.blit(walkRight[0], (335, yCoordinate))\n    elif call == 0:\n        fontNew = pygame.font.Font('freesansbold.ttf', 50)\n        over = fontNew.render('GAME OVER', True, (0, 0, 0))\n        screen.blit(over, (240, 130))\n        fontEnemy = pygame.font.Font('freesansbold.ttf', 30)\n        enemyWins = fontEnemy.render('ENEMY WINS', True, (0, 0, 0)) \n        screen.blit(enemyWins, (290, 200))\n        screen.blit(enemyLeft[0], (290, 270))\n\n# Run until the user presses the close button or Escape key\nrunning = True\nwhile running:\n\n    screen.blit(background, (0, 0))\n    keys = pygame.key.get_pressed()\n    # Check if the user has pressed the quit button\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False \n    if not (gameOver):\n        if enemyWalk + 1 >= 30:\n            enemyWalk = 0\n        \n        if walkCount + 1 >= 30:\n            walkCount = 0\n\n        if left:\n            screen.blit(walkLeft[walkCount // 3], (xCoordinate, yCoordinate))\n            walkCount += 1\n        elif right:\n            screen.blit(walkRight[walkCount // 3], (xCoordinate, yCoordinate))\n            walkCount += 1\n    else:\n        if left:\n            screen.blit(walkLeft[0], (xCoordinate, yCoordinate))\n        else:\n            screen.blit(walkRight[0], (xCoordinate, yCoordinate))\n    \n    # Checks if arrow keys are pressed and moves in that direction\n    \n    \n    if keys[pygame.K_LSHIFT]:\n        arrowSound = mixer.Sound(os.path.join('Sound', 'arrow_shoot.wav'))\n        arrowSound.play()\n        bulletPos 
= xCoordinate\n if right:\n direction = True\n else:\n direction = False\n shoot = True\n if shoot:\n arrowShoot(direction)\n\n if keys[pygame.K_UP]:\n enemyRelease = True\n enemyX = 700\n pygame.mixer.music.stop()\n bossMusic = mixer.Sound(os.path.join('Sound', 'boss_music.wav'))\n bossMusic.play()\n if enemyRelease:\n if enemyX > 700:\n sign = 1\n elif enemyX < 0:\n sign = -1\n enemy(sign)\n\n if keys[pygame.K_ESCAPE]:\n running = False\n if keys[pygame.K_SPACE]:\n isJump = True\n jumpSound = mixer.Sound(os.path.join('Sound', 'jump.wav'))\n jumpSound.play()\n if keys[pygame.K_LEFT] and xCoordinate > 0:\n xCoordinate -= velocity\n left = True\n right = False\n # direction = False\n elif keys[pygame.K_RIGHT] and xCoordinate < screenLength - playerWidth:\n xCoordinate += velocity\n left = False\n right = True\n # direction = True\n else:\n # left = False\n # right = False\n walkCount = 0\n\n arrowCollision = isArrowCollision(bulletPos, enemyX)\n if arrowCollision:\n enemyHealth -= 5\n bulletPos = 800\n # print(point)\n if enemyHealth == 0:\n call = 1\n gameOver = True\n\n\n enemyCollision = isEnemyCollision(xCoordinate, enemyX)\n if enemyCollision:\n manHealth -= 20\n if manHealth == 0:\n call = 0\n gameOver = True\n\n \n # if manHealth > 0 and enemyHealth > 0:\n scoreDisplay(manHealth, enemyHealth)\n\n gameOverText(call)\n \n if isJump:\n\n if jumpCount >= -10:\n pos = 1\n if jumpCount < 0:\n pos = -1 \n yCoordinate -= int((jumpCount ** 2) * 0.5 * pos)\n jumpCount -= 1\n else:\n isJump = False\n jumpCount = 10\n \n\n pygame.display.update()\n\n\npygame.quit()\n","repo_name":"ShervinAmbrose/Man-vs-Goblin","sub_path":"man_vs_goblin.py","file_name":"man_vs_goblin.py","file_ext":"py","file_size_in_byte":9265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42780575297","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = \"base\"\n\nurlpatterns = [\n    path('', views.home, name=\"home\"),\n    path('login', views.loginUser, name=\"login\"),\n    path('logout', views.logoutUser, name=\"logout\"),\n    path('register/student', views.registerStudent, name=\"register-student\"),\n    path('register/teacher', views.registerTeacher, name=\"register-teacher\"),\n]","repo_name":"TheCodingComposer/InPractice","sub_path":"base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10784397203","text":"from urllib.parse import unquote\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views import View\nfrom django.views.generic import TemplateView\n\nfrom core.forms import SearchForm\nfrom core.models import Language, WordWithTranslation, Import\nfrom wiktionary.search import get_word_information\n\n\ndef get_languages_config(session, query_dict=None):\n    result = {\"from\": session.get(\"from\", \"en\"),\n              \"to\": session.get(\"to\", None)\n              }\n\n    # if there is a query_dict it has higher priority\n\n    if query_dict is not None:\n        if \"from\" in query_dict:\n            result[\"from\"] = query_dict[\"from\"]\n\n        if \"to\" in query_dict:\n            result[\"to\"] = query_dict[\"to\"]\n\n    return result\n\n\nclass Homepage(TemplateView):\n    template_name = \"kamus/homepage.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        context[\"search\"] = SearchForm(initial=get_languages_config(self.request.session, self.request.GET))\n\n        return context\n\n\nclass About(TemplateView):\n    template_name = \"kamus/about.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context[\"active_page\"] = \"about\"\n        return context\n\nclass Shortcuts(TemplateView):\n    template_name = \"kamus/shortcuts.html\"\n\nclass Imports(TemplateView):\n    template_name = \"kamus/imports.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        imports = []\n        for language_dict in Import.objects.values(\"language__name\").distinct():\n            language_name = language_dict[\"language__name\"]\n            latest_import = Import.objects.filter(language__name=language_name).order_by(\"-imported_on\").first()\n\n            imports.append({\"language_name\": language_name,\n                            \"file_created_on\": latest_import.file_created_on,\n                            \"translated_words\": latest_import.translated_words})\n\n        context[\"imports\"] = imports\n\n        return context\n\n\nclass Translate(View):\n    def _add_urls(self, senses, key):\n        for sense in senses[key]:\n            if \"see\" in sense:\n                for sense_information in sense[\"see\"]:\n                    sense_information[\"url\"] = self._link_to_word(sense_information[\"word\"])\n\n    def _link_to_word(self, word):\n        url_parameters = self.request.GET.copy()\n        url_parameters[\"word\"] = word\n\n        return reverse(\"translate\") + \"?\" + url_parameters.urlencode()\n\n    def get(self, request, *args, **kwargs):\n        search_form = SearchForm(self.request.GET)\n\n        is_valid = search_form.is_valid()\n\n        if is_valid == False:\n            if \"to\" in search_form.errors:\n                if \"to\" in search_form.data:\n                    messages.error(self.request, f'Language \"{search_form.data[\"to\"]}\" is not available or supported')\n            else:\n                messages.error(self.request, \"Invalid parameters - please try again or get in touch\")\n            return redirect(\"homepage\")\n\n        if isinstance(search_form.cleaned_data[\"word\"], 
WordWithTranslation):\n word = search_form.cleaned_data[\"word\"].word\n else:\n # It didn't come from the autocomplete box\n word = search_form.cleaned_data[\"word\"]\n\n from_language = search_form.cleaned_data[\"from\"]\n to_language = search_form.cleaned_data[\"to\"].code\n\n self.request.session[\"from\"] = from_language\n self.request.session[\"to\"] = to_language\n self.request.session.save()\n\n context = {}\n context[\"word\"] = word\n try:\n context[\"translations\"] = get_word_information(from_language, to_language, word)\n except NotImplementedError:\n messages.error(self.request, f'The language \"{from_language}\" is not implemented in Kamus')\n return redirect(reverse(\"homepage\"))\n\n url_parameters_without_word = self.request.GET.copy()\n del url_parameters_without_word[\"word\"]\n\n context[\"base_link_to_word\"] = \"?\" + url_parameters_without_word.urlencode()\n context[\"search\"] = SearchForm(initial={\"from\": from_language, \"to\": to_language})\n\n new_sources = []\n for source in context[\"translations\"][\"sources\"]:\n source_decoded = unquote(source)\n\n new_sources.append({\"url\": source, \"url_decoded\": source_decoded})\n\n context[\"translations\"][\"sources\"] = new_sources\n\n self._add_urls(context[\"translations\"], \"translated_senses\")\n self._add_urls(context[\"translations\"], \"non_translated_senses\")\n\n return render(self.request, \"kamus/translation.html\", context)\n","repo_name":"cpina/kamus-dictionary","sub_path":"kamus/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13009462976","text":"from calibre.gui2.actions import InterfaceAction\nfrom calibre_plugins.send_to_kindle import SendToKindle\nfrom calibre_plugins.send_to_kindle.main import ProcessDialog, pop_alert\n\n\nload_translations()\n\n\nclass InterfacePlugin(InterfaceAction):\n name = SendToKindle.name\n action_spec = (\n _(name), None, _('Send ebooks to Kindle via email'), None)\n\n def genesis(self):\n try:\n icon = get_icons('images/icon.png', _(self.name))\n except Exception:\n icon = get_icons('images/icon.png')\n\n self.qaction.setIcon(icon)\n self.qaction.triggered.connect(self.show_dialog)\n\n def show_dialog(self):\n ebooks = self.get_selected_ebooks()\n\n if len(ebooks) < 1:\n pop_alert(_('You must choose at least one ebook.'))\n return\n\n window = ProcessDialog(self.gui, self.qaction.icon(), ebooks)\n window.setModal(True)\n window.setMinimumWidth(500)\n window.setMinimumHeight(420)\n window.setWindowTitle(\n '%s - %s' % (_(self.name), SendToKindle.__version__))\n window.setWindowIcon(self.qaction.icon())\n window.show()\n\n def get_selected_ebooks(self):\n ebooks = {}\n db = self.gui.current_db\n api = db.new_api\n rows = self.gui.library_view.selectionModel().selectedRows()\n model = self.gui.library_view.model()\n for index, row in enumerate(rows):\n row_number = row.row()\n ebook_id = model.id(row)\n formats = api.formats(ebook_id)\n title = model.title(row_number)\n ebooks[index] = [ebook_id, title, title]\n return ebooks\n","repo_name":"bookfere/Send-to-Kindle-Calibre-Plugin","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"61"} +{"seq_id":"23609147111","text":"import math,sys\r\nfin = open('B-large.in','r')\r\nsys.stdout = open('B-large.out','w')\r\n \r\nC = int(fin.readline())\r\nfor c in 
range(1,C+1):\r\n    N,K,B,T = [int(q) for q in fin.readline().split(' ')]\r\n    x = [int(q) for q in fin.readline().split(' ')]\r\n    v = [int(q) for q in fin.readline().split(' ')]\r\n    time = [(B-x[i])/v[i] for i in range(0,N)]\r\n    k = 0\r\n    count = 0\r\n    for i in range(N-1,-1,-1):\r\n        j = i\r\n        while j > -1 and time[j] > T: j-=1\r\n        if j == -1: break\r\n        if not j == i:\r\n            temp = time[j]\r\n            del time[j]\r\n            time.insert(i,temp)\r\n            count += i-j\r\n        k+=1\r\n        if k == K: break\r\n    if k == K: print('Case #%d: %d' % (c,count))\r\n    else: print('Case #%d: IMPOSSIBLE' % c)\r\nfin.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_60/101.py","file_name":"101.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23551583801","text":"def solve(N):\n    \"\"\" solve the problem \"\"\"\n\n    for i in range(len(N)-1, 0, -1):\n        if N[i-1] > N[i]:\n            N = str(int(N[:i])-1) + '9'*(len(N)-i)\n\n    N = N.lstrip('0')\n\n    return N\n\n\ndef parse():\n    \"\"\" parse input \"\"\"\n\n    N = input()\n\n    return N\n\n\ndef main():\n    \n    T = int(input())\n\n    # solve\n    for t in range(1, T+1):\n        params = parse()\n        result = solve(params)\n        print('Case #%d: %s' % (t, result))\n\n\nif __name__ == '__main__':\n\n    main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1093.py","file_name":"1093.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9464949575","text":"from glob import glob\nimport sys\nfrom typing import Optional\nfrom warnings import warn\nfrom .nu_utils import (\n    np,\n    plt,\n    os,\n    create_dir,\n    fits,\n    run_command,\n    start_stop,\n)\n\nmodules = ['A','B']\n\n\n\n### NuSTAR utilities functions\n\ndef PIfromE(E):\n    \"\"\"\n    PI channel number from energy\n    \"\"\"\n    return (E - 1.6) / 0.04\n\n\ndef EfromPI(PI):\n    \"\"\"\n    energy from PI channel number\n    \"\"\"\n    return PI * 0.04 + 1.6\n\n\ndef scan_phase_resolved_products(prodpath:str = 'phase_resolved',n_phase: int=10):\n    \"\"\"\n    scan_phase_resolved_products: for the current phase-resolved product path, creates lists of spectra and lightcurves that one can iterate over for every FPM module and phase bin;\n    used in obtaining phase-resolved spectra and lightcurves\n    Args:\n        n_phase (int): number of phase bins\n        prodpath (str, optional): product path for phase resolved stuff. 
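File name stems are built from this path, e.g. 'phase_resolved_bin1A_sr.pha' for the first bin of FPMA (see propname below). 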
Defaults to 'phase_resolved'.\n\n    Returns:\n        Tuple: list of light curves (original, bary and orbitally corrected), list of spectra and a function to return the name list for any combination, of the form <prodpath>_bin<N><module>_sr.<ext>\n    \"\"\"\n    binnum = np.arange(1, n_phase + 1)\n\n    def propname(modes, bin, postfix):\n        if len(modes) == 1:\n            return f\"{prodpath}_bin{bin}{modes[0]}_sr.{postfix}\"\n        else:\n            return [f\"{prodpath}_bin{bin}{mode}_sr.{postfix}\" for mode in modes]\n\n    def propname_per_bin(modes, postfix):\n        return [propname(modes, bin, postfix) for bin in binnum]\n\n    lclist = propname_per_bin([\"A\", \"B\"], \"lc\")\n    lclist_bary = propname_per_bin([\"A\", \"B\"], \"lc_bary\")\n\n    spelist = propname_per_bin([\"A\", \"B\"], \"pha\")\n\n    return lclist, lclist_bary, spelist, propname_per_bin\n\n\nclass NustarObservation:\n    def __init__(self, ObsID: str, nu_path: str):\n        \"\"\"\n        __init__ initializes observation analysis: create directories for products and assign directory for data\n\n        Args:\n            ObsID (str): ObsID of NuSTAR observation\n            nu_path (str): path to nustar data of a particular source (e.g. in my case /Users/sdbykov/work/xray_pulsars/groj2058_nu/)\n        \"\"\"\n\n        print(\"###\")\n        print(f\"Observation {ObsID} loaded successfully\")\n        self.ObsID = ObsID\n        self.data_path = nu_path + \"nustar_data/\" + ObsID + \"/\"\n        os.chdir(nu_path + \"nustar_products/\")\n\n        create_dir(\"out\" + self.ObsID)\n        os.chdir(\"out\" + ObsID)\n        out_path = os.getcwd()\n        self.out_path = out_path\n        create_dir(\"products\")\n        os.chdir(\"products\")\n        self.products_path = os.getcwd()\n\n\n    ### NUPIPELINE ###\n\n    def nupipeline(self, ObsList_bright: list):\n        \"\"\"\n        nupipeline creates a command for nupipeline of the observation. If ObsID is in the list ObsList_bright, sets the status expression as recommended for bright sources\n\n        Args:\n            ObsList_bright (List): list of observations during which the source was bright (>100 cts/s, see NuSTAR tutorial). 
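For such observations, statusexpr=\"STATUS==b0000xxx00xxxx000\" is appended to the nupipeline call (see the code below). 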
You can assess it by looking at the light curves pre-produced by the NuSTAR team in the event_cl folder of the observation: from the data folder run $ open */*/*A*lc* and check the light curves by eye\n\n        \"\"\"\n\n        os.chdir(self.out_path)\n        if len(glob(\"*A01_cl.evt\")) != 0:\n            raise Exception(\"nupipeline has already been launched!\")\n\n        nupipeline = f\"\"\"nupipeline indir={self.data_path} steminputs=nu{self.ObsID} outdir={self.out_path} obsmode=SCIENCE createexpomap=yes\"\"\"\n        if self.ObsID in ObsList_bright:\n            nupipeline += ' statusexpr=\"STATUS==b0000xxx00xxxx000\"'\n        run_command(cmd=nupipeline, cmd_name=\"nupipeline\", rewrite=True)\n\n    def make_regions(self):\n        \"\"\"\n        make_regions creates a ds9 script which plots images from both modules so that you can easily set and check region positions for source and background estimation.\n        Regions should be set by hand!!!\n        \"\"\"\n\n        ds9_set = f\"ds9 -tile nu{self.ObsID}A01_cl.evt -scale log -tile nu{self.ObsID}B01_cl.evt -scale log -lock scale\"\n        run_command(cmd=ds9_set, cmd_name=\"ds9_set\")\n\n        ds9_check = f\"ds9 -tile nu{self.ObsID}A01_cl.evt -regions srcA.reg -tile nu{self.ObsID}A01_cl.evt -regions bkgA.reg -tile nu{self.ObsID}B01_cl.evt -region srcB.reg -tile nu{self.ObsID}B01_cl.evt -regions bkgB.reg -scale log -lock scale -lock frame wcs\"\n\n        run_command(cmd=ds9_check, cmd_name=\"ds9_check\")\n\n    ### NUPRODUCTS ###\n\n    def nuproducts(\n        self,\n        mode: str,\n        outdir: str,\n        stemout=\"DEFAULT\",\n        bkgextract: str = \"yes\",\n        phafile: str = \"DEFAULT\",\n        bkgphafile: str = \"DEFAULT\",\n        runmkrmf: str = \"yes\",\n        runmkarf: str = \"yes\",\n        lcfile: str = \"DEFAULT\",\n        bkglcfile: str = \"DEFAULT\",\n        imagefile: str = \"DEFAULT\",\n        pilow: str = \"60\",\n        pihigh: str = \"1935\",\n        lcenergy = '10',\n        binsize: float = 0.01,\n        usrgtifile: str = \"NONE\",\n        usrgtibarycorr: str = \"no\",\n        rewrite=True,\n    ):\n        \"\"\"\n        nuproducts executes a nuproducts command for NuSTAR scientific products of level 3\n        https://heasarc.gsfc.nasa.gov/lheasoft/ftools/caldb/help/nuproducts.html\n\n\n        Args:\n            mode (str): module of the FPM, A or B\n            outdir (str): output directory name and stem for data files. E.g. 'spe_and_lc'\n            stemout (str): nuproducts argument. Defaults to 'DEFAULT'.\n            bkgextract (str, optional): nuproducts argument. Defaults to 'yes'.\n            phafile (str, optional): nuproducts argument. Defaults to 'DEFAULT'.\n            bkgphafile (str, optional): nuproducts argument. Defaults to 'DEFAULT'.\n            runmkrmf (str, optional): nuproducts argument. Defaults to 'yes'.\n            runmkarf (str, optional): nuproducts argument. Defaults to 'yes'.\n            lcfile (str, optional): nuproducts argument. Defaults to 'DEFAULT'.\n            bkglcfile (str, optional): nuproducts argument. Defaults to 'DEFAULT'.\n            imagefile (str, optional): nuproducts argument. Defaults to 'DEFAULT'.\n            pilow (str, optional): nuproducts argument. Defaults to '60'.\n            pihigh (str, optional): nuproducts argument. Defaults to '1935'.\n            lcenergy (str, optional): nuproducts argument. Defaults to '10'.\n            binsize (float, optional): nuproducts argument. Defaults to 0.01.\n            usrgtifile (str, optional): nuproducts argument. Defaults to 'NONE'.\n            usrgtibarycorr (str, optional): nuproducts argument. Defaults to 'no'.\n            rewrite (bool, optional): whether to rewrite existing .sh file. 
Defaults to True.\n\n        \"\"\"\n\n        if mode not in modules:\n            raise Exception(\"choose module name from A,B\")\n\n        os.chdir(self.products_path)\n        create_dir(outdir)\n\n        ObsID = self.ObsID\n        outdi = outdir\n\n        if stemout != \"DEFAULT\":\n            stemout = stemout + mode\n\n        nuproducts = f\"\"\"\n        nuproducts \\\n        indir={self.out_path} \\\n        instrument=FPM{mode} \\\n        steminputs=nu{ObsID} \\\n        stemout={stemout} \\\n        outdir={outdi} \\\n        srcregionfile={self.out_path}/src{mode}.reg \\\n        bkgextract={bkgextract} \\\n        bkgregionfile={self.out_path}/bkg{mode}.reg \\\n        binsize={binsize} \\\n        lcfile={lcfile} \\\n        phafile={phafile} \\\n        bkglcfile={bkglcfile} \\\n        bkgphafile={bkgphafile} \\\n        imagefile={imagefile} \\\n        usrgtifile={usrgtifile} \\\n        pilow={pilow} pihigh={pihigh} \\\n        lcenergy={lcenergy} \\\n        usrgtibarycorr={usrgtibarycorr}\\\n        runmkarf={runmkarf}\\\n        runmkrmf={runmkrmf}\"\"\"\n\n        run_command(cmd=nuproducts, cmd_name=outdir + mode, rewrite=rewrite)\n\n    ##### SPECTRA ######\n\n    def make_spe(self, **kwargs):\n        \"\"\"\n        make_spe: nuproducts without creation of lightcurve or images, arguments apply\n        \"\"\"\n        self.nuproducts(**kwargs, lcfile=\"NONE\",\n                        bkglcfile=\"NONE\", imagefile=\"NONE\")\n\n    def grppha(self, infiles: list, prodpath: str, group_min=30):\n        \"\"\"\n        grppha creates a grppha command to group photons into bins\n\n        Args:\n            infiles (List): list of spectra to group\n            prodpath (str): path where all spectra lie\n            group_min (int, optional): grppha argument. Defaults to 30.\n        \"\"\"\n\n        os.chdir(self.products_path + \"/\" + prodpath)\n        for i, infile in enumerate(infiles):\n            outfile = infile[0:-4] + \".pi\"\n            grppha = f\"\"\"grppha infile=\"{infile}\" outfile=\"{outfile}\" comm=\"group min {group_min} & exit\" clobber=yes\"\"\"\n            run_command(cmd=grppha, cmd_name=\"grppha\", rewrite=i==0)\n\n    ### LIGHT CURVES ###\n\n    def make_lc(self, **kwargs):\n        \"\"\"\n        make_lc: nuproducts without spectra and image, arguments apply\n        \"\"\"\n        self.nuproducts(\n            **kwargs,\n            phafile=\"NONE\",\n            bkgphafile=\"NONE\",\n            runmkrmf=\"NONE\",\n            runmkarf=\"NONE\",\n            imagefile=\"NONE\",\n        )\n\n    def barycorr(\n        self,\n        infiles: list,\n        prodpath: str,\n        barytime: str = \"no\",\n        cmd_name: str = \"barycorr\",\n        rewrite=False,\n    ):\n        \"\"\"\n        barycorr applies barycorr to input files. NOTE: Ra and Dec of the source are read from the fits file. If you need, you may add keywords ra, dec to the barycorr command below.\n\n        Args:\n            infiles (List): input file names\n            prodpath (str): path to products where the file lies\n            barytime (str, optional): Argument of barycorr ftools task. Defaults to 'no'.\n            cmd_name (str, optional): command name. Defaults to 'barycorr'.\n            rewrite (bool, optional): whether to rewrite. Defaults to False.\n\n        \"\"\"\n        os.chdir(self.products_path + \"/\" + prodpath)\n        for i, infile in enumerate(infiles):\n            outfile = infile + \"_bary\"\n            barycorr = f\"\"\" barycorr \\\n            infile={infile} \\\n            outfile={outfile}\\\n            orbitfiles={self.data_path}auxil/nu{self.ObsID}_orb.fits.gz\\\n            barytime={barytime} \"\"\"\n            run_command(barycorr, cmd_name=cmd_name, rewrite=rewrite or i == 0)\n\n    def lcmath(\n        self,\n        infiles: list,\n        outfile: str,\n        prodpath: str,\n        cmd_name: str = \"lcmath\",\n        rewrite: bool = True,\n    ):\n        \"\"\"\n        lcmath creates a script for running lcmath ftools routine to AVERAGE two input lightcurves\n\n        Args:\n            infiles (List): list of two lightcurves to average\n            outfile (str): output filename\n            prodpath (str): path to light curves\n            cmd_name (str, optional): command name. 
Defaults to 'lcmath'.\n            rewrite (bool, optional): whether to rewrite. Defaults to True.\n        \"\"\"\n\n        os.chdir(self.products_path + \"/\" + prodpath)\n\n        infile1, infile2 = infiles\n        lcmath_cmd = f\"\"\"\n        lcmath \\\n        infile={infile1} \\\n        bgfile = {infile2} \\\n        outfile = {outfile} \\\n        multi = 0.5 \\\n        multb = 0.5 \\\n        addsubr = yes\n        \"\"\"\n        run_command(lcmath_cmd, cmd_name=cmd_name, rewrite=rewrite)\n\n    def orb_correction_lc(self, filename: str, prodpath: str):\n        \"\"\"\n        orb_correction_lc creates a new file of a lightcurve with times corrected for orbital motion\n\n        I use the correction of the LC file because the correction of event files\n        would take a lot of time\n\n        Args:\n            filename (str): filename of a lightcurve to be corrected, should be barycentred\n            prodpath (str): path to lightcurve product folder\n        \"\"\"\n        if 'bary' not in filename:\n            raise ValueError(\"lightcurve file should be barycentred\")\n\n        os.chdir(self.products_path + \"/\" + prodpath)\n        warn('Orbital correction is not implemented, using barycorrected lightcurve instead')\n        filename_path, filename_only = filename.rsplit('/', 1)\n        new_filename = filename_path+'/'+filename_only+'_orb_corr'\n        os.system(f'cp {filename} {new_filename}')\n\n    ### PERIOD SEARCH AND EPOCH FOLDING ###\n\n    def make_efsearch(\n        self,\n        filename: str,\n        prodpath: str,\n        p0: str,\n        nphase: str = \"8\",\n        dres: str = \"0.001\",\n        nper: str = \"128\",\n        cmd_name: str = \"efsearch\",\n        rewrite=True,\n    ):\n        \"\"\"\n        make_efsearch creates a script for running efsearch for a given light curve\n\n        Args:\n            filename (str): filename of a light curve (barycentred or orbitally-corrected)\n            prodpath (str): path to light curves\n            p0 (str): efsearch argument\n            nphase (str, optional): efsearch argument. Defaults to '8'.\n            dres (str, optional): efsearch argument. Defaults to '0.001'.\n            nper (str, optional): efsearch argument. Defaults to '128'.\n            cmd_name (str, optional): command name. Defaults to 'efsearch'.\n            rewrite (bool, optional): whether to rewrite. Defaults to True.\n\n        \"\"\"\n        os.chdir(self.products_path + \"/\" + prodpath)\n\n        efsearch = f\"\"\"\n        efsearch \\\n        cfile1=\"{filename}\" \\\n        dper={p0} \\\n        nphase={nphase} \\\n        dres={dres} \\\n        nper={nper} \\\n        outfile=\"{filename}.efs\" \\\n        window=\"-\" \\\n        sepoch=INDEF \\\n        nbint=INDEF \\\n        plot=yes \\\n        plotdev=\"/xw\"\n        \"\"\"\n        run_command(efsearch, cmd_name=cmd_name, rewrite=rewrite)\n\n    def make_efold(\n        self,\n        filename: str,\n        prodpath: str,\n        period: str,\n        nphase: str = \"32\",\n        cmd_name: str = \"efold\",\n        rewrite=True,\n    ):\n        \"\"\"\n        make_efold makes an efold script to fold lightcurves with a given period\n\n        Args:\n            filename (str): name of a lc\n            prodpath (str): path to an lc\n            period (str): period to fold with, in seconds\n            nphase (str, optional): number of phases. Defaults to '32'.\n            cmd_name (str, optional): name of the command. Defaults to 'efold'.\n            rewrite (bool, optional): whether to rewrite command file. 
Defaults to True.\n        \"\"\"\n\n        os.chdir(self.products_path + \"/\" + prodpath)\n\n        efold = f\"\"\"\n        efold \\\n        nser = 1 \\\n        norm = 0 \\\n        cfile1 = {filename} \\\n        dper = {period} \\\n        nphase = {nphase} \\\n        nbint = INDEF \\\n        nintfm = INDEF \\\n        outfileroot = 'default' \\\n        window = '-' \\\n        sepoch = 0 \\\n        plot = no \\\n        outfile = {filename}_nphase_{nphase}.efold\"\"\"\n\n        run_command(efold, cmd_name=cmd_name, rewrite=rewrite)\n\n    def plot_efold(self, filename: str, prodpath: str):\n        \"\"\"\n        plot_efold plots a folded lightcurve\n\n        Args:\n            filename (str): name of a lc\n            prodpath (str): path to an lc\n        \"\"\"\n        os.chdir(self.products_path + \"/\" + prodpath)\n        with fits.open(filename + \".efold\") as f:\n            ph, rate, err = f[1].data['PHASE'], f[1].data['RATE1'], f[1].data['ERROR1']\n        roll_idx = -np.argmin(rate)\n\n        rate = np.roll(rate, roll_idx)\n        err = np.roll(err, roll_idx)\n\n        rate = np.tile(rate, 2)\n        err = np.tile(err, 2)\n        ph = np.hstack((ph, ph+1))\n\n        factor = np.mean(rate)\n        rate = rate/factor\n        err = err/factor\n\n        fig, ax = plt.subplots( figsize = (12,8))\n        ax.step(ph, rate, where='mid', label=self.ObsID)\n        color = ax.get_lines()[-1].get_color()\n        ax.errorbar(ph, rate, err, fmt = 'none', color= color, ms = 10, alpha = 0.7)\n        ax.set_title(self.ObsID)\n        plt.show()\n        return fig\n\n\n\n    ### PHASE RESOLVED SPECTRA AND GTIs ###\n\n    def make_gti_from_lc(\n        self,\n        prodpath: str,\n        mode: str,\n        period: float,\n        phase_bins: int = 10,\n        outfolder: str = \"gtis/\",\n        half_shift: int = 0,\n    ):\n        \"\"\"\n        make_gti_from_lc\n        This task uses BARYCENTRED AND ORBITALLY CORRECTED LIGHTCURVES to produce GTI with the given period and time bins.\n        As long as the original lc was barycorrected to a different file,\n        and this barycentred file was used to produce the orbital correction (see above), we juxtapose times from the corrected LC (TDB time) and the original LC (times start with zero). \n        Timezero for the lightcurve is the time of the first event in the event file (hence we use the time of the original lc + timezero)\n\n        Args:\n            prodpath (str): path to the light curve file from which GTIs are built\n            mode (str): mode of FPM telescope. May be A, B or AB\n            period (float): period for gti\n            phase_bins (int, optional): number of phases. Defaults to 10.\n            outfolder (str, optional): output folder for GTIs. Defaults to 'gtis/'.\n            half_shift (int, optional): number of half bin size shifts to apply to zero time. If 0, makes no shift and starts with the first bin time as zero-time. Defaults to 0. Example: If half_shift = 1, the bin that previously started at phi = 0.5 would start at phi = 0.5 - (phase_bin_width)/2\n\n        \"\"\"\n\n        os.chdir(self.products_path + \"/\" + prodpath)\n        gtis_created = glob(f\"{outfolder}*gti*.fits\")\n        if gtis_created:\n            print(gtis_created)\n            if (\n                input(\n                    \"GTIs for this mode have already been created. Delete folder to rebuild? 
[y/n]\"\n )\n == \"y\"\n ):\n os.system(f\"rm -r {outfolder}\")\n else:\n raise Exception(\"Abort GTI creation\")\n\n create_dir(outfolder)\n filename_orig = glob(f\"*{mode}_sr.lc\")[0]\n filename_corr = glob(f\"*{mode}_sr.lc_bary\")[0]\n\n ff_orig = fits.open(filename_orig)\n time_orig = ff_orig[1].data[\"time\"] # type: ignore\n time_orig_zero = ff_orig[1].header[\"timezero\"] # type: ignore\n # no need to add mjdref, because gti is in seconds since ref (see *gti* files in the root directory)\n time_orig += time_orig_zero\n\n time_corr = fits.open(filename_corr)[1].data[\"time\"] # type: ignore\n if min(np.diff(time_corr)) < 0:\n raise Exception(\"corrected times are not monotonous!\")\n\n phase_duration = period / phase_bins\n t0 = time_corr[0]\n phases = (\n np.floor(\n (time_corr - t0 + half_shift * phase_duration / 2)\n / (period / phase_bins)\n % phase_bins\n )\n + 1\n )\n\n mindt = np.min(np.diff(time_orig))\n for ii in range(1, phase_bins + 1):\n ind = start_stop(phases, trigger_val=ii)\n gtis = time_orig[ind]\n\n tstart = gtis[:, 0] - mindt / 100\n tstop = gtis[:, 1] + mindt / 100\n\n hdu = fits.PrimaryHDU()\n c1 = fits.Column(name=\"START\", format=\"1D\", unit=\"s\", array=tstart)\n c2 = fits.Column(name=\"STOP\", format=\"1D\", unit=\"s\", array=tstop)\n cols = fits.ColDefs([c1, c2])\n datahdu = fits.BinTableHDU.from_columns(cols)\n datahdu.name = \"GTI\"\n datahdu.header[\"START\"] = len(tstart)\n datahdu.header[\"STOP\"] = len(tstop)\n hdulist = fits.HDUList([hdu, datahdu])\n if mode == \"AB\":\n hdulist.writeto(outfolder + f\"gti_{ii}A.fits\")\n hdulist.writeto(outfolder + f\"gti_{ii}B.fits\")\n else:\n hdulist.writeto(outfolder + f\"gti_{ii}{mode}.fits\")\n hdulist.close()\n #print(\n # f\"GTIs have been created for module(s) {mode} with period {period} and {phase_bins} bins, phase bin number {ii}\"\n #)\n\n print(\n f\"GTIs have been created for module(s) {mode} with period {period} and {phase_bins} bins\"\n )\n\n # Plotting phase assigned-lightcurve. Need to check this plot before extracting products\n fig, [ax1, ax2] = plt.subplots(2, sharex=True, figsize = (12,12)) # type: ignore\n\n bin_size = np.diff(time_orig)[0]\n ind_max = int(float(period) / bin_size * 1.5)\n ax1.plot(time_orig[:ind_max], phases[:ind_max])\n ax1.set_xlabel(\"original time column\")\n ax1.set_ylabel(\"phase of a light curve bin\")\n\n rate = ff_orig[1].data[\"rate\"] # type: ignore\n import matplotlib.cm as cm\n\n cmap = cm.get_cmap(\"Set3\", phase_bins + 1)\n sc = ax2.scatter(\n time_orig[:ind_max], rate[:ind_max], c=phases[:ind_max], cmap=cmap, s=100\n )\n\n # fig.colorbar(sc, ax=ax2)\n ax2.set_xlabel(\"original time column\")\n ax2.set_ylabel(\"count rate with color as phase coding\")\n plt.show()\n\n def phase_resolved_spectra(\n self,\n mode: str,\n gtipath=\"spe_and_lc/gtis\",\n folder=\"phase_resolved\",\n ):\n \"\"\"\n phase_resolved_spectra creates a script for A and B modules, in which nuproducts is run several times (for each phase bin) and extracts spectra AND lightcures for input GTI. Lightcurves are mandatory extracted so that one may check the folded light curves of each phase, and be sure that the spectra are extracted from correct time bins\n\n Args:\n mode (str): mode of FPM, A or B\n gtipath (str, optional): path to GTI files. Defaults to 'spe_and_lc/gtis'.\n folder (str, optional): folder when nuproduct will unpack spectra and lightcurves. 
Defaults to 'phase_resolved'.\n\n        CHECK TERMINAL FOR ERRORS IN NUPRODUCTS (search 'nuproducts error' in the terminal window and relaunch problematic phase bins, having deleted beforehand all products created before the error). I do not know why errors sometimes arise, probably due to conflicts in the naming of temporary files and folders while I launch phase resolved for the A and B modules in parallel terminal windows. When I launch phase-resolved products for FPMA and only after that FPMB, the errors do not appear (e.g. $ ./phase_resolvedA.sh; ./phase_resolvedB.sh ).\n\n        The time it takes for one phase bin depends on the observation duration and on the lightcurve binning. It may take a few minutes per phase bin (lc+spe).\n        \"\"\"\n        os.chdir(self.products_path)\n        os.system(f\"rm -f {folder}{mode}.sh\") #remove previous script if any\n        create_dir(folder)\n        gtipath = os.path.abspath(gtipath)\n        gtis = glob(f\"{gtipath}/*{mode}.fits\")\n        if gtis == []:\n            raise Exception(\"no GTI found for input parameters\")\n\n        phase_bins = len(gtis)\n        gtis = [\n            f\"{gtipath}/gti_{ii}{mode}.fits\" for ii in range(1, phase_bins + 1)\n        ] # this is because glob yields a strange order of files, even sorted(glob(..)) does not help. It creates incorrect numbering of spectral bins.\n\n        if (\n            input(\n                f\"GTIs from {gtipath} for module {mode}: \\n {[os.path.basename(x) for x in gtis]} \\n Is this correct? [y/n]?: \"\n            )\n            == \"y\"\n        ):\n            os.chdir(folder)\n        else:\n            raise Exception(\"Stop phase resolved spectroscopy\")\n\n        print('start phase resolved spectra scripts')\n        \n        for ph_num, gtifile in enumerate(gtis, 1):\n            # !!! usrgtibarycorr = no is very important!!!\n            self.nuproducts(\n                outdir=folder,\n                usrgtifile=gtifile,\n                mode=mode,\n                stemout=f\"phase_resolved_bin{ph_num}\",\n                usrgtibarycorr=\"no\",\n                rewrite=False,\n            )\n        print('done with phase resolved spectra scripts')\n\n\n\n    def plot_efolds_of_bins(\n        self,\n        efolds_files: Optional[list],\n        phase_zero_efold_file: Optional[str] =None,\n        prodpath: str = \"phase_resolved\",\n        ax_efold=None,\n        fig=None,\n        save: bool=False,\n        legend: bool=False,\n    ):\n        \"\"\"\n        plot_efolds_of_bins builds period-folded lightcurves for each phase bin and compares them with the folded lightcurve of the whole observation if necessary\n\n        Args:\n            efolds_files (Optional[list]): files to plot pulse profiles from\n            phase_zero_efold_file (Optional[str], optional): file which will be deemed as the first bin. Defaults to None.\n            prodpath (str, optional): path to phase-resolved products. Defaults to \"phase_resolved\".\n            ax_efold (optional): axis to plot on. Defaults to None.\n            fig (optional): figure to plot on. Defaults to None.\n            save (bool, optional): whether to save the figure. Defaults to False.\n            legend (bool, optional): whether to show legend. 
Defaults to False.\n\n        Returns:\n            returns a figure and a sequence of colors for each phase bin.\n        \"\"\"\n        os.chdir(self.products_path + \"/\" + prodpath)\n\n        assert len(glob(\"*A_bin_*sr.pha\")) == len(\n            glob(\"*B_bin_*sr.pha\")\n        ), \"different number of spectra files for mode A and B\"\n        assert len(glob(\"*A_bin_*sr.lc\")) == len(\n            glob(\"*B_bin_*sr.lc\")\n        ), \"different number of light curve files for mode A and B\"\n\n        if ax_efold is None or fig is None:\n            fig, ax_efold = plt.subplots(figsize=(16, 5))\n        else:\n            pass\n\n        if phase_zero_efold_file is None:\n            ph_shift = 0\n        else:\n            ff_zero = fits.open(phase_zero_efold_file)\n            phase_zero = ff_zero[1].data[\"PHASE\"]\n            rate_zero = ff_zero[1].data[\"RATE1\"]\n            idx = np.where(~np.isnan(rate_zero))[0][0]\n            ph_shift = phase_zero[idx] * (-1)\n            # print(f'#### PHASE SHIFT = {ph_shift}')\n\n        colors = []\n        if efolds_files is None:\n            efolds_files = glob(\"*A_bin_*sr.lc\")\n        for fitsfile in efolds_files:\n            binnum = fitsfile.split(\"_\")[2]\n            pulse_profile = fits.open(fitsfile)[1].data # type: ignore\n            phase = pulse_profile[\"PHASE\"] + ph_shift\n            rate = pulse_profile[\"RATE1\"]\n            error = pulse_profile[\"ERROR1\"]\n\n            ax_efold.errorbar(\n                phase, rate, error, drawstyle=\"steps-mid\", ls=\"-\", alpha=0.6, lw=3\n            )\n\n\n            color = ax_efold.get_lines()[-1].get_color()\n            colors.append(color)\n            for dph in [-2, -1, 0, 1, 2]:\n                ax_efold.fill_between(\n                    phase + dph, 0, rate, fc=color, alpha=0.2, ec=\"k\", label=f\"{binnum}\"\n                )\n                \n                ax_efold.errorbar(\n                    phase + dph,\n                    rate,\n                    error,\n                    drawstyle=\"steps-mid\",\n                    ls=\"-\",\n                    alpha=0.6,\n                    lw=3,\n                    ecolor=color,\n                    color=color,\n                )\n\n        if legend:\n            ax_efold.legend()\n        ax_efold.set_xlabel(\"Phase\")\n        ax_efold.set_ylabel(\"Rate\")\n        ax_efold.set_xlim(-0.1, 2.1)\n        ax_efold.relim() # type: ignore\n        #fig.tight_layout()\n        plt.show()\n        if save:\n            fig.savefig(\"efold.png\")\n        return fig, colors\n","repo_name":"SergeiDBykov/nustar_sj0243","sub_path":"nustar_scripts/nu_class.py","file_name":"nu_class.py","file_ext":"py","file_size_in_byte":27298,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"1416961781","text":"import requests\n\ncredentials = {\n    \"username\": \"vasudhabm19\",\n    \"password\": \"fWwL@2x4PruEgiM\"\n}\n\ndef getIMDBID(moviename):\n    moviename = moviename.lower()\n    url = f\"https://www.omdbapi.com/?t={moviename}&apikey=68dd8045\"\n    response = requests.get(url)\n    data = response.json()\n    if 'Response' in data and data['Response'] == 'False':\n        return (-1, -1)\n    return (data[\"imdbID\"], data[\"imdbRating\"])\n\n\ndef subtitleAuthentication():\n    url = \"https://api.opensubtitles.com/api/v1/login\"\n    response = requests.post(url, json = credentials, headers = {\"Api-Key\": \"9Q3t0YsFSJ4pK55yV4nQII74OGO4Xqq7\"})\n    response = response.json()\n    token = response['token']\n    return token\n\n\ndef getSubtitle(imdbID):\n    url = f\"https://api.opensubtitles.com/api/v1/subtitles?imdb_id={imdbID}\"\n    response = requests.get(url, headers = {\"Api-Key\": \"9Q3t0YsFSJ4pK55yV4nQII74OGO4Xqq7\"})\n    data = response.json()\n    data = data['data'][0]\n    file_id = data['attributes']['files'][0]['file_id']\n    return file_id\n\ndef downloadSubtitle(file_id):\n    url = \"https://api.opensubtitles.com/api/v1/download\"\n    response = requests.post(url, json = { \"file_id\" : file_id}, headers = {\"Api-Key\": \"9Q3t0YsFSJ4pK55yV4nQII74OGO4Xqq7\"})\n    data = response.json()\n    return data['link']\n\n\n\n\n#Function calls to get imdb_id, rating and subtitles\nimdbID, imdbRating = 
getIMDBID(\"Shawshank Redemption\")\n\nprint(imdbID, imdbRating)\n\ntoken = subtitleAuthentication()\nfile_id = getSubtitle(imdbID)\nlink = downloadSubtitle(file_id)\n\nresponse = requests.get(link)\nopen(f\"{imdbID}.srt\", \"wb\").write(response.content)\n","repo_name":"SanketSapkal/semantic_search","sub_path":"data and subtitles preprocessing/fetch_imdb_subtitles.py","file_name":"fetch_imdb_subtitles.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31263743733","text":"\n# setup\ndevice = 'cuda'\nprint_every_n_batches = 100\nnum_dataloader_workers = 4\n\n# hyperparams\nlearning_rate = 0.1\nnum_epochs = 1\nbatch_size = 32\ndecay = 2e-4\nmomentum = 0.9\n\n# optimizations\nuse_threaded_loader_worker = True\nuse_revnet = True\n","repo_name":"adelebai/resnet-50-performance","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31838287385","text":"from tkinter import *\r\n\r\nclass FramePosition(LabelFrame):\r\n \r\n def __init__(self, interface, **kwargs):\r\n LabelFrame.__init__(self, interface.windows, **kwargs)\r\n\r\n self.interface= interface\r\n self.columnconfigure(0, weight=1)\r\n self.columnconfigure(1, weight=1)\r\n self.columnconfigure(2, weight=2)\r\n \r\n self.limitBW= IntVar()\r\n self.limitBWEntry= Entry(self, textvariable= self.limitBW, width=4)\r\n self.limitBWEntry.bind(\"\", self.limitBW_changed)\r\n self.limitBW.set(350)\r\n Label(self, text=self.interface.lang[\"LimitBW\"]).grid(column=0, row=0, pady=2, padx=5, sticky=W) \r\n self.limitBWEntry.grid(column=1, row=0, padx=5, pady=5)\r\n\r\n self.limitFW= IntVar() \r\n self.limitFWEntry= Entry(self, textvariable= self.limitFW, width=4)\r\n Label(self, text=self.interface.lang[\"LimitFW\"]).grid(column=4, row=0, pady=2, padx=5, sticky=W) \r\n self.limitFWEntry.bind(\"\", self.limitFW_changed)\r\n self.limitFW.set(1150)\r\n self.limitFWEntry.grid(column=3, row=0, padx=5, pady=5)\r\n\r\n self.positionvalueScale = IntVar()\r\n self.positionScale= Scale(self, from_=self.limitBW.get(), to=self.limitFW.get(), orient=HORIZONTAL, variable=self.positionvalueScale, showvalue=0, command=self.positionScale_changed)\r\n self.positionScale.grid(column=2, row=0, sticky=E)\r\n\r\n self.contactBW= StringVar()\r\n self.contactBW.set(\"%s: ?\" % self.interface.lang[\"Contact BW\"])\r\n Label(self, textvariable=self.contactBW).grid(column=0, row=1, pady=2, columnspan=2, padx=5, sticky=W) \r\n\r\n self.positionvalueEntry= IntVar()\r\n self.positionEntry= Entry(self, textvariable= self.positionvalueEntry, width=4)\r\n self.positionEntry.bind(\"\", self.positionEntry_changed)\r\n self.positionEntry.grid(column=2, row=1)\r\n self.positionvalueEntry.set(750)\r\n\r\n self.contactFW= StringVar()\r\n self.contactFW.set(\"%s: ?\" % self.interface.lang['Contact FW'])\r\n Label(self, textvariable=self.contactFW).grid(column=3, row=1, pady=2, columnspan=2, padx=5, sticky=E) \r\n\r\n self.getButton= Button(self, text=\" %s \" % self.interface.lang['Get'], command=self.getPressed)\r\n self.getButton.grid(column=0, columnspan=2, row=2, pady=5, sticky=E)\r\n self.setButton= Button(self, text=\" %s \" % self.interface.lang['Set'], command=self.setPressed)\r\n self.setButton.grid(column=3, columnspan=2, row=2, pady=5, sticky=W) \r\n \r\n\r\n def positionScale_changed(self, event):\r\n value= 
self.positionvalueScale.get()\r\n self.positionvalueEntry.set(value)\r\n self.interface.mboxeGet.positionCurrent= value\r\n\r\n\r\n\r\n def positionEntry_changed(self, event):\r\n try:\r\n value= self.positionvalueEntry.get()\r\n except:\r\n value=0\r\n lastValue= self.interface.mboxeGet.positionCurrent\r\n minRef= self.interface.mboxeGet.positionCurrent_ref[\"min\"]\r\n maxRef= self.interface.mboxeGet.positionCurrent_ref[\"max\"]\r\n if value>=minRef and value<=maxRef:\r\n self.positionvalueScale.set(value)\r\n self.interface.mboxeGet.positionCurrent= value\r\n else:\r\n self.positionvalueEntry.set(lastValue)\r\n self.interface.log_print(\"\\n%s: %s %d<>%d\" % (self.interface.lang['Position'], self.interface.lang['Invalid value'], minRef, maxRef))\r\n \r\n def limitBW_changed(self, event):\r\n try:\r\n value= self.limitBW.get()\r\n except:\r\n value=0\r\n lastValue= self.interface.mboxeGet.limitBW\r\n minRef= self.interface.mboxeGet.limitBW_ref[\"min\"]\r\n maxRef= self.interface.mboxeGet.limitBW_ref[\"max\"]\r\n if value>=minRef and value<=maxRef:\r\n self.positionScale.config(from_= value)\r\n self.interface.mboxeGet.limitBW= value\r\n else:\r\n self.limitBW.set(lastValue)\r\n self.interface.log_print(\"\\n%s: %s %d<>%d\" % (self.interface.lang['LimitBW'], self.interface.lang['Invalid value'], minRef, maxRef))\r\n \r\n def limitFW_changed(self, event):\r\n try:\r\n value= self.limitFW.get()\r\n except:\r\n value=0\r\n lastValue= self.interface.mboxeGet.limitFW\r\n minRef= self.interface.mboxeGet.limitFW_ref[\"min\"]\r\n maxRef= self.interface.mboxeGet.limitFW_ref[\"max\"]\r\n if value>=minRef and value<=maxRef:\r\n self.positionScale.config(to = value)\r\n self.interface.mboxeGet.limitFW= value\r\n else:\r\n self.limitFW.set(lastValue)\r\n self.interface.log_print(\"\\n%s: %s %d<>%d\" % (self.interface.lang['LimitFW'], self.interface.lang['Invalid value'], minRef, maxRef))\r\n \r\n def setPressed(self):\r\n if self.interface.mboxeSelected > 2:\r\n value= self.positionvalueScale.get()\r\n self.interface.mboxeGet.set_positionCurrent(value)\r\n self.interface.log_print(\"\\n%s= %d\" % (self.interface.lang['Set Position'], self.positionvalueEntry.get()) )\r\n else:\r\n self.interface.log_print(\"\\n%s\" % self.interface.lang['Not selected Mboxe'])\r\n \r\n def getPressed(self):\r\n if self.interface.mboxeSelected > 2:\r\n self.interface.mboxeGet.get_positionCurrent()\r\n self.positionvalueEntry.set(self.interface.mboxeGet.positionCurrent)\r\n self.positionvalueScale.set(self.interface.mboxeGet.positionCurrent)\r\n self.positionScale.update()\r\n self.interface.log_print(\"\\n%s= %d\" % (self.interface.lang['Get Position'], self.interface.mboxeGet.positionCurrent) )\r\n else:\r\n self.interface.log_print(\"\\n%s\" % self.interface.lang['Not selected Mboxe'])\r\n \r\n\r\n def update(self, mboxeGet):\r\n self.limitBW.set(mboxeGet.return_limitBW())\r\n self.limitFW.set(mboxeGet.return_limitFW()) \r\n self.positionvalueEntry.set(mboxeGet.return_positionCurrent())\r\n self.positionvalueScale.set(mboxeGet.return_positionCurrent())\r\n self.positionScale.update()\r\n contactBWFW= mboxeGet.return_contactBWFW()\r\n self.contactBW.set(\"%s: %s\" % (self.interface.lang['Contact BW'], (1 & (contactBWFW) >> 5) ) )\r\n self.contactFW.set(\"%s: %s\" % (self.interface.lang['Contact FW'], (1 & (contactBWFW) >> 4) ) 
)\r\n","repo_name":"Nao974/M-BOXE_MANAGER_TK","sub_path":"frame_position.py","file_name":"frame_position.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36865725226","text":"import pickle\nimport os\nfrom datetime import datetime as dt\n\n\ndef pickle_writer(in_dct: [list[dict], dict],\n path: str,\n file_name: str) -> None:\n \"\"\"\n Writes to disk binary file of dictionary passed\n :param in_dct: dict -- dictionary to write\n :param path: str -- path where binary file to write\n :param file_name: str -- file name WITHOUT extension (extension \".bin\" used)\n :return: None\n \"\"\"\n file_path = os.path.join(path, file_name + '.bin')\n if os.path.exists(file_path):\n file_name += '_copy_' + str(dt.now()).replace(' ', '_').replace(':', '-').split('.')[0]\n file_path = os.path.join(path, file_name + '.bin')\n\n with open(file_path, 'wb') as f_out:\n pickle.dump(in_dct, f_out)\n\n\ndef pickle_reader(file_in_path: str) -> list[dict[str]]:\n \"\"\"\n Reads binary file to a list of dictionaries\n :param file_in_path: str -- path to a file to be read\n :return: list[dict[str]]\n \"\"\"\n with open(file_in_path, 'rb') as f_in:\n out_lst = pickle.load(f_in, encoding='utf-8')\n return out_lst\n\n\nif __name__ == '__main__':\n print('Not for separate use')\n","repo_name":"BeliaevAndrey/Python-II-Homeworks","sub_path":"homework_08/hw08_task02/pack_task02/pickle_works.py","file_name":"pickle_works.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39119159749","text":"from evernote.api.client import EvernoteClient\nfrom evernotecore.error.errors import *\nimport evernote.edam.type.ttypes as CommonTypes\nimport evernote.edam.notestore.ttypes as NoteStoreTypes\nimport evernote.edam.limits.constants as LimitConstants\n\n\nclass UserCoordinator:\n def __init__(self, auth_token, is_sandbox):\n self.client = EvernoteClient(token=auth_token, sandbox=is_sandbox)\n\n def get_user(self):\n user_store = self.client.get_user_store()\n return user_store.getUser()\n\n\nclass NoteCoordinator:\n def __init__(self, auth_token, is_sandbox):\n self.token = auth_token\n self.client = EvernoteClient(token=auth_token, sandbox=is_sandbox)\n self.note_store = self.client.get_note_store()\n\n def create_note(self, content, parent_notebook=None):\n note = CommonTypes.Note(title=content.title, content=content.body)\n if parent_notebook and hasattr(parent_notebook, 'guid'):\n note.notebookGuid = parent_notebook.guid\n return self.note_store.createNote(self.token, note)\n\n def get_notebook(self, name):\n notebooks = self.get_notebooks()\n found = list(filter(lambda x: x.name == name, notebooks))\n if not found:\n raise NotFoundNotebookError(name=name)\n else:\n return found[0]\n\n def get_notebooks(self):\n ret = self.note_store.listNotebooks()\n return ret\n\n def get_latest_note_on_notebook(self, notebook):\n note_metadata = self.find_notes_metadata(notebook)\n if not note_metadata.notes:\n return None\n return note_metadata.notes[0]\n\n def find_notes_metadata(self, notebook=None):\n note_filter = NoteStoreTypes.NoteFilter(order=CommonTypes.NoteSortOrder.TITLE, ascending=False)\n if notebook is not None:\n note_filter.notebookGuid = notebook.guid\n spec = NoteStoreTypes.NotesMetadataResultSpec(includeTitle=True)\n\n return self.note_store.findNotesMetadata(self.token, note_filter, 0, 
LimitConstants.EDAM_USER_NOTES_MAX, spec)\n\n def find_note_counts(self, word):\n note_filter = NoteStoreTypes.NoteFilter()\n note_filter.words = word\n note_filter.ascending = False\n\n ret = self.note_store.findNoteCounts(self.token, note_filter, False)\n return ret\n","repo_name":"gwanos/evernotemaster","sub_path":"evernotecore/coordinator.py","file_name":"coordinator.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24760103967","text":"#!/usr/bin/python3\n#CS7641 HW1 by Tian Mi\n\nimport csv\nimport numpy as np\nimport pandas as pd\nimport graphviz\nimport matplotlib.pyplot as plt\nimport sklearn.tree as tree\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\n\n#################################################\n#wine quality data set\n\ndata = pd.read_csv('winequality-data.csv')\nX = data.iloc[:,:11]\ny = data.iloc[:,11]\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)\nfeatures = list(X_train.columns.values)\n\n#Decision Tree classifier\n#decision tree learning curve of tree depth 5\nlist1=[]\nlist2=[]\nfor i in range(1,95):\n clf = DecisionTreeClassifier(random_state=0, criterion='gini', max_depth=5)\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=1-i/100)\n clf = clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list1.append(accuracy_score(y_train, train_predict))\n list2.append(accuracy_score(y_test, test_predict))\nplt.plot(range(len(list2)),list2)\nplt.plot(range(len(list1)),list1)\nplt.show()\n\n#decision tree learning curve of different function of split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)\nlist1=[]\nlist2=[]\nfor depth in range(3,40):\n clf = DecisionTreeClassifier(random_state=0, criterion='gini', max_depth=depth)\n clf = clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list1.append(accuracy_score(y_test, test_predict))\n \n clf = DecisionTreeClassifier(random_state=0, criterion='entropy', max_depth=depth)\n clf = clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list2.append(accuracy_score(y_test, test_predict))\nplt.plot(range(len(list2)),list2)\nplt.plot(range(len(list1)),list1)\nplt.show()\n\n#choose tree depth of 5 as optimal solution\nclf = DecisionTreeClassifier(random_state=0, criterion='gini', max_depth=5)\nclf = clf.fit(X_train, y_train)\ntest_predict = clf.predict(X_test)\nprint(\"The prediction accuracy of decision tree is \" + str(accuracy_score(y_test, test_predict)))\n\n#visualization of decision tree\n#dot_data = tree.export_graphviz(clf, out_file=None, \n# feature_names=features, \n# class_names=list(map(str, set(y))), \n# filled=True, rounded=True, \n# special_characters=True)\n#graph = graphviz.Source(dot_data)\n#graph\n\n#Neural network classifier learning curve\nlist1=[]\nlist2=[]\nfor i in range(1,95):\n clf = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(20, 5), random_state=0, activation='logistic')\n \n X_train, 
X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=1-i/100)\n clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list1.append(accuracy_score(y_train, train_predict))\n list2.append(accuracy_score(y_test, test_predict))\nplt.plot(range(len(list2)),list2)\nplt.plot(range(len(list1)),list1)\nplt.show()\n\n#Boosted DT classifier\nlist1=[]\nlist2=[]\nfor i in range(1,95):\n clf = clf = AdaBoostClassifier(n_estimators=100)\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=1-i/100)\n clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list1.append(accuracy_score(y_train, train_predict))\n list2.append(accuracy_score(y_test, test_predict))\nplt.plot(range(len(list2)),list2)\nplt.plot(range(len(list1)),list1)\nplt.show()\n\n#SVM classifier\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)\nfor kernel in ('linear', 'rbf'):\n clf = svm.SVC(kernel=kernel, gamma=2)\n clf = clf.fit(X_train, y_train)\n test_predict = clf.predict(X_test)\n print(accuracy_score(y_test, test_predict))\n\n#Choose RBF as the preferred kernel function\nclf = svm.SVC(kernel=\"rbf\", gamma=2)\nclf = clf.fit(X_train, y_train)\ntest_predict = clf.predict(X_test)\nprint(\"The prediction accuracy of SVM with RBF kernel is \" + str(accuracy_score(y_test, test_predict)))\n\n#SVM learning curve with RBF kernel\nlist1=[]\nlist2=[]\nfor i in range(1,95):\n clf = svm.SVC(kernel=\"rbf\", gamma=2)\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=1-i/100)\n clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list1.append(accuracy_score(y_train, train_predict))\n list2.append(accuracy_score(y_test, test_predict))\nplt.ylim(ymin=0,ymax=1.1)\nplt.plot(range(len(list2)),list2)\nplt.plot(range(len(list1)),list1)\nplt.show()\n\n#KNN classifier\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.25)\nKNN_list=[]\nlist2=[]\nfor K in range(1,50):\n clf = KNeighborsClassifier(K, weights=\"distance\")\n scores = cross_val_score(clf, X_train, y_train, cv=5)\n clf = clf.fit(X_train, y_train)\n test_predict = clf.predict(X_test)\n KNN_list.append(accuracy_score(y_test, test_predict))\n list2.append(sum(scores)/len(scores))\nplt.plot(range(len(KNN_list)),KNN_list)\nplt.plot(range(len(list2)),list2)\nplt.show()\n\n#choose 13 as the optimal K\nclf = KNeighborsClassifier(13, weights=\"distance\")\nclf = clf.fit(X_train, y_train)\ntest_predict = clf.predict(X_test)\nprint(\"The prediction accuracy of KNN classifier with k=13 is \" + str(accuracy_score(y_test, test_predict)))\n\n#learning curve of KNN classifier with K=13\nlist1=[]\nlist2=[]\nfor i in range(1,95):\n clf = KNeighborsClassifier(13, weights=\"distance\")\n \n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=1-i/100)\n clf.fit(X_train, y_train)\n train_predict = clf.predict(X_train)\n test_predict = clf.predict(X_test)\n list1.append(accuracy_score(y_train, train_predict))\n list2.append(accuracy_score(y_test, test_predict))\nplt.plot(range(len(list2)),list2)\nplt.plot(range(len(list1)),list1)\nplt.show()\n\n#########################################\n#learning curve function from sklearn tutorial\n\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import 
ShuffleSplit\nfrom sklearn.ensemble import GradientBoostingClassifier\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validators that can be used here.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n \"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\n###############################################\n#Titanic survival data set\ndata = pd.read_csv('titanic_train.csv')\nX = data.iloc[:,2:]\ny = data.iloc[:,1]\nfeatures = list(X.columns.values)\n\n#Decision Tree classifier\n#decision tree learning curve of tree depth 3\nclf = DecisionTreeClassifier(random_state=0, criterion='gini', max_depth=3)\nplot_learning_curve(clf, \"Decision Tree with max depth 3\", X, y, ylim=[0,1])\n\nclf = clf.fit(X, y)\ndot_data = tree.export_graphviz(clf, out_file=None, \n feature_names=features, \n class_names=list(map(str, set(y))), \n filled=True, rounded=True, \n special_characters=True)\ngraph = graphviz.Source(dot_data)\ngraph\n\n#Neural network classifier\nclf = MLPClassifier(hidden_layer_sizes=(5), random_state=0, solver=\"lbfgs\")\nplot_learning_curve(clf, \"MLP with hidden layers as (5)\", X, y, ylim=[0,1])\n\n#Boosted DT classifier\nclf = AdaBoostClassifier(n_estimators=500)\nplot_learning_curve(clf, 
\"Adaboost with n_estimators 500\", X, y, ylim=[0,1])\n\nclf = GradientBoostingClassifier(n_estimators=1000, learning_rate=0.001, max_depth=3, random_state=0, max_leaf_nodes=5)\nplot_learning_curve(clf, \"Gradient Boosting with n_estimators 1000\", X, y, ylim=[0,1])\n\n#SVM classifier\nfrom sklearn.model_selection import GridSearchCV\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-4, 1e-3, 1e-2, 0.1, 1, 10, 100],\n 'C': [0.01, 0.1, 1, 10, 100, 1000]} ]\nclf = GridSearchCV(svm.SVC(), tuned_parameters, cv=5, scoring='accuracy')\nclf.fit(X, y)\nprint(clf.best_params_)\n\nclf = svm.SVC(C=10, kernel=\"rbf\", gamma=0.01)\nplot_learning_curve(clf, \"SVM with RBF kernel, gamma=0.01\", X, y, ylim=[0,1])\n\nclf = svm.SVC(C=100, kernel=\"linear\", gamma=0.0001)\nplot_learning_curve(clf, \"SVM with linear kernel, gamma=0.0001\", X, y, ylim=[0,1])\n\n#KNN classifier\nclf = KNeighborsClassifier(1, weights=\"distance\", p=2)\nplot_learning_curve(clf, \"K nearest neighbors with K=1\", X, y, ylim=[0,1])\n\nclf = KNeighborsClassifier(5, weights=\"distance\", p=2)\nplot_learning_curve(clf, \"K nearest neighbors with K=5\", X, y, ylim=[0,1])\n\nclf = KNeighborsClassifier(10, weights=\"distance\", p=2)\nplot_learning_curve(clf, \"K nearest neighbors with K=10\", X, y, ylim=[0,1])\n\nclf = KNeighborsClassifier(10, weights=\"uniform\", p=2)\nplot_learning_curve(clf, \"K nearest neighbors with K=10, uniform weights\", X, y, ylim=[0,1])\n","repo_name":"py2ras/CS7641","sub_path":"HW1/HW1.py","file_name":"HW1.py","file_ext":"py","file_size_in_byte":11912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24381230855","text":"# coding=utf-8\n###########################################################################\n# Game launcher for OneLauncher.\n#\n# Based on PyLotRO\n# (C) 2009 AJackson \n#\n# Based on LotROLinux\n# (C) 2007-2008 AJackson \n#\n#\n# (C) 2019-2023 June Stepp \n#\n# This file is part of OneLauncher\n#\n# OneLauncher is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# OneLauncher is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with OneLauncher. 
If not, see <https://www.gnu.org/licenses/>.\n###########################################################################\nfrom datetime import datetime\nimport logging\nfrom typing import Optional\n\nfrom PySide6 import QtCore, QtWidgets\n\nfrom ..config import platform_dirs\nfrom ..config.games.addons import get_addons_manager_from_game\nfrom ..config.games.game import save_game\nfrom ..game import Game\nfrom ..network.game_launcher_config import GameLauncherConfig\nfrom ..network.world import World\nfrom ..start_game import MissingLaunchArgumentError, get_qprocess\nfrom .start_game_uic import Ui_startGameDialog\nfrom ..utilities import QByteArray2str\n\n\nclass StartGame(QtWidgets.QDialog):\n def __init__(\n self,\n game: Game,\n game_launcher_config: GameLauncherConfig,\n world: World,\n account_number: str,\n ticket,\n ):\n self.game = game\n self.game_launcher_config = game_launcher_config\n self.world = world\n self.account_number = account_number\n self.ticket = ticket\n\n super(\n StartGame,\n self).__init__(\n QtCore.QCoreApplication.instance().activeWindow(),\n QtCore.Qt.WindowType.FramelessWindowHint)\n\n self.ui = Ui_startGameDialog()\n self.ui.setupUi(self)\n\n self.ui.btnStart.setText(\"Back\")\n self.ui.btnStart.setEnabled(False)\n self.ui.btnSave.setText(\"Save\")\n self.ui.btnSave.setEnabled(False)\n self.ui.btnStop.setText(\"Exit\")\n self.ui.btnStart.clicked.connect(self.btnStartClicked)\n self.ui.btnSave.clicked.connect(self.btnSaveClicked)\n self.ui.btnStop.clicked.connect(self.btnStopClicked)\n\n self.aborted = False\n self.finished = False\n\n self.show()\n\n def get_qprocess(self) -> Optional[QtCore.QProcess]:\n \"\"\"Return setup qprocess with connected signals\"\"\"\n try:\n process = get_qprocess(\n self.game_launcher_config,\n self.game,\n self.world,\n self.account_number,\n self.ticket)\n except MissingLaunchArgumentError:\n # TODO: Easy bug report\n self.ui.txtLog.append(\n \"Game launch argument missing. 
\"\n \"Please report this error if using a supported server.</font>\")\n logger.exception(\"Launch argument missing.\")\n self.reset_buttons()\n return None\n\n process.readyReadStandardOutput.connect(self.readOutput)\n process.readyReadStandardError.connect(self.readErrors)\n process.finished.connect(self.process_finished)\n return process\n\n def readOutput(self):\n text = QByteArray2str(self.process.readAllStandardOutput())\n self.ui.txtLog.append(text)\n logger.debug(f\"Game: {text}\")\n\n def readErrors(self):\n text = QByteArray2str(self.process.readAllStandardError())\n self.ui.txtLog.append(text)\n logger.debug(f\"Game: {text}\")\n\n def process_finished(self, exit_code, exit_status):\n self.reset_buttons()\n if self.aborted:\n self.ui.txtLog.append(\"*** Aborted ***\")\n else:\n self.ui.txtLog.append(\"*** Finished ***\")\n\n def reset_buttons(self):\n self.finished = True\n self.ui.btnStop.setText(\"Exit\")\n self.ui.btnSave.setEnabled(True)\n self.ui.btnStart.setEnabled(True)\n\n def btnStartClicked(self):\n if self.finished:\n self.close()\n\n def btnStopClicked(self):\n if self.finished:\n QtCore.QCoreApplication.instance().quit()\n else:\n self.aborted = True\n self.process.kill()\n\n # Saves a file with the debug log generated by running the game\n def btnSaveClicked(self):\n filename = QtWidgets.QFileDialog.getSaveFileName(\n self, \"Save log file\", str(\n platform_dirs.user_log_path)\n )[0]\n\n if filename != \"\":\n with open(filename, \"w\") as outfile:\n outfile.write(self.ui.txtLog.toPlainText())\n\n def run_startup_scripts(self):\n \"\"\"Run Python startup scripts from add-ons that the user has approved\"\"\"\n addons_manager = get_addons_manager_from_game(self.game)\n for script in addons_manager.enabled_startup_scripts:\n try:\n self.ui.txtLog.append(\n f\"Running '{script.relative_path}' startup script...\")\n script.run()\n except FileNotFoundError:\n self.ui.txtLog.append(\n f\"'{script.relative_path}' startup script does not exist\")\n except SyntaxError as e:\n self.ui.txtLog.append(\n f\"'{script.relative_path}' ran into syntax error: {e}\")\n\n def start_game(self):\n self.game.last_played = datetime.now()\n save_game(self.game)\n\n self.process = self.get_qprocess()\n if self.process is None:\n return\n\n self.finished = False\n self.ui.btnStop.setText(\"Abort\")\n self.run_startup_scripts()\n self.process.start()\n logger.info(\n f\"Game started with: {self.process.program(), self.process.arguments()}\")\n self.ui.txtLog.append(f\"Connecting to world: {self.world.name}\")\n\n self.exec()\n\n\nlogger = logging.getLogger(\"main\")\n","repo_name":"JuneStepp/OneLauncher","sub_path":"src/onelauncher/ui/start_game_window.py","file_name":"start_game_window.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"61"} +{"seq_id":"37006885669","text":"import math\n\nclass MergeSorter:\n 'Merge sorts...'\n\n def __merge(self, arr1, arr2):\n arr1Index = 0\n arr1Length = len(arr1)\n\n arr2Index = 0\n arr2Length = len(arr2)\n\n result = []\n\n # merge while both arrays still have unconsumed elements\n while (arr1Index < arr1Length and\n arr2Index < arr2Length):\n \n if (arr1[arr1Index] < arr2[arr2Index]):\n result.append(arr1[arr1Index])\n arr1Index += 1\n else:\n result.append(arr2[arr2Index])\n arr2Index += 1\n\n while arr1Index < arr1Length:\n result.append(arr1[arr1Index])\n arr1Index += 1\n\n while arr2Index < arr2Length:\n 
result.append(arr2[arr2Index])\n arr2Index += 1\n\n return result\n\n\n def sort(self, array):\n if len(array) <= 1:\n return array\n else:\n arrayLength = len(array)\n arrayHalf = int(math.floor(arrayLength/2))\n\n return self.__merge( self.sort(array[0:arrayHalf]),\n self.sort(array[arrayHalf:arrayLength]) )\n\na = MergeSorter()\nprint(a.sort([3, 2, 6, 2, 5, 1, 9]))\n","repo_name":"mrdave-dev/CTCI","sub_path":"mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11945758903","text":"from flask_app import app\nfrom flask import render_template,redirect,request,session,flash\nfrom flask_app.models.ninja import Ninja\nfrom flask_app.models.dojo import Dojo\n\n@app.route('/ninjas')\ndef show_form():\n dojos = Dojo.select_all()\n return render_template('new-ninja.html', dojos=dojos)\n\n@app.route('/ninja-post', methods=['POST'])\ndef persist_ninja():\n redirectHere = \"/dojos/\" + request.form['dojo_id']\n data = {\n 'dojo_id': request.form['dojo_id'],\n 'first_name': request.form['first_name'],\n 'last_name' : request.form['last_name'],\n 'age': int(request.form['age'])\n }\n returnedID = Ninja.save_ninja(data)\n return redirect(redirectHere)","repo_name":"kpartain/python","sub_path":"flask_mysql/dojosninjas/flask_app/controllers/ninjas.py","file_name":"ninjas.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21608365466","text":"def build(bld):\n\n bld.CryResourceCompilerModule(\n #==============================\n # Settings\n #==============================\n target = 'ResourceCompilerPC',\n vs_filter = 'RC',\n file_list = 'resourcecompilerpc.waf_files',\n platforms = ['win'],\n configurations = ['debug_all', 'profile_all'],\n\n #==============================\n # Common\n #==============================\n cxxflags = '/GR',\n use = ['lua', 'AzToolsFramework'],\n includes = [ '.',\n bld.Path('Code/Tools'),\n bld.Path('Code/Tools/RC/ResourceCompiler'),\n bld.Path('Code/Tools/CryCommonTools'),\n bld.Path('Code/CryEngine/Cry3DEngine'),\n bld.Path('Code/CryEngine/CryScriptSystem'),\n bld.Path('Code/Sandbox'),\n bld.Path('Code/Tools/SDKs/Alembic/alembic/include'),\n bld.Path('Code/Tools/SDKs/Alembic/ilmbase/include/OpenEXR')],\n autod_uselib = [ 'QT5CORE',\n 'QT5GUI',\n 'QT5WIDGETS'],\n\n lib = [ 'Gdi32',\n 'Shell32',\n 'Ole32' ],\n\n defines = [ 'CRYTOOLS',\n 'RESOURCECOMPILERPC_EXPORTS',\n 'OFFLINE_COMPUTATION',\n 'NOT_USE_CRY_MEMORY_MANAGER' ],\n enable_rtti = True,\n export_definitions = 'ResourceCompilerPlugin.def',\n \n #==============================\n # Testing\n #==============================\n test_all_file_list = ['resourcecompilerpc_test.waf_files'],\n )\n","repo_name":"Chronosnoj/LumberyardEnvironment","sub_path":"1.6.0.0/dev/Code/Tools/RC/ResourceCompilerPC/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23095798271","text":"import socket\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.fernet import Fernet\n\nHEADER_SIZE = 32\ndef make_header(length):\n header_format = '{:<' + str(HEADER_SIZE) + '}'\n return 
header_format.format(length).encode('utf-8')\n\ndef get_length(header):\n return int(header.strip())\n\n\nclass Connection:\n def __init__(self, hostname=\"localhost\", port=8000):\n self.hostname = str(hostname)\n self.port = int(port)\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.fernet = None\n\n def set_socket(self, sock):\n self.socket = sock\n\n def connect(self):\n self.socket.connect((self.hostname, self.port))\n self.key_exchange_client()\n\n def bind(self):\n self.socket.bind((self.hostname, self.port))\n\n def listen(self, connection_count):\n self.socket.listen(connection_count)\n\n def get_server(self, connection_count):\n self.bind()\n self.listen(connection_count)\n while True:\n (client, address) = self.socket.accept()\n client_connection = Connection()\n client_connection.set_socket(client)\n try:\n client_connection.key_exchange_server()\n yield client_connection\n except Exception as e:\n print('Error in key exchange:', e)\n\n def key_exchange_client(self):\n public_raw = self.recv_bytes()\n public_key = serialization.load_pem_public_key(public_raw)\n\n enc_key = Fernet.generate_key()\n\n cipher_key = public_key.encrypt(\n enc_key,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n\n self.send_bytes(cipher_key)\n\n self.enc_key = enc_key\n self.fernet = Fernet(enc_key)\n\n\n def key_exchange_server(self):\n private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = private_key.public_key()\n\n public_raw = public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.PKCS1,\n )\n\n self.send_bytes(public_raw)\n\n cipher_raw = self.recv_bytes()\n\n enc_key = private_key.decrypt(\n cipher_raw,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n\n self.enc_key = enc_key\n self.fernet = Fernet(enc_key)\n\n def send_bytes(self, message):\n if self.fernet:\n message = self.fernet.encrypt(message)\n length = len(message)\n header = make_header(length)\n self.socket.sendall(header)\n self.socket.sendall(message)\n return length\n\n def recv_exact(self, length):\n # socket.recv may return fewer bytes than requested, so keep\n # reading until the full payload has arrived\n chunks = []\n while length > 0:\n chunk = self.socket.recv(length)\n if not chunk:\n raise ConnectionError('socket closed while receiving')\n chunks.append(chunk)\n length -= len(chunk)\n return b''.join(chunks)\n\n def recv_bytes(self):\n header = self.recv_exact(HEADER_SIZE)\n length = get_length(header)\n message = self.recv_exact(length)\n if self.fernet:\n return self.fernet.decrypt(message)\n else:\n return message\n\n def send(self, message):\n return self.send_bytes(message.encode('utf-8'))\n\n def recv(self):\n return self.recv_bytes().decode('utf-8')\n\n def close(self):\n self.socket.close()\n\n","repo_name":"aneeshsharma/TCPCrypt","sub_path":"pycomm/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19258620876","text":"import pandas as pd\nimport math\n\nclass Column_att(object):\n def __init__(self):\n self.df = pd.read_excel(r'../../data/LDH_attributes.xlsx',\n sheet_name='column', skiprows=1)\n self.df.set_index('key', inplace=True)\n self.column_length = self.df['value'].loc['length']\n self.column_diameter = self.df['value'].loc['diameter']\n self.Li_recovery = self.df['value'].loc['Li_recovery']\n self.bed_volume = self.Bed_volume()\n self.brine_flow_rate = self.bed_volume * self.df['value'].loc['brine_flow_rate']\n self.washing_flow_rate = self.bed_volume * self.df['value'].loc['washing_flow_rate']\n self.stripping_flow_rate = self.bed_volume * 
self.df['value'].loc['stripping_flow_rate']\n return\n\n def Bed_volume(self):\n \"\"\"\n :return: Bed volume of the column to estimate brine flow rate, washing flow rate and stripping flow rate\n for column extraction\n \"\"\"\n bed_volume = self.column_length * math.pi * self.column_diameter**2\n return bed_volume\n\nif __name__ == '__main__':\n test = Column_att()\n print(test)","repo_name":"jfgphillips/FUSE-V2","sub_path":"Geothermal_IO/LDH/Column.py","file_name":"Column.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23141557739","text":"import pika\n\nparameters = pika.ConnectionParameters(host='localhost', port=5672)\n\ntry:\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n\n print(\"Connection to RabbitMQ successful!\")\n\n connection.close()\n\nexcept pika.exceptions.AMQPConnectionError:\n print(\"Failed to connect to RabbitMQ. Please check your connection parameters.\")\n\nexcept Exception as e:\n print(\"An error occurred:\", str(e))","repo_name":"Tafazzolian/book_library","sub_path":"rabbit_connection_test.py","file_name":"rabbit_connection_test.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"196527545","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport os\nimport sys\nimport re\nimport math\nimport pandas as pd\nimport numpy as np\nimport jieba \nimport time\nimport shutil\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n#start_time = time.time()\n'''Clear a non-empty directory'''\ndef CleanDir( Dir ):\n if os.path.isdir( Dir ):\n paths = os.listdir( Dir )\n for path in paths:\n filePath = os.path.join( Dir, path )\n if os.path.isfile( filePath ):\n try:\n os.remove( filePath )\n except os.error:\n print( \"remove %s error.\" %filePath )# TODO: use the logging module here\n elif os.path.isdir( filePath ):\n# if filePath[-4:].lower() == \".svn\".lower():\n# continue\n shutil.rmtree(filePath,True)\n\n'''Create a new output directory'''\ndef add_new_path(path):\n if os.path.exists(path):\n pass \n else:\n os.mkdir(path)\n return path\n\n'''Check whether a string is non-numeric'''\ndef not_number(s):\n try:\n float(s)\n return False\n except:\n return True\n\nprint('Current working directory:', os.getcwd())\n\n# In[17]:\n\n'''Read nz_data'''\ndef read_nz_data(path, key_list, top_new_grams=0):\n cop = re.compile(\"[^\\u4e00-\\u9fa5^a-z^A-Z^0-9]\")\n f = open(path,'r',encoding='utf-8')\n words_nz = {}\n if top_new_grams == 0:\n top_new_grams = -1 \n for line in f.readlines()[0:top_new_grams]: # 25 million rows\n if len(line):\n line1 = line.strip('\\n') \n linel = line1.split('\\t') #[word, dop, l_en, r_en, score]\n word = linel[0]\n try:\n float(linel[1]) and float(linel[2]) and float(linel[3]) and \\\n float(linel[4]) \n# if float(linel[2])==0 or float(linel[3])==0:\n# print(linel)\n string = cop.sub(\"\", word)\n if len(string) and (not_number(string)):\n word_nz = { key_list[index]: float(value) for (index, value) \\\n in enumerate(linel[1:5]) }\n words_nz[string] = word_nz\n except:\n print('Discarded malformed entry:', linel)\n return words_nz\n\n\n# In[12]:\n\n\n'''Match left/right entropy values for the newly extracted feature words'''\ndef find_dop_en(words_cos_tf, words_nz, key_list):\n words_cos_tf = words_cos_tf.set_index(['words'])\n for col in key_list:\n words_cos_tf[col] = np.nan\n words_add = list(words_cos_tf.index)\n for word in words_add:\n word_nz = words_nz.get(word, None)\n if word_nz:\n for col in key_list:\n value = word_nz.get(col, np.nan)\n words_cos_tf.loc[word,col] = value\n 
words_cos_tf['diff_en'] = words_cos_tf['l_en'] - words_cos_tf['r_en']\n return words_cos_tf\n\n# In[20]:\n'''Drop words with a large left/right entropy gap to cut down matching traversal'''\ndef del_wsplit_words_nz(words_nz):\n words = list(words_nz.keys())\n #print(words[0:10])\n for w in words:\n diff_en = words_nz[w]['l_en'] - words_nz[w]['r_en']\n if abs(diff_en) > 0.5:\n del words_nz[w]\n return words_nz\n\n#%%\n'''Match mis-segmented words'''\ndef match_add_reco(add_diff_en, words_nz):\n # in add_diff_en, words is the index and diff_en is a column\n words_reco = list(words_nz.keys())# recognized new words, assumed sorted by score in descending order\n add_replace_map = {}\n add_nomatch = {}\n words_wsplit = list(add_diff_en.index.values) \n \n i = 1 # counter\n for ww in words_wsplit:\n #try:\n if len(ww):\n diff_en_ww = add_diff_en.loc[ww,'diff_en']\n l_ww = len(ww)\n \n for wr in words_reco:\n if ww in wr: \n \n if diff_en_ww > 0: # |diff_en_ww| above threshold: right entropy is smaller, extend to the right \n if ww in wr[0:l_ww]: # covers the case ww == wr\n add_replace_map[ww] = wr\n break\n if diff_en_ww < 0: # |diff_en_ww| above threshold: left entropy is smaller, extend to the left \n if ww in wr[-l_ww:]: # covers the case ww == wr\n add_replace_map[ww] = wr\n break\n# except:\n# print('mis-segmented word that failed to match:', ww)\n \n match = add_replace_map.get(ww,0)\n if match == 0:\n add_nomatch[ww] = 1\n \n i = i + 1\n if i % 1000 == 0:\n print('Matching mis-segmented word number %d thousand' % (i//1000))\n return add_nomatch, add_replace_map\n\n\n#%%\n'''Strip trailing particles such as 的 and 了 from the matched words'''\ndef clean_rep_map(words_rep,stopwords):\n rep_map = pd.DataFrame(words_rep,index=['rep_words']).T\n rep_map['rep_words_1'] = rep_map['rep_words'].apply(lambda x:x[1:] if x[0] in \\\n stopwords else x)\n rep_map['rep_words_2'] = rep_map['rep_words_1'].apply(lambda x:x[0:-1] if x[-1] in\\\n stopwords else x)\n rep_map = rep_map[['rep_words_2']]\n rep_map = rep_map.reset_index().rename(columns={'index':'words','rep_words_2':\\\n 'rep_words'})\n rep_map = rep_map.drop_duplicates(subset=['words'])\n rep_map = rep_map[rep_map.rep_words.apply(lambda x: len(x)>1)]\n rep_map = rep_map.loc[rep_map.words!=rep_map.rep_words,:]\n return rep_map\n\n#%%\n'''Assemble and save the mis-segmentation replacement map'''\ndef find_info_for_map(add_diff_en1,words_nz):\n rep_map = add_diff_en1.copy()\n for col in ['dop1','l_en1','r_en1','diff_en1','score1']:\n rep_map[col] = np.nan\n \n for i in rep_map.index:\n wr = rep_map.loc[i,'rep_words']\n try:\n if len(wr) > 1: \n nz_wr = words_nz[wr]\n for col in ['dop1','l_en1','r_en1','score1']:\n rep_map.loc[i,col] = nz_wr[col[0:-1]]\n except:\n print('Word with no entropy info found while saving:', wr)\n \n rep_map['diff_en1'] = rep_map['l_en1'] - rep_map['r_en1']\n return rep_map\n\n#%%\n'''Integrate and save the data'''\ndef data_integrate_tosave(add_nz,words_rep_nsame,words_nomatch):\n add_data = add_nz.reset_index().rename(columns={'index':'words'}) # ensure words is in add_nz.columns\n add_data['words'] = add_data['words'].apply(lambda x: words_rep_nsame.get(x,x))\n #add_rep_words = add_data.replace({'words':words_rep_nsame})\n words_rep_nsame = pd.DataFrame(words_rep_nsame,index=['words']).T.reset_index()\n words_rep_nsame.columns = ['raw_words','words']\n words_nomatch = pd.DataFrame(words_nomatch,index=['nomatch']).T.reset_index()\n words_nomatch.columns = ['words','nomatch']\n add_rep_words = pd.merge(add_data,words_rep_nsame,on='words',how='left')\n add_rep_words = pd.merge(add_rep_words,words_nomatch,on='words',how='left')\n add_rep_words = add_rep_words.drop_duplicates(['words'])\n return add_rep_words\n\n\n#%%\n\ndef main(feature_inputfile,newgrams_file,save_path=0,words_top=0):\n if save_path==0:\n save_path = './'\n else:\n save_path = add_new_path(save_path)\n print('Reading feature words to process')\n # usually feature words that have gone through semantic-similarity computation\n words_df = pd.read_table(feature_inputfile)\n if words_top:\n words_df = 
words_df[0:words_top]\n print('Number of feature words to process:', len(words_df))\n \n print('Reading nz_data')\n key_list = ['dop', 'l_en', 'r_en', 'score'] # depends on the input file format\n words_nz = read_nz_data(newgrams_file, key_list,top_new_grams=0)\n print('美白 :', words_nz.get('美白', 'word not found'))\n print('Recognized new words after cleaning:', len(words_nz))\n \n print('Matching left/right entropy for the newly extracted feature words')\n add_nz = find_dop_en(words_df, words_nz, key_list) \n print('Feature words without entropy values:', len(add_nz[add_nz['diff_en'].isnull()]))\n \n print('Selecting mis-segmented words by large left/right entropy gap')\n threshold_wsplit = 0.5\n add_diff_en = add_nz[(add_nz['diff_en']>threshold_wsplit) | (add_nz['diff_en']<-threshold_wsplit)] # the entropy threshold is tunable\n print('Mis-segmented words at threshold %.2f: %d' % (threshold_wsplit, len(add_diff_en)))\n \n print('Dropping words with large entropy gap from words_nz to reduce traversal during matching')\n words_nz = del_wsplit_words_nz(words_nz)\n print('Recognized new words after dropping large-gap words:', len(words_nz))\n \n print('Matching mis-segmented words to their correct forms') \n words_nomatch, words_rep = match_add_reco(add_diff_en, words_nz) \n print('Matched mis-segmented words:', len(words_rep))\n print('Unmatched mis-segmented words:', len(words_nomatch))\n #print('unmatched mis-segmented words:', words_nomatch)\n \n print('Removing trailing particles such as 的 and 了')\n del_words = ['的','后','何','是','和','啊','都','很','太','呀','最','吧','更',\n '啦','要','了','也','于','得']\n rep_map = clean_rep_map(words_rep,del_words)\n words_rep_nsame = {rep_map.loc[i,'words']:rep_map.loc[i,'rep_words'] for i in \\\n rep_map.index }\n \n print('Assembling and saving files')\n add_diff_en = add_diff_en.reset_index().rename(columns={'index':'words'})\n add_diff_en = pd.merge(add_diff_en,rep_map,on='words',how='outer') \n \n add_diff_en = find_info_for_map(add_diff_en,words_nz)\n add_diff_en.to_csv(save_path+'/rep_map.csv',index=False,encoding='utf_8_sig') # note: adjust the save location\n \n add_rep_words = data_integrate_tosave(add_nz,words_rep_nsame,words_nomatch)\n add_rep_words.to_csv(save_path+'/words_rep_wsplit.csv',encoding='utf_8_sig')\n words_no_en = add_rep_words[add_rep_words.diff_en.isnull()]\n words_no_en.to_csv(save_path+'/words_no_en.csv',index=False,encoding='utf_8_sig')\n print('Program finished')\n return add_rep_words\n#%%\nif __name__ == '__main__':\n \n feature_extract_method = 'tf-idf'\n inputfile = 'words_cos_'+feature_extract_method+'.csv'\n newgrams_file = 'new_grams'\n add_rep_words = main(inputfile,newgrams_file)\n# outfile = 'words_rep_wsplit.csv'\n# add_rep_words.to_csv(outfile,index=False,encoding='utf_8_sig')\n \n\n\n# In[ ]:\n \n\n\n\n\n","repo_name":"OuYanXi/repairing-incomplete-words-in-phrase-mining-","sub_path":"match_wsplit_dict.py","file_name":"match_wsplit_dict.py","file_ext":"py","file_size_in_byte":10195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7517324883","text":"import numpy as np\r\nimport sys\r\n\r\nsample_k = 2\r\nsample_m = 2\r\nsample_data = np.array([[1.3, 1.1],\r\n [1.3, 0.2],\r\n [0.6, 2.8],\r\n [3.0, 3.2],\r\n [1.2, 0.7],\r\n [1.4, 1.6],\r\n [1.2, 1.0],\r\n [1.2, 1.1],\r\n [0.6, 1.5],\r\n [1.8, 2.6],\r\n [1.2, 1.3],\r\n [1.2, 1.0],\r\n [0.0, 1.9]])\r\n\r\ndef read2C(filename):\r\n \"\"\"Read 2C input data\"\"\"\r\n with open(filename, \"r\") as file:\r\n k, m = file.readline().strip().split()\r\n data = np.genfromtxt(file, dtype=np.longdouble) # Default type is float\r\n return int(k), int(m), data\r\n\r\n# def centers_of_gravity(clusters):\r\n# centers = np.array()\r\n# for cluster in clusters:\r\n# center = np.average(cluster)\r\n# centers.append(center)\r\n# \r\n# return centers\r\n\r\ndef lloyd(k, m, data):\r\n centers = np.full((k,m), fill_value = 0.0)\r\n \r\n #initial centers\r\n for i in range(0, k):\r\n centers[i] = data[i]\r\n \r\n changing = True\r\n \r\n while changing == True:\r\n 
clusters = np.full((k,m), fill_value = 0.0)\r\n count = np.full((k,), fill_value = 0)\r\n \r\n #centers to clusters\r\n for point in data:\r\n smallest_distance = float('inf')\r\n closest_center = -1\r\n for i, center in enumerate(centers):\r\n distance = np.linalg.norm(point-center) #euclidean distance\r\n if distance < smallest_distance:\r\n smallest_distance = distance\r\n closest_center = i\r\n clusters[closest_center] += point\r\n count[closest_center] += 1\r\n \r\n old_centers = centers\r\n #print(count)\r\n \r\n centers = clusters / count[:,np.newaxis]\r\n\r\n \r\n #clusters to centers\r\n #centers = centers_of_gravity(clusters)\r\n \r\n #track change\r\n# if len(centers) == len(old_centers):\r\n# counter = 0\r\n# for i in range(0, len(centers)):\r\n# for j in range(len(centers[i])):\r\n# percent_change = (centers[i,j] - old_centers[i,j])/old_centers[i,j]\r\n# if percent_change < 0.001:\r\n# counter += 1\r\n# if counter == len(centers) * m:\r\n# changing = False\r\n if np.allclose(centers, old_centers) == True:\r\n changing = False\r\n \r\n return centers\r\n \r\n## TEST\r\n# sample_output = lloyd(sample_k, sample_m, sample_data)\r\n# sample_processed_output= np.savetxt(sys.stdout, np.transpose(sample_output), fmt=\"%.3f\")\r\n# print(sample_processed_output)\r\n\r\n## OUTPUT\r\nk, m, data = read2C(\"rosalind_ba8c.txt\")\r\nprint(k)\r\nprint(m)\r\nprint(data)\r\noutput = lloyd(k, m, data)\r\nnp.savetxt(sys.stdout, output, fmt=\"%.3f\")  # savetxt returns None, so there is nothing to print\r\n","repo_name":"Evelyn-Kriter/Bioinformatics-Algorithms","sub_path":"Lloyd Algorithm.py","file_name":"Lloyd Algorithm.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74132262275","text":"'''\nLeetCode 121. 
Best Time to Buy and Sell Stock\n\nSay you have an array for which the ith element is the price of a\ngiven stock on day i.\n\nIf you were only permitted to complete at most one transaction\n(i.e., buy one and sell one share of the stock), design an algorithm\nto find the maximum profit.\n\nNote that you cannot sell a stock before you buy one.\n'''\ndef max_profit(prices: [int]) -> int:\n max_profit = 0\n buy_time = 0\n for time in range(len(prices)):\n profit = prices[time] - prices[buy_time] if time > buy_time else 0\n if profit > max_profit:\n max_profit = profit\n if prices[time] < prices[buy_time]:\n buy_time = time \n return max_profit\n","repo_name":"Hendryboyz/algo-reviews","sub_path":"dynamic-programming/sell_stock.py","file_name":"sell_stock.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585364821","text":"import math\r\nfor t in range(int(input())):\r\n (N,K) = [int(i) for i in input().strip().split(' ')]\r\n P = [[int(i) for i in input().strip().split(' ')] for j in range(N)]\r\n for p in P: p.append(p[0]*p[1])\r\n H = sorted(P,key=lambda x:(x[2]),reverse=True)\r\n HB = sorted(H[0:K],key=lambda x:(x[0]),reverse=True)\r\n B = sorted(P,key=lambda x:x[0],reverse=True)\r\n #print(P,H,B)\r\n maxH = 0\r\n for i in range(K): maxH+=H[i][2]\r\n maxT = math.pi*(HB[0][0]**2)+2*math.pi*maxH\r\n if B[0] not in H[0:K]:\r\n maxNew = math.pi*(B[0][0]**2)+2*math.pi*(maxH-H[K-1][2]+B[0][2])\r\n if maxNew>maxT: maxT=maxNew\r\n print(\"Case #{0}: {1}\".format(t+1,round(maxT,9)))\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/166.py","file_name":"166.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23612208331","text":"#!/usr/bin/env python\n\nFNAME = \"A-small-attempt0.in\"\n\nclass Case(object):\n def __init__(self, coords):\n self.coords = coords\n \ndef getcentroid(c1, c2, c3):\n xsum = c1[0] + c2[0] + c3[0]\n ysum = c1[1] + c2[1] + c3[1]\n return xsum % 3 == 0 and ysum % 3 == 0\n\ndef combinations(items, n):\n if n==0: yield []\n else:\n for i in xrange(len(items)):\n for cc in combinations(items[i+1:],n-1):\n yield [items[i]]+cc \n\n\ndef getnumtrigs(coords):\n s = 0\n combos = combinations(coords, 3)\n for combo in combos:\n c = getcentroid(*combo)\n if c:\n s += 1\n return s\n\ndef getcoords(n, A, B, C, D, x0, y0, M):\n coords = []\n X = x0\n Y = y0\n coords.append((X, Y))\n for i in range(n - 1):\n X = (A * X + B) % M\n Y = (C * Y + D) % M\n coords.append((X, Y))\n return coords\n\ndef parse(lines):\n cases = []\n for line in lines[1:]:\n ints = [int(value) for value in line.split()]\n cases.append(Case(getcoords(*ints))) \n return cases\n \nif __name__ == \"__main__\":\n lines = file(FNAME).read().splitlines()\n cases = parse(lines)\n answers = [getnumtrigs(case.coords) for case in cases]\n outlines = [\"Case #%d: %d\\n\" % (i + 1, answer) for i, answer in enumerate(answers)]\n file(FNAME + \".out\", \"w\").writelines(outlines)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_7/162.py","file_name":"162.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74645958275","text":"import json\nfrom datetime import datetime\nfrom urllib.error import HTTPError\nfrom urllib.parse import 
urlencode\nfrom urllib.request import Request, urlopen\nfrom base64 import b64encode\nfrom pages import extractor_transformation as extractor_trans\n\n\ndef call_api(sparql, link_query):\n values = urlencode(\n {\n 'query': 'PREFIX aitslt:' + sparql})\n credentials = b64encode('admin:admin'.encode('ascii')) # username:password\n headers = {\n 'Authorization': 'Basic %s' % credentials.decode('ascii'),\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/sparql-results+json'\n }\n data = values.encode('ascii')\n request = Request(link_query, data=data, headers=headers)\n try:\n response_body = json.loads(urlopen(request).read().decode('utf8'))\n return extractor_trans.transform_api(response_body)\n except HTTPError as e:\n print(e.code)\n print(e.reason)\n print(request.__dict__)\n response_body = {\n \"head\": {\n \"vars\": [\n \"subject\",\n \"predicate\",\n \"object\"\n ]\n },\n \"results\": {\n \"bindings\": [\n {\n \"subject\": {\n \"type\": \"literal\",\n \"value\": \"Error\"\n },\n \"predicate\": {\n \"type\": \"literal\",\n \"value\": e.code\n },\n \"object\": {\n \"type\": \"literal\",\n \"value\": e.reason\n }\n }\n ]\n }\n }\n return response_body\n\n\ndef nested_filter_query(prefixes_list, subject_domain, predicate, list_object):\n nested = '{ select distinct ?subject where { ?subject rdf:type ' + subject_domain \\\n + ' . ?subject ?predicate ?object . ' \\\n + 'filter(?object != owl:NamedIndividual)'\n print(prefixes_list) # && ?predicate != rdf:type\n if len(prefixes_list) == 2:\n for p in prefixes_list:\n if p in ['integer']:\n for idx, each in enumerate(list_object):\n list_object[idx] = each\n elif p in ['float']:\n for idx, each in enumerate(list_object):\n list_object[idx] = '\"{}\"^^xsd:float'.format(each)\n elif p in ['literal']:\n for idx, each in enumerate(list_object):\n list_object[idx] = '\"{}\"'.format(each)\n elif p in ['dateTime']:\n for idx, each in enumerate(list_object):\n list_object[idx] = '\"{}\"^^xsd:dateTime'.format(each)\n # list_object[idx] = '\"{}\"^^xsd:dateTime'.format(datetime.strptime(each, \"%d %b'%y %H:%M:%S\").isoformat())\n elif p in ['http://www.w3.org/1999/02/22-rdf-syntax-ns']:\n for idx, each in enumerate(list_object):\n # list_object[idx] = '<' + prefixes_list[0] + '#' + each + '>'\n list_object[idx] = '<' + each + '>'\n prefix_predicate = '<' + p + '#' + predicate + '>'\n else:\n prefix_predicate = '<' + p + '#' + predicate + '>'\n else:\n prefix_predicate = '<' + prefixes_list[0] + '#' + predicate + '>'\n for idx, each in enumerate(list_object):\n # list_object[idx] = '<' + prefixes_list[0] + '#' + each + '>'\n list_object[idx] = '<' + each + '>'\n\n nested += 'filter(?predicate = ' + prefix_predicate\n text = make_filter_sparql(list_object, 'object')\n\n nested += text + ')}}'\n return nested\n\n\ndef make_filter_sparql(list_filter, object_var):\n text = ''\n # print(list_filter)\n for idx, each in enumerate(list_filter):\n if idx == 0:\n text += ' && (?' + object_var + ' = ' + each\n else:\n text += ' || ?' 
+ object_var + ' = ' + each\n if text != '':\n text += ')'\n return text","repo_name":"mamilkew/aitsltkms","sub_path":"pages/sparql_wrapper.py","file_name":"sparql_wrapper.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40636841732","text":"from abc import ABCMeta,abstractmethod\nimport numpy as np\nimport pickle\nimport os\nimport dataset\nimport h5py\n\nclass Loader (object):\n __metaclass__ = ABCMeta\n\n def __init__(this,autocontrast=True,normalise=True,rescale=None,*args,**kwargs):\n this.autocontrast = autocontrast\n this.normalise = normalise\n this.rescale = rescale\n\n @abstractmethod\n def get_data(this,set,split=None):\n pass\n\n @abstractmethod\n def number_splits(this):\n pass\n\n\nclass CVPPP17Loader(Loader):\n\n def __init__(this,training_path,testing_path,a1=True,a2=True,a3=True,a4=True,a5=False,*args,**kwargs):\n super(CVPPP17Loader, this).__init__(*args, **kwargs)\n this.data_to_load = {\"a1\":a1,\"a2\":a2,\"a3\":a3,\"a4\":a4,\"a5\":a5}\n\n this.training_path = training_path\n this.testing_path = testing_path\n\n\n\n this.splits = [[[1,2,3], 4,4],\n [[2,3,4], 1,1],\n [[3,4,1], 2,2],\n [[4,1,2], 3,3]]\n\n\n def number_splits(this):\n return len(this.splits)\n\n def get_data(this,set,split=0):\n if (set==\"training\"):\n x_train_all, x_test_all, y_train_all, y_test_all, x_train_set, x_test_set = dataset.get_data(this.splits[split],this.training_path)\n \n filter_training = np.zeros((x_train_set.shape[0],))\n filter_testing = np.zeros((x_test_set.shape[0],))\n\n for t in this.data_to_load:\n if (this.data_to_load[t]==True):\n T = t.upper()\n filter_training = np.logical_or(filter_training,x_train_set==T)\n filter_testing = np.logical_or(filter_testing, x_test_set == T)\n\n x_train = x_train_all[filter_training,...]\n y_train = y_train_all[filter_training]\n x_test = x_test_all[filter_testing,...]\n y_test = y_test_all[filter_testing]\n \n elif (set==\"testing\"):\n x_train, y_train = (None, None)\n\n x_test_all,y_test_all,test_sets = dataset.get_data_testing(this.testing_path)\n\n filter = np.zeros((test_sets.shape[0],))\n\n for t in this.data_to_load:\n if (this.data_to_load[t] == True):\n T = t.upper()\n filter = np.logical_or(filter, test_sets == T)\n\n x_test = x_test_all[filter, ...]\n y_test = y_test_all[filter]\n\n else:\n raise ValueError(\"Set {} is not valid\".format(set))\n\n return x_train,y_train,x_test,y_test\n\nclass MultiModalDataLoader(Loader):\n\n def __init__(this,path,data_file=\"mm_data.h5\",split_file=\"splits.4.h5\",rgb=True,ir=False,fmp=False,depth=False,*args,**kwargs):\n super(MultiModalDataLoader, this).__init__(*args, **kwargs)\n\n this.rgb = rgb\n this.ir=ir\n this.fmp=fmp\n this.depth=depth\n\n this.path = path\n this.data_file=data_file\n this.split_file= split_file\n\n with open(os.path.join(this.path,this.split_file),'rb') as h:\n this.splits = pickle.load(h, encoding='latin1')['splits']\n\n def number_splits(this):\n return len(this.splits)\n\n def get_data(this,set,split=0):\n\n with open(os.path.join(this.path,this.data_file),'rb') as h:\n data = pickle.load(h, encoding='latin1')\n\n xs = []\n\n if (this.rgb):\n rgb = data['rgb'].transpose((0,2,3,1))\n xs.append(rgb)\n\n if (this.ir):\n ir = data['ir'].transpose((0,2,3,1))\n\n xs.append(ir)\n\n if (this.fmp):\n fmp = data['fmp'].transpose((0,2,3,1))\n\n xs.append(fmp)\n\n if (this.depth):\n depth = data['depth'].transpose((0,2,3,1))\n\n xs.append(depth)\n\n y = data['count']\n\n 
idx_train = this.splits[split][2]\n idx_val = this.splits[split][1]\n idx_test = this.splits[split][0]\n\n if (set==\"training\"):\n x_train = [x[idx_train] for x in xs]\n x_val = [x[idx_val] for x in xs]\n\n y_train = y[idx_train]\n y_val = y[idx_val]\n\n return x_train,y_train,x_val,y_val\n\n\n elif (set==\"testing\"):\n x_test = [x[idx_test] for x in xs]\n y_test = y[idx_test]\n\n\n return None, None, x_test, y_test\n else:\n raise ValueError(\"Set {} is not valid\".format(set))\n\n\nclass KomatsunaLoader(Loader):\n\n def __init__(this, filename, *args, **kwargs):\n super(KomatsunaLoader,this).__init__(*args,**kwargs)\n\n this.filename=filename\n\n def number_splits(this):\n return 1\n\n def get_data(this,set,split=0):\n data = h5py.File(this.filename, \"r\")\n\n X = data['X'][()]\n Y = data['Y'][()][0]\n ID = data['ID'][()][0]\n #TS = data['TS'].value[0]\n\n #print(TS)\n \n X = X.transpose((3,0,1,2))\n \"\"\"\n if (this.autocontrast):\n X=dataset.autocontrast(np.asarray(X,dtype='uint8'))\n\n if (this.rescale != None):\n X=dataset.rescale(X,this.rescale)\n\n if (this.normalise):\n X=dataset.normalise(X)\n \"\"\"\n idx_train = (ID == 0) + (ID == 1)\n idx_val = (ID == 4)\n idx_test = (ID == 2) + (ID == 3)\n\n if (set==\"training\"):\n return X[idx_train],Y[idx_train],X[idx_val],Y[idx_val]\n\n if (set==\"testing\"):\n return None, None, X[idx_test],Y[idx_test]\n\n","repo_name":"MattiaLitrico/Semi-supervised-Domain-Adaptation-for-Holistic-Counting-under-Label-Gap","sub_path":"LeafCode/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29964772453","text":"\"\"\"\nUnittests for FEN utils\n\"\"\"\nimport unittest\n\nimport numpy as np\nimport numpy.testing as np_testing\n\nfrom engine import Board, Game\nfrom engine.fen_utils import from_fen, to_fen\n\n\nclass TestUtils(unittest.TestCase):\n\n CLASSIC_FEN = \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR\"\n INVALID_RANKS_FEN = \"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR/8\" # 9 ranks\n\n def test_to_fen_classic(self) -> None:\n board_arr = np.load(\"tests/data/classic_board_dump.npy\")\n expected_fen = self.CLASSIC_FEN\n board = Board()\n board.board = board_arr\n self.assertEqual(to_fen(board), expected_fen)\n\n def test_from_fen_classic(self) -> None:\n fen = self.CLASSIC_FEN\n expected_board_arr = np.load(\"tests/data/classic_board_dump.npy\")\n game = Game()\n from_fen(fen, game)\n np_testing.assert_array_equal(game.board.board, expected_board_arr)\n\n def test_from_fen_invalid_ranks(self) -> None:\n fen_string = self.INVALID_RANKS_FEN\n game = Game()\n with self.assertRaises(ValueError):\n from_fen(fen_string, game)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"urav06/chess","sub_path":"tests/test_fen_utils.py","file_name":"test_fen_utils.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14461137620","text":"import pgzrun\n\nWIDTH = 600\nHEIGHT = 600\nTITLE = 'Tic Tac Toe'\n\nWBOX = 400\nOBOX = 100\n\nwdiv3 = WBOX / 3\nhdiv3 = WBOX / 3\nwdiv32 = wdiv3*2.0\nhdiv32 = hdiv3*2.0\n\nwoffset = wdiv3 / 2\nhoffset = hdiv3 / 2\n\nWHITE=(255,255,255)\nBACK_GROUND = (0xEE, 0xFF, 0xFF)\nBLUE10 = (125,125,255)\n\no133 = Actor('o133')\nx133 = Actor('x133')\n\nstage = 0\ntileStatus = [\n\t\t\t0,0,0,\n\t\t\t0,0,0,\n\t\t\t0,0,0]\nwinList = 
[\n\t\t[0,1,2],\n\t\t[3,4,5],\n\t\t[6,7,8],\n\t\t[0,3,6],\n\t\t[1,4,7],\n\t\t[2,5,8],\n\t\t[0,4,8],\n\t\t[2,4,6]\n\t]\n\ndef drawLine2(color, x1, y1, w, h):\n rect = Rect((x1, y1), (w, h))\n screen.draw.filled_rect(rect, color)\n\ndef drawX(tile):\n px = tile.x + OBOX\n py = tile.y + OBOX\n Actor('x133', (px+woffset, py+hoffset)).draw()\n\ndef drawO(tile):\n px = tile.x + OBOX\n py = tile.y + OBOX\n Actor('o133', (px+woffset, py+hoffset)).draw()\n\nclass Tile:\n def __init__(self, x, y, w, h):\n self.x = x\n self.y = y\n self.width = w\n self.height = h\n\n def contains(self, px, py):\n return self.x < px and px < self.x+self.width and self.y < py and py < self.y+self.height\n\ntiles = [ \n Tile(0,0,wdiv3,hdiv3), Tile(wdiv3,0,wdiv3,hdiv3), Tile(wdiv32,0,wdiv3,hdiv3),\n Tile(0,hdiv3,wdiv3,hdiv3), Tile(wdiv3,hdiv3,wdiv3,hdiv3), Tile(wdiv32,hdiv3,wdiv3,hdiv3),\n Tile(0,hdiv32,wdiv3,hdiv3), Tile(wdiv3,hdiv32,wdiv3,hdiv3), Tile(wdiv32,hdiv32,wdiv3,hdiv3)]\n\ndef findMousePointTile(px, py):\n for i, ti in enumerate(tiles):\n if ti.contains(px, py):\n return i;\n return -1;\n\ndef checkWinner():\n for i, seq3 in enumerate(winList):\n winner = tileStatus[seq3[0]]\n if winner != 0:\n if winner == tileStatus[seq3[1]] and winner == tileStatus[seq3[2]]:\n return winner\n return 0\n \ndef draw():\n screen.fill(WHITE)\n screen.draw.filled_rect(Rect(OBOX,OBOX,WBOX,WBOX), BACK_GROUND)\n\n # Draw OX Mark\n for i,t in enumerate(tiles):\n if tileStatus[i]==1: \n drawX(tiles[i])\n elif tileStatus[i]==2:\n drawO(tiles[i])\n \n # Draw Tile Line\n for i in range(4):\n drawLine2(BLUE10, wdiv3*i+OBOX, 0+OBOX, 2, WBOX+2)\n drawLine2(BLUE10, 0+OBOX, hdiv3*i+OBOX, WBOX+2, 2)\n \ndef update():\n winner = checkWinner()\n if (winner!=0):\n mark = [\"\", \"X\", \"O\"]\n print(\"Winner is %s\" % mark[winner])\n\ndef on_mouse_down(pos):\n global stage\n px, py = pos\n px -= OBOX\n py -= OBOX\n pos = findMousePointTile(px, py)\n # ignore clicks outside the board (findMousePointTile returns -1)\n if pos >= 0 and tileStatus[pos]==0:\n stage += 1\n if stage%2 == 0:\n tileStatus[pos] = 2\n else:\n tileStatus[pos] = 1\n\npgzrun.go()\n","repo_name":"chomskim/OSS","sub_path":"web-game/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15879469709","text":"import unittest\n\nfrom view_layer_common import *\n\n\n# ############################################################\n# Testing\n# ############################################################\n\nclass UnitTesting(ViewLayerTesting):\n def test_group_write_load(self):\n \"\"\"\n See if saving/loading is working for groups\n \"\"\"\n import bpy\n scene = bpy.context.scene\n layer_collection = bpy.context.layer_collection\n\n while len(scene.view_layers) > 1:\n scene.view_layers.remove(scene.view_layers[1])\n\n # create group\n group = layer_collection.create_group()\n\n self.assertEqual(1, len(bpy.data.groups))\n self.assertEqual(1, bpy.data.groups[0].users)\n self.assertEqual(3, len(bpy.data.groups[0].objects))\n\n import os\n import tempfile\n with tempfile.TemporaryDirectory() as dirpath:\n filepath = os.path.join(dirpath, 'layers.blend')\n\n for i in range(3):\n # save and re-open file\n bpy.ops.wm.save_mainfile('EXEC_DEFAULT', filepath=filepath)\n bpy.ops.wm.open_mainfile('EXEC_DEFAULT', filepath=filepath)\n\n self.assertEqual(1, len(bpy.data.groups))\n self.assertEqual(1, bpy.data.groups[0].users)\n self.assertEqual(3, len(bpy.data.groups[0].objects))\n\n # empty the group of objects\n group = bpy.data.groups[0]\n 
while group.objects:\n group.view_layer.collections[0].collection.objects.unlink(group.objects[0])\n\n # save and re-open file\n bpy.ops.wm.save_mainfile('EXEC_DEFAULT', filepath=filepath)\n bpy.ops.wm.open_mainfile('EXEC_DEFAULT', filepath=filepath)\n\n self.assertEqual(1, len(bpy.data.groups))\n self.assertEqual(0, bpy.data.groups[0].users)\n self.assertEqual(0, len(bpy.data.groups[0].objects))\n\n # save and re-open file\n bpy.ops.wm.save_mainfile('EXEC_DEFAULT', filepath=filepath)\n bpy.ops.wm.open_mainfile('EXEC_DEFAULT', filepath=filepath)\n\n self.assertEqual(0, len(bpy.data.groups))\n\n\n# ############################################################\n# Main - Same For All Render Layer Tests\n# ############################################################\n\nif __name__ == '__main__':\n UnitTesting._extra_arguments = setup_extra_arguments(__file__)\n unittest.main()\n","repo_name":"blender/blender","sub_path":"tests/python/view_layer/test_group_d.py","file_name":"test_group_d.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"32056623924","text":"# Bubble Sort is one of the stable sorting algorithms\nimport sys\nimport math\nfrom typing import Tuple\ninput = sys.stdin.readline\n\ndef print_vec (A: list):\n print (\" \".join(map (str, A)))\n\ndef swap (A, i, j):\n temp = A [i]\n A [i] = A [j]\n A [j] = temp\n \n\ndef bubble_sort(A: list) -> Tuple[list, int]:\n if len (A) <= 1:\n return (A, 0)\n swap_count = 0\n for i in range(len(A)):\n for j in range(len(A) - 1, i + 1 - 1, -1):\n if A[j] < A[j - 1]:\n swap_count += 1\n swap (A, j, j - 1)\n return (A, swap_count)\n\n\n\nif __name__ == \"__main__\":\n n = int(input())\n res = 0\n vec = list (map (int, input ().split ()))\n result= bubble_sort(vec)\n print_vec (result [0])\n print(result [1])\n","repo_name":"MokkeMeguru/dsa","sub_path":"aoj/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4855263232","text":"if __name__ == '__main__':\n num_of_cases = int(input())\n for buf in range(num_of_cases):\n num_of_data = int(input())\n\n # create sorted positive_list/negative_list\n positive_list, negative_list = [], []\n for buff in range(num_of_data):\n data = int(input())\n if data > 0:\n positive_list.append(data)\n else:\n negative_list.append(data)\n positive_list.sort()\n negative_list.sort(reverse = True)\n\n # init p_i/n_i/height/area and +/- list and p_n_flag(+ or -)\n p_i, n_i, height, area = 0, 0, 0, 0\n if positive_list[p_i] < abs(negative_list[n_i]):\n area = positive_list[p_i]\n p_i += 1\n height += 1\n p_n_flag = False\n else:\n area = abs(negative_list[n_i])\n n_i += 1\n height += 1\n p_n_flag = True\n\n # crossed check +/- list until (index over + your turn)\n while True:\n #print('p_i, n_i, height, area:', p_i, n_i, height, area)\n if p_i == len(positive_list) and p_n_flag == True:\n break\n if n_i == len(negative_list) and p_n_flag == False:\n break\n\n if p_n_flag:\n if positive_list[p_i] > area:\n area = positive_list[p_i]\n height += 1\n p_n_flag = False\n p_i += 1\n else:\n if abs(negative_list[n_i]) > area:\n area = abs(negative_list[n_i])\n height += 1\n p_n_flag = True\n n_i += 1\n\n 
print(height)","repo_name":"110621013/algorithm","sub_path":"hw/HW8_0518/Building.py","file_name":"Building.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40528442739","text":"import json\n\n# person = '{\"name\": \"Bob\", \"languages\": [\"English\", \"French\"]}'\n# person_dict = json.loads(person)\n#\n# print(person_dict)\n# print(person_dict['languages'])\n\nf = open(\"DataFile.txt\", \"r\")\ndata = json.load(f)\nprint(data)\nf.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# f = open(\"DataFile.txt\", \"r\")\n# c = f.read(DataFile.txt)\n# f.write(\"Hello\")\n#split(\"\") #Splits sentence into words or a paragraph into sentences\n # You can split by any character that you put in the brackets\n\n#when you open a file you have to close it or it will crash\n# f.close()","repo_name":"GilchrisK/ComputerScience","sub_path":"Old stuff/#Compuer Science/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44753752233","text":"# modules\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import colors\r\n\r\n# variables 1\r\nv_density = 30\r\nl_density = 80\r\np_density = 80\r\nd_density = 500\r\nc_pos = (1, 1)\r\nc_radius = 0.95\r\n\r\n# variables 2\r\nl_viscosity = 148.1408929\r\nrho_a = 7850\r\nrho_g = 1260\r\nradius = 0.0005\r\npi = np.pi\r\ng = 9.81\r\nsum_f = (\r\n (\r\n -\r\n (rho_g * 2 * (4/3) * pi * radius**3 * g)\r\n -\r\n (6 * pi * radius * l_viscosity)\r\n +\r\n (rho_a * (4/3) * 2 * pi * radius**3 * g)\r\n )\r\n)\r\n\r\n# main\r\ndef flow(\r\n psi,\r\n mask = None,\r\n x=np.linspace(-4, 4, d_density),\r\n y=np.linspace(-4, 4, d_density),\r\n h=1e-10\r\n):\r\n x_axis, y_axis = np.meshgrid(x, y)\r\n if mask:\r\n x_axis, y_axis = (\r\n np.ma.masked_where(mask(x_axis, y_axis), x_axis),\r\n np.ma.masked_where(mask(x_axis, y_axis), y_axis)\r\n )\r\n\r\n # complex analysis navier-stokes\r\n u_axis = - (psi(x_axis, y_axis + h) - psi(x_axis, y_axis - h)) / (2 * h)\r\n v_axis = (psi(x_axis + h, y_axis) - psi(x_axis - h, y_axis)) / (2 * h)\r\n\r\n plt.figure(figsize=(8, 8), dpi=p_density)\r\n plt.quiver(\r\n x_axis[::v_density, ::v_density],\r\n y_axis[::v_density, ::v_density],\r\n u_axis[::v_density, ::v_density],\r\n v_axis[::v_density, ::v_density],\r\n alpha=0.8\r\n )\r\n cmap = colors.ListedColormap(['black', 'black'])\r\n bounds = [0, 0, 0]\r\n norm = colors.BoundaryNorm(bounds, cmap.N)\r\n plt.contour(\r\n x_axis,\r\n y_axis,\r\n psi(x_axis, y_axis),\r\n levels=l_density,\r\n cmap=cmap,\r\n norm=norm,\r\n alpha=0.5\r\n )\r\n plt.axis('equal')\r\n\r\n centre = plt.Circle((0, 0), 0.95, color='gray')\r\n plt.gca().add_patch(centre)\r\n\r\n# speed over time (dv/dt=Sf*v)\r\nplt.rcParams[\"figure.autolayout\"] = True\r\n\r\ndef speed(x):\r\n return np.e**(sum_f * x)\r\n\r\nx = np.linspace(-10, 10, 100)\r\nplt.plot(\r\n x, speed(x),\r\n color='black',\r\n alpha=0.8\r\n)\r\nplt.ylabel(\"v [m/s]\")\r\nplt.xlabel(\"t [s]\")\r\nax = plt.gca()\r\nax.axes.xaxis.set_ticklabels([])\r\nax.axes.yaxis.set_ticklabels([])\r\nplt.grid()\r\n\r\ndef v_lim(x):\r\n return -5000\r\n\r\nt1 = np.arange(-10, 10, 0.001)\r\nplt.plot(\r\n t1,\r\n np.full(t1.shape, v_lim(t1)),\r\n alpha=0.6, color='gray')\r\nplt.text(-9.8, 15000, 'v lim', color='black')\r\n\r\n# flow lines functions\r\nball = lambda y, x : y - y / (x**2 + y**2)\r\nball_mask = lambda y, x : 
x**2 + y**2 < 0.95\r\nflow(ball, ball_mask)\r\n\r\nplt.show()\r\n","repo_name":"mightykatun/Python-Vectorial-Fields","sub_path":"Fluid_3.1.py","file_name":"Fluid_3.1.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11216177170","text":"#\n# [345] Reverse Vowels of a String\n#\n# https://leetcode.com/problems/reverse-vowels-of-a-string/description/\n#\n# algorithms\n# Easy (39.07%)\n# Total Accepted: 102.1K\n# Total Submissions: 261.2K\n# Testcase Example: '\"hello\"'\n#\n# Write a function that takes a string as input and reverse only the vowels of\n# a string.\n# \n# \n# Example 1:\n# Given s = \"hello\", return \"holle\".\n# \n# \n# \n# Example 2:\n# Given s = \"leetcode\", return \"leotcede\".\n# \n# \n# \n# Note:\n# The vowels does not include the letter \"y\".\n# \n#\nclass Solution(object):\n def reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n # 3 star\n vowels = \"aeiou\"\n left, right = 0, len(s) - 1\n l = list(s)\n while left <= right:\n while left <= right and l[left].lower() not in vowels:\n left += 1\n while left <= right and l[right].lower() not in vowels:\n right -= 1\n if left <= right:\n l[left], l[right] = l[right], l[left]\n left += 1\n right -= 1\n return \"\".join(l)\n","repo_name":"goalong/lc","sub_path":"v1/345.reverse-vowels-of-a-string.135325188.ac.py","file_name":"345.reverse-vowels-of-a-string.135325188.ac.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"36763316927","text":"import statistics\nfrom collections import Counter\n\n#For all days\ndef print_count(countValue):\n # Use a breakpoint in the code line below to debug your script.\n print(f'Total Count: {countValue}') # Press Ctrl+F8 to toggle the breakpoint.\n\ndef split(word):\n return [char for char in word]\n\ndef splitInt(word):\n return [int(char) for char in word]\n\ndef IsValidIndex( listToCheck:list, index):\n if 0 <= index < len(listToCheck):\n return True\n return False\n\ndef IsValidCoordinate( listToCheck, rowIndex, colIndex):\n if IsValidIndex(listToCheck, rowIndex):\n if IsValidIndex(listToCheck[rowIndex], colIndex):\n return True\n return False\n\ndef IsValidCoordinateTuple( listToCheck, xyTuple:tuple):\n return IsValidCoordinate(listToCheck, xyTuple[1], xyTuple[0])\n\ndef GetValAtCoordinate( list:list, xy:tuple):\n return list[xy[1]][xy[0]]\n\ndef SetValAtCoordinate( list:list, xy:tuple, val):\n list[xy[1]][xy[0]] = val\n return\n\ndef GetCharacterCount(char, wordList):\n charCount = 0\n for word in wordList:\n count = Counter(word)\n if count[char] > 0:\n charCount += 1\n return charCount\n\ndef Make2DDataArray(fileData):\n dataArray = []\n for fileLine in fileData:\n dataArray.append(splitInt(fileLine.strip()))\n\n return dataArray\n\ndef AddCountToDict(dict:dict, key, count):\n if key in dict.keys():\n dict[key] += count\n else:\n dict[key] = count\n\ndef MakeListInitialVal(length, initialVal):\n list = [initialVal] * length\n return list\n\ndef Make2DList(rowLen, colLen, initialVal):\n list = []\n for colIndex in range(0, colLen):\n list.append(MakeListInitialVal(rowLen, initialVal))\n\n return list\n#For current day\n\ndef AddXY(a,b):\n c = (a[0] + b[0], a[1] + b[1])\n return c\n\ndef AddNeighbors(listToUpdate, currentElem, previousElem, map:list):\n north = (0, 1)\n south = (0, -1)\n east = (1, 0)\n west = (-1, 0)\n\n northElem = AddXY(currentElem, 
north)\n southElem = AddXY(currentElem, south)\n eastElem = AddXY(currentElem, east)\n westElem = AddXY(currentElem, west)\n\n if northElem != previousElem and IsValidCoordinateTuple(map, northElem) and northElem not in listToUpdate:\n listToUpdate.append(northElem)\n if southElem != previousElem and IsValidCoordinateTuple(map, southElem) and southElem not in listToUpdate:\n listToUpdate.append(southElem)\n if eastElem != previousElem and IsValidCoordinateTuple(map, eastElem) and eastElem not in listToUpdate:\n listToUpdate.append(eastElem)\n if westElem != previousElem and IsValidCoordinateTuple(map, westElem) and westElem not in listToUpdate:\n listToUpdate.append(westElem)\n\ndef UpdateNeighbors(listToUpdate:list, heatMap:list, cavernMap:list):\n if len(listToUpdate) == 0:\n return\n lowestElem = listToUpdate[0]\n lowestVal = GetValAtCoordinate(heatMap,lowestElem)\n for elem in listToUpdate:\n if lowestVal == -1:\n lowestVal = GetValAtCoordinate(heatMap, elem)\n lowestElem = elem\n else:\n currentVal = GetValAtCoordinate(heatMap,elem)\n if currentVal < lowestVal:\n lowestVal = currentVal\n lowestElem = elem\n listToUpdate.remove(lowestElem)\n elemToUpdate = lowestElem\n if not IsValidCoordinateTuple(heatMap,elemToUpdate):\n return\n\n north = (0,1)\n south = (0,-1)\n east = (1, 0)\n west = (-1,0)\n\n northElem = AddXY(elemToUpdate, north)\n southElem = AddXY(elemToUpdate, south)\n eastElem = AddXY(elemToUpdate, east)\n westElem = AddXY(elemToUpdate, west)\n\n currentHeatVal = GetValAtCoordinate(heatMap,elemToUpdate)\n\n if IsValidCoordinateTuple(cavernMap, northElem):\n heatMapVal = GetValAtCoordinate(heatMap,northElem)\n riskVal = GetValAtCoordinate(cavernMap,northElem)\n newPossibleHeatVal = currentHeatVal + riskVal\n if heatMapVal == -1 or heatMapVal > newPossibleHeatVal:\n SetValAtCoordinate(heatMap, northElem, newPossibleHeatVal)\n listToUpdate.append(northElem)\n\n if IsValidCoordinateTuple(cavernMap, southElem):\n heatMapVal = GetValAtCoordinate(heatMap,southElem)\n riskVal = GetValAtCoordinate(cavernMap,southElem)\n newPossibleHeatVal = currentHeatVal + riskVal\n if heatMapVal == -1 or heatMapVal > newPossibleHeatVal:\n SetValAtCoordinate(heatMap, southElem, newPossibleHeatVal)\n listToUpdate.append(southElem)\n\n if IsValidCoordinateTuple(cavernMap, eastElem):\n heatMapVal = GetValAtCoordinate(heatMap,eastElem)\n riskVal = GetValAtCoordinate(cavernMap,eastElem)\n newPossibleHeatVal = currentHeatVal + riskVal\n if heatMapVal == -1 or heatMapVal > newPossibleHeatVal:\n SetValAtCoordinate(heatMap, eastElem, newPossibleHeatVal)\n listToUpdate.append(eastElem)\n\n if IsValidCoordinateTuple(cavernMap, westElem):\n heatMapVal = GetValAtCoordinate(heatMap,westElem)\n riskVal = GetValAtCoordinate(cavernMap,westElem)\n newPossibleHeatVal = currentHeatVal + riskVal\n if heatMapVal == -1 or heatMapVal > newPossibleHeatVal:\n SetValAtCoordinate(heatMap, westElem, newPossibleHeatVal)\n listToUpdate.append(westElem)\n\n return\n\n\n\ndef MakeHeatMap(cavernMap:list, startPostion:tuple):\n\n listToUpdate = []\n heatMap = Make2DList( len(cavernMap[0]), len(cavernMap), -1)\n SetValAtCoordinate(heatMap, startPostion, GetValAtCoordinate(cavernMap,startPostion))\n listToUpdate.append(startPostion)\n\n\n\n while( len(listToUpdate) > 0):\n UpdateNeighbors(listToUpdate, heatMap, cavernMap)\n\n return heatMap\n\ndef MakeDay2CavernMap( cavernMap):\n rowLen = len(cavernMap[0])\n colLen = len(cavernMap)\n totalRowLen = len(cavernMap[0])*5\n totalColLen = len(cavernMap)*5\n\n BigMap = 
Make2DList(totalRowLen, totalColLen, 0)\n\n for rowIndex in range(0, len(BigMap)):\n for colIndex in range(0, len(BigMap[0])):\n rowMod = rowIndex % rowLen\n colMod = colIndex % colLen\n rowDiv = rowIndex // rowLen\n colDiv = colIndex // colLen\n newVal = GetValAtCoordinate(cavernMap, (colMod,rowMod)) + rowDiv + colDiv\n if newVal > 9:\n newVal %= 10\n newVal += 1\n SetValAtCoordinate(BigMap, (colIndex,rowIndex), newVal)\n\n return BigMap\n\n\n\ndef SolveDayPartA(filepath):\n with open(filepath, \"r\") as openedFile:\n fileData = openedFile.readlines()\n\n cavernMap = Make2DDataArray(fileData)\n\n startPostion = (0,0)\n endPosition = (len(cavernMap[0]) - 1, len(cavernMap) - 1)\n\n heatMap = MakeHeatMap(cavernMap, endPosition)\n lowestRisk = heatMap[0][0]\n return lowestRisk - GetValAtCoordinate(cavernMap,startPostion)\n\n\n\ndef SolveDayPartB(filepath):\n with open(filepath, \"r\") as openedFile:\n fileData = openedFile.readlines()\n\n cavernMap = Make2DDataArray(fileData)\n\n cavernMap = MakeDay2CavernMap(cavernMap)\n startPostion = (0,0)\n endPosition = (len(cavernMap[0]) - 1, len(cavernMap) - 1)\n\n heatMap = MakeHeatMap(cavernMap, endPosition)\n lowestRisk = heatMap[0][0]\n return lowestRisk - GetValAtCoordinate(cavernMap,startPostion)\n\nfilePath = \"C:\\\\dev\\\\AdventOfCode\\\\Input\\\\Day15.txt\"\nprint_count(SolveDayPartA(filePath))\nprint_count(SolveDayPartB(filePath))","repo_name":"JSarasua/AdventOfCode2021","sub_path":"Source/Day15.py","file_name":"Day15.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35998864785","text":"import cv2\nimport numpy as np\ncounter = 0\nfield_corners = np.empty([4,2], dtype = \"float32\")\n\n'''\n手动标记足球场的顶点,和获取坐标值\n'''\n\ndef click(event, x, y, flags, param):\n videoFile = '../vid/panorama.avi'\n vid_cap = cv2.VideoCapture(videoFile)\n if vid_cap.isOpened():\n _,first_frame = vid_cap.read()\n first_frame = cv2.resize(first_frame, (2000,600))\n hsv = cv2.cvtColor(first_frame, cv2.COLOR_BGR2HSV)\n\n global counter\n if (event == cv2.EVENT_LBUTTONUP):\n if (counter >=4):\n print (\"Press any key to continue\")\n else:\n field_corners[counter,:] = [x,y]\n print(x,y)\n print(\"hue: \",hsv[y,x,0])\n counter +=1\n\ndef getground():\n print (\"Select the four corners from the Background\")\n print (\"The corners should be selected: Left-Down, Left-Top, Right-Top, Right-Down\")\n videoFile = '../vid/panorama.avi'\n vid_cap = cv2.VideoCapture(videoFile)\n if vid_cap.isOpened():\n _,first_frame = vid_cap.read()\n first_frame = cv2.resize(first_frame, (2000,600))\n hsv = cv2.cvtColor(first_frame, cv2.COLOR_BGR2HSV)\n cv2.namedWindow('Top')\n cv2.setMouseCallback('Top', click, None)\n cv2.imshow('Top', first_frame)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n#getground()\n\n#top view ground corner coordinate (42,34) (42,392) (598,34) (598,392)","repo_name":"wangyimosquito/SJTU-CS386-Real_Time_Football_Tracking_System","sub_path":"src/getTopViewGround.py","file_name":"getTopViewGround.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"5870232638","text":"import os\nimport threading\nfrom urllib import unquote_plus\nfrom gi.repository import GObject\nfrom gi.repository import Gtk\nfrom gi.repository import Nautilus as FileManager\nfrom printit.comun import _, EXTENSIONS_FROM\nfrom printit.printdialog import PrintDialog\n\n\ndef get_files(files_in):\n 
files = []\n for file_in in files_in:\n print(file_in)\n file_in = unquote_plus(file_in.get_uri()[7:])\n if os.path.isfile(file_in):\n files.append(file_in)\n return files\n\n\nclass PrintItMenuProvider(GObject.GObject, FileManager.MenuProvider):\n \"\"\"Implements the 'Replace in Filenames' extension to the File Manager\n right-click menu\"\"\"\n\n def __init__(self):\n \"\"\"File Manager crashes if a plugin doesn't implement the __init__\n method\"\"\"\n pass\n\n def all_files_are_images(self, items):\n for item in items:\n fileName, fileExtension = os.path.splitext(\n unquote_plus(item.get_uri()[7:]))\n if fileExtension.lower() in EXTENSIONS_FROM:\n return True\n return False\n\n def printit(self, menu, selected):\n files = get_files(selected)\n printDialog = PrintDialog(_('Print'), files)\n if printDialog.run() == Gtk.ResponseType.ACCEPT:\n printDialog.hide()\n printDialog.pprint()\n printDialog.destroy()\n\n def get_file_items(self, window, sel_items):\n \"\"\"Adds the 'Replace in Filenames' menu item to the File Manager right\n -click menu, connects its 'activate' signal to the 'run' method\n passing the selected Directory/File\"\"\"\n if self.all_files_are_images(sel_items):\n top_menuitem = FileManager.MenuItem(\n name='PrintItMenuProvider::Gtk-printit-tools',\n label=_('Print images'),\n tip=_('Tool to print images'))\n top_menuitem.connect('activate', self.printit, sel_items)\n #\n return top_menuitem,\n return\nif __name__ == '__main__':\n files = ['/home/lorenzo/Escritorio/sample1.jpg',\n '/home/lorenzo/Escritorio/sample2.jpg',\n '/home/lorenzo/Escritorio/sample1.jpg',\n '/home/lorenzo/Escritorio/sample2.jpg',\n '/home/lorenzo/Escritorio/sample1.jpg',\n '/home/lorenzo/Escritorio/sample2.jpg',\n '/home/lorenzo/Escritorio/sample1.jpg',\n '/home/lorenzo/Escritorio/sample2.jpg']\n printDialog = PrintDialog(_('Print'), files)\n if printDialog.run() == Gtk.ResponseType.ACCEPT:\n printDialog.hide()\n t = threading.Thread(target=printDialog.pprint)\n #t.daemon = True\n t.start()\n #printDialog.pprint()\n","repo_name":"atareao/nautilus-printit","sub_path":"src/nautilus-printit.py","file_name":"nautilus-printit.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32684195671","text":"#!/usr/bin/env python3\n\nimport sys\n\nfor numbers in sys.stdin:\n numbers = numbers.strip()\n num_split = numbers.split()\n base = num_split[1]\n num = num_split[0]\n conv = 0\n\n i = 1\n while i < (len(num) + 1):\n digit = num[len(num) - i]\n conv = conv + ((int(base) ** (i - 1)) * int(digit))\n i = i + 1\n print (conv)\n","repo_name":"thomashazekamp/CA117","sub_path":"01week/labsheet-1.1/conv2decimal_011.py","file_name":"conv2decimal_011.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35403150833","text":"import unittest\nfrom foobar import FoobarClass \n \n \nclass FooBarTest(unittest.TestCase):\n \n def setUp(self):\n \n self.foo = FoobarClass()\n\n def test_read_data(self):\n\n # check for empty dictionary \n\n self.foo.code_map = {}\n self.assertEqual(list,type(self.foo.read_data()))\n\n # check for different data type (list, str, int) \n self.foo.code_map = 0\n self.assertEqual([],self.foo.read_data())\n\n self.foo.code_map =''\n self.assertEqual([],self.foo.read_data())\n\n self.foo.code_map = []\n self.assertEqual([],self.foo.read_data())\n\n def test_compute_score(self):\n\n # check for 
zero divison \n\n self.foo.max_score=0\n self.assertEqual(list,type(self.foo.compute_score()))\n\n # check for different data type (list, str, int) \n\n self.foo.max_score=''\n self.assertEqual(list,type(self.foo.compute_score()))\n\n self.foo.max_score={}\n self.assertEqual(list,type(self.foo.compute_score()))\n\n\n\n\n\n\n\n\nif __name__=='__main__':\n unittest.main()\n","repo_name":"ashutoshkarna03/foobar","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8498432511","text":"# ---------------------------------------------------------\n# Imports\n# ---------------------------------------------------------\n\nimport click\nimport json\nimport os\nimport unittest\nfrom flask import Flask, request, abort, jsonify\nfrom flask_cors import CORS\nfrom .models import Actor, Movie, setup_db\nfrom .auth.auth import *\n\n# ---------------------------------------------------------\n# Config\n# ---------------------------------------------------------\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n app.url_map.strict_slashes = False\n setup_db(app)\n\n # CORS app\n cors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n # CORS Headers\n @app.after_request\n def after_request(response):\n response.headers.add(\n 'Access-Control-Allow-Headers',\n 'Content-Type,Authorization,true'\n )\n response.headers.add(\n 'Access-Control-Allow-Methods',\n 'GET,PATCH,POST,DELETE,OPTIONS'\n )\n return response\n\n# ---------------------------------------------------------\n# Routes\n# ---------------------------------------------------------\n\n # GET endpoint for list of actors in database.\n @app.route('/actors', methods=['GET'])\n def get_actors():\n actors = Actor.query.all()\n\n if not actors:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'actors': [actor.format() for actor in actors]\n }), 200\n\n # GET endpoint for list of movies in database.\n @app.route('/movies', methods=['GET'])\n def get_movies():\n movies = Movie.query.all()\n\n if not movies:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'movies': [movie.format() for movie in movies]\n }), 200\n\n # POST endpoint to add an actor to the database.\n @app.route('/add-actor', methods=['POST'])\n @requires_auth('post:actors')\n def add_actor():\n data = request.get_json()\n\n if 'name' not in data:\n abort(422)\n if 'age' not in data:\n abort(422)\n if 'gender' not in data:\n abort(422)\n\n actor = Actor(\n name=data['name'],\n age=data['age'],\n gender=data['gender']\n )\n actor.insert()\n\n return jsonify({\n 'success': True,\n 'actor': actor.format()\n }), 200\n\n # POST endpoint to add a movie to the database.\n @app.route('/add-movie', methods=['POST'])\n @requires_auth('post:movies')\n def add_movie():\n data = request.get_json()\n\n if 'title' not in data:\n abort(422)\n if 'release' not in data:\n abort(422)\n\n movie = Movie(title=data['title'], release=data['release'])\n movie.insert()\n\n return jsonify({\n 'success': True,\n 'movie': movie.format()\n }), 200\n\n # PATCH endpoint to update an actor in the database.\n @app.route('/actors/', methods=['PATCH'])\n @requires_auth('patch:actor')\n def update_actor(actor_id):\n if not actor_id:\n abort(404)\n\n actor = Actor.query.get(actor_id)\n if not actor:\n abort(404)\n\n data = request.get_json()\n\n if 'name' in data and data['name']:\n actor.name = data['name']\n\n if 'age' 
in data and data['age']:\n actor.age = data['age']\n\n if 'gender' in data and data['gender']:\n actor.gender = data['gender']\n\n actor.update()\n\n return jsonify({\n 'success': True,\n 'actor': actor.format(),\n }), 200\n\n # PATCH endpoint to update a movie in the database.\n @app.route('/movies/', methods=['PATCH'])\n @requires_auth('patch:movie')\n def update_movie(movie_id):\n if not movie_id:\n abort(404)\n\n movie = Movie.query.get(movie_id)\n if not movie:\n abort(404)\n\n data = request.get_json()\n\n if 'title' in data and data['title']:\n movie.title = data['title']\n\n if 'release' in data and data['release']:\n movie.release = data['release']\n\n movie.update()\n\n return jsonify({\n 'success': True,\n 'movie': movie.format(),\n }), 200\n\n # DELETE endpoint to delete actors in the database.\n @app.route('/actors/', methods=['DELETE'])\n @requires_auth('delete:actor')\n def delete_actor(actor_id):\n if not actor_id:\n abort(404)\n\n actor_to_delete = Actor.query.get(actor_id)\n if not actor_to_delete:\n abort(404)\n\n actor_to_delete.delete()\n\n return jsonify({\n 'success': True,\n 'actor_id': actor_id\n }), 200\n\n # DELETE endpoint to delete movies in the database.\n @app.route('/movies/', methods=['DELETE'])\n @requires_auth('delete:movie')\n def delete_movie(movie_id):\n if not movie_id:\n abort(404)\n\n movie_to_delete = Movie.query.get(movie_id)\n if not movie_to_delete:\n abort(404)\n\n movie_to_delete.delete()\n\n return jsonify({\n 'success': True,\n 'movie_id': movie_id\n }), 200\n\n# ---------------------------------------------------------\n# Error Handling\n# ---------------------------------------------------------\n @app.errorhandler(401)\n def not_authorized(error):\n return jsonify({\n \"success\": False,\n \"error\": 401,\n \"message\": \"Authentication error.\"\n }), 401\n\n @app.errorhandler(403)\n def forbidden(error):\n return jsonify({\n \"success\": False,\n \"error\": 403,\n \"message\": \"Forbidden.\"\n }), 403\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Item not found.\"\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"Request could not be processed.\"\n }), 422\n\n @app.errorhandler(AuthError)\n def auth_error(error):\n return jsonify({\n 'success': False,\n 'error': error.status_code,\n 'message': error.error['description']\n }), error.status_code\n\n# ---------------------------------------------------------\n# Launch\n# ---------------------------------------------------------\n\n return app\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"varlese/FSND-capstone","sub_path":"agency/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"42827795625","text":"import torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom qmodels import QGenerator, QDiscriminator, QDiscriminator_mmd\nfrom torch.autograd.variable import Variable\n\nclass GAN():\n def __init__(self, a_generator:nn.Module, a_discriminator:nn.Module, lr=0.05, config=None):\n self.generator = a_generator\n self.discriminator = a_discriminator\n self.lr = lr\n self.g_optimizer = optim.Adam(self.generator.parameters(), lr=self.lr, betas=(0.5,0.999))\n self.d_optimizer = optim.Adam(self.discriminator.parameters(), lr=self.lr, betas=(0.5,0.999))\n # self.d_optimizer = 
optim.SGD(self.discriminator.parameters(), lr=self.lr)\n # self.loss = nn.BCEWithLogitsLoss()\n # self.loss = nn.BCELoss(reduction='mean')\n self.loss = nn.BCEWithLogitsLoss(reduction='mean')\n self.config = config\n\n def train_discriminator(self, real_data, fake_data):\n self.d_optimizer.zero_grad()\n label_as_real = Variable(torch.ones_like(real_data),requires_grad=False)\n label_as_fake = Variable(torch.zeros_like(real_data),requires_grad=False)\n\n # Measure discriminator's ability to classify real from generated samples\n real_logits = self.discriminator(real_data)\n fake_logits = self.discriminator(fake_data.detach())\n\n real_loss = self.loss(real_logits, label_as_real)\n fake_loss = self.loss(fake_logits, label_as_fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n self.d_optimizer.step()\n\n return d_loss\n\n def train_discriminator(self, real_data, fake_data):\n self.d_optimizer.zero_grad()\n label_as_real = Variable(torch.ones_like(real_data),requires_grad=False)\n label_as_fake = Variable(torch.zeros_like(real_data),requires_grad=False)\n\n # Measure discriminator's ability to classify real from generated samples\n real_logits = self.discriminator(real_data)\n fake_logits = self.discriminator(fake_data.detach())\n\n real_loss = self.loss(real_logits, label_as_real)\n fake_loss = self.loss(fake_logits, label_as_fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n self.d_optimizer.step()\n\n return d_loss\n\n\n def train_generator(self, fake_data):\n\n self.g_optimizer.zero_grad()\n\n label_as_real = Variable(torch.ones_like(fake_data))\n gen_logits = self.discriminator(fake_data)\n g_loss = self.loss(gen_logits, label_as_real)\n g_loss.backward()\n\n self.g_optimizer.step()\n return g_loss\n\n def save(self, path, epoch=0):\n torch.save({'generator_state_dict': self.generator.state_dict(),\n 'discriminator_state_dict': self.discriminator.state_dict(),\n 'generator_optimizer_state_dict': self.g_optimizer.state_dict(),\n 'discriminator_optimizer_state_dict': self.d_optimizer.state_dict(),\n 'config':self.config,\n 'epoch':epoch\n }, path)\n\n def load(self, path):\n f = torch.load(path)\n self.generator.load_state_dict(f['generator_state_dict'])\n self.discriminator.load_state_dict(f['discriminator_state_dict'])\n self.g_optimizer.load_state_dict(f['generator_optimizer_state_dict'])\n self.d_optimizer.load_state_dict(f['discriminator_optimizer_state_dict'])\n\nclass GAN_factory:\n @staticmethod\n def get_cell_type_from_config(cell_type_str):\n if cell_type_str.upper() in \"LSTM\":\n return \"LSTM\", False\n elif cell_type_str.upper() in \"BILSTM\":\n return \"LSTM\", True\n elif cell_type_str.upper() in \"GRU\":\n return \"GRU\", False\n elif cell_type_str.upper() in \"BIGRU\":\n return \"GRU\", True\n\n @staticmethod\n def default_gan(latent_dim = 10, nfeatures=1 , gen_hidRNN=100, gen_memory_layers=1,\n dis_hidRNN=100, dis_memory_layers=1, use_cuda=False, lr=0.05, config=None,\n g_cell_type='LSTM', d_cell_type='LSTM',g_dropout=0, d_dropout=0,\n use_minibatch_discrimination=False):\n\n device = torch.device('cuda' if torch.cuda.is_available() and use_cuda else 'cpu')\n\n g_cell_type, g_bidirectional = GAN_factory.get_cell_type_from_config(g_cell_type)\n d_cell_type, d_bidirectional = GAN_factory.get_cell_type_from_config(d_cell_type)\n\n generator = QGenerator(z_dim=latent_dim, output_size=nfeatures, hidRNN=gen_hidRNN, nlayers=gen_memory_layers,\n cell_type=g_cell_type, bidirectional=g_bidirectional, dropout=g_dropout).to(device)\n\n if 
use_minibatch_discrimination:\n discriminator = QDiscriminator_mmd(nfeatures=nfeatures, hidRNN=dis_hidRNN, nlayers=dis_memory_layers,\n cell_type=d_cell_type, bidirectional=d_bidirectional, dropout=d_dropout,\n mmd_kernel_size=config['minibatch_discrimination_kernel_size'],\n mmd_out_features=config['minibatch_discrimination_features']).to(device)\n else:\n discriminator = QDiscriminator(nfeatures=nfeatures, hidRNN=dis_hidRNN, nlayers=dis_memory_layers,\n cell_type=d_cell_type, bidirectional=d_bidirectional, dropout=d_dropout).to(device)\n # generator = generator.float()\n # discriminator = discriminator.float()\n gan = GAN(generator, discriminator, lr, config=config)\n gan.loss.to(device)\n # for name, param in generator.named_parameters():\n # print('\\n generator parameter: '+name)\n return gan\n\n @staticmethod\n def model_from_config(cfg):\n # only default gan supported at this time\n is_default = 'RNN' in cfg['generator_type'] and 'RNN' in cfg['discriminator_type']\n if not is_default:\n raise Exception('Only default GAN is supported at this time, please choose RNN and LSTM as type and cell for G and D')\n\n if not 'generator_dropout'in cfg: cfg['generator_dropout'] = 0\n if not 'discriminator_dropout' in cfg: cfg['discriminator_dropout'] = 0\n if not 'use_minibatch_discrimination' in cfg: cfg['use_minibatch_discrimination'] = False\n if not 'minibatch_discrimination_kernel_size' in cfg: cfg['minibatch_discrimination_kernel_size'] = 16\n if not 'minibatch_discrimination_features' in cfg: cfg['minibatch_discrimination_features'] = 200\n\n return GAN_factory.default_gan(latent_dim=cfg['latent_dimension'],\n nfeatures=cfg['nfeatures'],\n gen_hidRNN=cfg['generator_hidden_units'],\n gen_memory_layers=cfg['generator_RNN_layers'],\n dis_hidRNN=cfg['discriminator_hidden_units'],\n dis_memory_layers=cfg['discriminator_RNN_layers'],\n use_cuda=cfg['use_cuda'],\n lr=cfg['lr'],\n config=cfg,\n g_cell_type=cfg['generator_RNN_cell'],\n d_cell_type=cfg['discriminator_RNN_cell'],\n g_dropout=cfg['generator_dropout'],\n d_dropout=cfg['discriminator_dropout'],\n use_minibatch_discrimination=cfg['use_minibatch_discrimination']\n )\n\n @staticmethod\n def model_from_checkpoint(path):\n f=torch.load(path)\n the_gan = GAN_factory.model_from_config(f['config'])\n the_gan.load(path)\n return the_gan, f['config'], f['epoch']\n\ndef test_QGenerator():\n zdim = 5\n seq_length = 200\n batch = 10\n hidden_sz = 100\n ndims = 2\n # input = torch.randn(batch, seq_length, zdim)\n z = torch.ones(batch, seq_length, zdim)\n gen = QGenerator(z_dim=zdim, output_size= ndims)\n x_hat = gen(z)\n k=0\n\n\ndef test_QGDiscriminator():\n seq_length = 200\n batch = 10\n hidden_sz = 100\n ndims = 2\n # input = torch.randn(batch, seq_length, zdim)\n x = torch.ones(batch, seq_length, ndims)\n dis = QDiscriminator(nfeatures=ndims)\n x_hat = dis(x)\n k=0\n\n","repo_name":"txoritxo/CS236Project_2","sub_path":"qgan_factory.py","file_name":"qgan_factory.py","file_ext":"py","file_size_in_byte":8432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17762249194","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.forms.models import model_to_dict\nfrom rest_framework import viewsets\nfrom .serializers import ClassificacaoContasAPagarSerializer, ClassificacaoContasAReceberSerializer, ContasAPagarSerializer, ContasAReceberSerializer, FormaPagamentoRecebimentoSerializer\nfrom .models.classificacaoContasAPagar import 
ClassificacaoContasAPagar\nfrom .models.classificacaoContasAReceber import ClassificacaoContasAReceber\nfrom .models.contasAPagar import ContasAPagar\nfrom .models.contasAReceber import ContasAReceber\nfrom .models.formaPagamentoRecebimento import FormaPagamentoRecebimento\nfrom django.views.decorators.http import require_http_methods\nimport datetime\n\ndef relatorio(request):\n\t\n\tdados = {'pessoas': {id:1},\n\t\t'msg': \"Esta é uma mensagem fora do modelo\"}\n\t\n\treturn render(request, 'relatorio/index.html', dados)\n\ndef relpagar(request, dtInit, dtEnd):\n\tprint(dtInit,dtEnd)\n\tcontas = ContasAPagar.objects.filter(dataVencimento__gte=dtInit,dataVencimento__lte=dtEnd)\n\tcontas_json = []\n\tfor c in contas:\n\t\tpdic = {\n\t\t\t'id': c.id,\n\t\t\t'dataVencimento': c.dataVencimento,\n\t\t\t'dataPagamento': c.dataPagamento,\n\t\t\t'valor': c.valor,\n\t\t\t'descricaoConta': c.descricaoConta,\n\t\t\t'situacao': c.situacao,\n\t\t\t'classificacaoContasAPagar': c.classificacaoContasAPagar,\n\t\t\t'formaPagamento': c.formaPagamento\n\t\t}\n\n\t\tif c.classificacaoContasAPagar:\n \t pdic['classificacaoContasAPagar'] = model_to_dict(c.classificacaoContasAPagar)\n\n\t\tif c.formaPagamento:\n \t pdic['formaPagamento']\t= model_to_dict(c.formaPagamento)\n\t \n\t\tcontas_json.append(pdic)\n \n\t\n\tpayload = {\n\t\t'pagar': contas_json\n\t}\n\n\treturn JsonResponse(payload)\n\n\ndef relreceber(request, dtInit, dtEnd):\n\tcontas = ContasAReceber.objects.filter(dataExpectativa__gte=dtInit,dataExpectativa__lte=dtEnd)\n\tcontas_json = []\n\tfor c in contas:\n\t\tpdic = {\n\t\t\t\t'id': c.id,\n\t\t\t\t'dataExpectativa': c.dataExpectativa,\n\t\t\t\t'dataRecebimento': c.dataRecebimento,\n\t\t\t\t'valor': c.valor,\n\t\t\t\t'descricaoConta': c.descricaoConta,\n\t\t\t\t'situacao': c.situacao,\n\t\t\t\t'classificacaoContasAReceber': c.classificacaoContasAReceber,\n\t\t\t\t'formaPagamento': c.formaPagamento\n\t\t}\n\t\tif c.classificacaoContasAReceber:\n \t pdic['classificacaoContasAReceber'] = model_to_dict(c.classificacaoContasAReceber)\n\n\t\tif c.formaPagamento:\n \t pdic['formaPagamento']\t= model_to_dict(c.formaPagamento)\n\t\t\n\t\tcontas_json.append(pdic)\n \n\t\n\tpayload = {\n\t\t'receber': contas_json\n\t}\n\n\treturn JsonResponse(payload)\n\n@require_http_methods(['GET'])\ndef exibir_fluxo_caixa(request):\n\n relatorios_meses = []\n\n for mes_x in range(1, 13):\n\n saldo_inicial = 0 if mes_x == 1 else relatorio_mes['saldo_final'] \n\n saldo_final = round(saldo_inicial - ContasAPagar.objects.obter_soma_total_mes(mes_x) + ContasAReceber.objects.obter_soma_total_mes(mes_x), 2)\n\n lucro_total = round(saldo_final - saldo_inicial, 2)\n\n relatorio_mes = {\n\n \"data\" : datetime.date(1900, mes_x, 1).strftime('%B'),\n\n \"saldo_inicial\" : saldo_inicial,\n\n \"a_pagar\" : {\n \"soma_total_prevista\": ContasAPagar.objects.obter_soma_prevista_mes(mes_x),\n \"soma_total_realizada\": ContasAPagar.objects.obter_soma_realizada_mes(mes_x),\n \"soma_mensal\": ContasAPagar.objects.obter_soma_total_mes(mes_x),\n \"gastos_por_classificacao\": ContasAPagar.objects.obter_gastos_por_classificacao(mes_x),\n },\n\n \"a_receber\" : {\n \"soma_total_prevista\": ContasAReceber.objects.obter_soma_prevista_mes(mes_x),\n \"soma_total_recebida\": ContasAReceber.objects.obter_soma_realizada_mes(mes_x),\n \"soma_mensal\": ContasAReceber.objects.obter_soma_total_mes(mes_x),\n \"ganhos_por_classificacao\": ContasAReceber.objects.obter_gastos_por_classificacao(mes_x),\n },\n\n \"lucro_total\": 
lucro_total,\n\n\t\t\t\"saldo_final\" : saldo_final,\n\n } \n\n relatorios_meses.append(relatorio_mes)\n\n return render(request, 'relatorio/fluxo_caixa.html', {\"relatorios_meses\" : relatorios_meses})\t\t\n\nclass ClassificacaoContasAPagarViewSet(viewsets.ModelViewSet):\n\tqueryset = ClassificacaoContasAPagar.objects.all().order_by('descricaoClassificacao')\n\tserializer_class = ClassificacaoContasAPagarSerializer\n\nclass ClassificacaoContasAReceberViewSet(viewsets.ModelViewSet):\n\tqueryset = ClassificacaoContasAReceber.objects.all().order_by('descricaoClassificacao')\n\tserializer_class = ClassificacaoContasAReceberSerializer\n\nclass ContasAPagarViewSet(viewsets.ModelViewSet):\n\tqueryset = ContasAPagar.objects.all().order_by('descricaoConta')\n\tserializer_class = ContasAPagarSerializer\n\nclass ContasAReceberViewSet(viewsets.ModelViewSet):\n\tqueryset = ContasAReceber.objects.all().order_by('descricaoConta')\n\tserializer_class = ContasAReceberSerializer\n\nclass FormaPagamentoRecebimentoViewSet(viewsets.ModelViewSet):\n\tqueryset = FormaPagamentoRecebimento.objects.all().order_by('descricaoFormaPagamentoRecebimento')\n\tserializer_class = FormaPagamentoRecebimentoSerializer\n\t\n\n","repo_name":"RenanTafner/TrabalhoFinalPython","sub_path":"meu_projeto/meuapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39809760820","text":"from html.parser import HTMLParser\nfrom collections import OrderedDict\nfrom .node import Root, Node, Leaf\nfrom .util import void_tags\n\n\nclass Parser(object):\n def __init__(self, source):\n self.source = source\n self.node = self.root = Root()\n self.process()\n\n def html(self):\n return self.root.html()\n\n def htil(self):\n return self.root.htil()\n\n\nclass Htil(Parser):\n def __init__(self, source):\n self.file_indent = None\n self.pos = 0\n self.row = 1\n self.col = 1\n\n self.indent = 0\n self.last_indent = 0\n self.state = 'lf'\n self.attr = None\n super().__init__(source)\n\n def read_until(self, until, bt=False, drop_comments=True):\n name = ''\n escape = False\n while self.pos < len(self.source) - 1 and not (\n until(self.char) and not escape):\n if self.char == '\\n':\n self.row += 1\n if drop_comments and self.char == '/' and self.next_char == '/':\n self.pos += 2\n self.col += 2\n self.read_until(lambda c: c == '\\n')\n continue\n\n if self.char != '\\\\' or escape:\n name += self.char\n escape = False\n else:\n escape = self.char == '\\\\'\n self.pos += 1\n self.col += 1\n if bt:\n self.pos -= 1\n self.col -= 1\n return name\n\n def get_indent(self):\n return len(self.read_until(lambda c: c != ' ', True))\n\n @property\n def char(self):\n return self.source[self.pos]\n\n @property\n def next_char(self):\n return self.source[self.pos + 1] if self.pos < len(\n self.source) - 1 else None\n\n def process(self):\n assert self.get_indent() == 0\n\n while self.pos < len(self.source) - 1:\n self.pos += 1\n self.col += 1\n\n if self.char == ' ':\n pass\n elif self.char == '\\n':\n self.new_line()\n elif self.char in '\\'\"':\n self.read_data()\n elif self.char == '/' and self.next_char == '/':\n self.pos += 2\n self.col += 2\n self.read_until(lambda c: c == '\\n', True)\n continue\n elif self.state == 'lf':\n self.read_tag()\n elif self.state == 'tag':\n self.read_attr_key()\n elif self.state == 'attr_key':\n self.read_attr_value()\n\n def read_data(self):\n delim = self.char\n self.pos += 1\n self.col += 1\n 
name = self.read_until(lambda c: c == delim, False, False)\n if self.state == 'attr_key':\n self.node.attrs[self.attr] = name\n self.attr = None\n else:\n leaf = Leaf(name, self.node)\n self.node.nodes.append(leaf)\n self.node = leaf\n self.state = 'tag'\n\n def parse_tag(self, tag):\n attrs = OrderedDict()\n id = ''\n cls = []\n\n if '#' in tag:\n tag, id = tag.split('#')\n\n if '.' in tag:\n parts = tag.split('.')\n tag = parts[0]\n cls.extend(parts[1:])\n\n if '.' in id:\n parts = id.split('.')\n id = parts[0]\n cls.extend(parts[1:])\n if id:\n attrs['id'] = id\n if cls:\n attrs['class'] = ' '.join(cls)\n return tag, attrs\n\n def read_tag(self):\n tag = self.read_until(lambda c: c in ' \\n', True)\n tag, attrs = self.parse_tag(tag)\n node = Node(tag, attrs, self.node)\n self.node.nodes.append(node)\n self.node = node\n self.state = 'tag'\n\n def read_attr_key(self):\n name = self.read_until(lambda c: c in '=\\n', True)\n self.attr = name\n if self.next_char != '=':\n self.node.attrs[self.attr] = None\n else:\n self.pos += 1\n self.state = 'attr_key'\n\n def read_attr_value(self):\n name = self.read_until(lambda c: c in ' \\n', True, False)\n self.node.attrs[self.attr] = name\n self.attr = None\n self.state = 'tag'\n\n def new_line(self):\n self.col = 1\n self.row += 1\n self.pos += 1\n indent = self.get_indent()\n if self.char == '\\n':\n return\n self.last_indent = self.indent\n self.indent = indent\n if self.file_indent is None and self.indent:\n self.file_indent = self.indent\n if self.indent > self.last_indent and (\n self.indent - self.last_indent != self.file_indent) or (\n (self.last_indent - self.indent) % self.file_indent):\n raise IndentationError('Bad indent at %d:%d' % (\n self.row, self.col))\n\n if self.state in 'tag' and self.indent <= self.last_indent:\n for i in range(1 + (self.last_indent - self.indent) //\n self.file_indent):\n self.node = self.node.parent\n self.state = 'lf'\n\n\nclass Html(Parser):\n def process(self):\n html = self\n\n class HtmlParser(HTMLParser):\n def ordered_attrs(self, attrs):\n attrs = OrderedDict(attrs)\n rv = OrderedDict()\n if 'id' in attrs:\n rv['id'] = attrs['id']\n if 'class' in attrs:\n rv['class'] = attrs['class']\n rv.update(attrs)\n return rv\n\n def handle_starttag(self, tag, attrs):\n if tag in void_tags:\n self.handle_startendtag(tag, attrs)\n return\n node = Node(tag, self.ordered_attrs(attrs), html.node)\n html.node.nodes.append(node)\n html.node = node\n\n def handle_startendtag(self, tag, attrs):\n node = Node(tag, self.ordered_attrs(attrs), html.node)\n html.node.nodes.append(node)\n\n def handle_data(self, data):\n data = data.strip(' \\t\\n\\r')\n if data:\n leaf = Leaf(data, html.node)\n html.node.nodes.append(leaf)\n\n def handle_endtag(self, tag):\n if tag in void_tags:\n return\n html.node = html.node.parent\n\n HtmlParser().feed(self.source)\n\n\ndef to_htil(html):\n return Html(html).htil()\n\n\ndef to_html(htil):\n return Htil(htil).html()\n","repo_name":"paradoxxxzero/htil","sub_path":"htil/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41288773995","text":"from datetime import *\nfrom smtplib import *\nimport time\nimport requests\n\nMY_LAT = 10.786730\nMY_LONG = 76.654793\n\nmy_email = '****************@gmail.com'\nto_address = '**************@gmail.com'\npassword = '****************'\n\n\n# ------------------------------- ISS POSITION API -------------------------\ndef 
iss_close():\n response = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\n response.raise_for_status()\n\n data = response.json()\n # print(data)\n\n iss_longitude = float(data['iss_position']['longitude'])\n iss_latitude = float(data['iss_position']['latitude'])\n\n if MY_LAT - 5 <= iss_latitude >= MY_LAT + 5 and MY_LONG - 5 <= iss_longitude >= MY_LONG + 5:\n return True\n\n\n# --------------------------------- SUNRISE AND SUNSET API ----------------------\ndef is_night_time():\n params = {\n \"lat\": 'MY_LAT',\n \"lng\": 'MY_LONG',\n \"formatted\": 0\n }\n response = requests.get(url='https://api.sunrise-sunset.org/json', params=params)\n response.raise_for_status()\n\n data = response.json()\n # print(data)\n sunrise = data['results']['sunrise'].split('T')[1].split(':')[0]\n sunset = data['results']['sunset'].split('T')[1].split(':')[0]\n time_now = datetime.now().hour\n\n if time_now >= sunset or time_now <= sunrise:\n return True\n\n\n# --------------------------------- CHECKING ---------------------------------------\n\nwhile True:\n time.sleep(60) # runs every 60 seconds to check\n if iss_close() and is_night_time():\n with SMTP('smtp.gmail.com') as connection:\n connection.starttls()\n connection.login(user=my_email, password=password)\n connection.sendmail(from_addr=my_email,\n to_addrs=to_address,\n msg=\"Subject:ISS Overhead Notification 🛰ï¸�\\n\\n\"\"The International space station is over \"\n \"your head today! Have a look at the sky. \")\n\n","repo_name":"reynaroyce12/Python-API","sub_path":"ISSNotifier/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5955497565","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Flatten\nfrom keras.optimizers import Adam\nimport tensorflow as tf\nimport tensorflow_model_optimization as tfmot\nimport numpy as np\nimport os\ndirname = os.path.dirname(__file__)\n\nclass ANET:\n \n\tdef __init__(self, model=None):\n\t\tself.model = model\n\n\n\tdef make_model(self, board_size, neuron_counts=[64, 64], activation_func='relu', loss_func='categorical_crossentropy'):\n\t\tself.model = Sequential()\n\t\tinput_shape = (board_size**2 + 1, 2)\n\n\t\tself.model.add(Flatten(input_shape=input_shape))\n\n\t\tfor count in neuron_counts:\n\t\t\tself.model.add(Dense(count, activation=activation_func))\n\n\t\tself.model.add(Dense(board_size**2, activation='softmax'))\n\n\t\toptimizer = Adam(learning_rate=0.0001) # Var 0.0001\n\t\tself.model.compile(optimizer=optimizer, loss=loss_func, metrics=['accuracy'])\n\t\tself.convert_to_tflite()\n\n\n\tdef one_hot_tuple(self, tuple):\n\t\tnew_tuple = []\n\t\tfor i in tuple:\n\t\t\tif i == 0:\n\t\t\t\tnew_tuple.append([0, 0])\n\t\t\telif i == 1:\n\t\t\t\tnew_tuple.append([1, 0])\n\t\t\telif i == 2:\n\t\t\t\tnew_tuple.append([0, 1])\n\t\treturn new_tuple\n\n\n\tdef one_hot_tuple_list(self, lst):\n\t\tnew_lst = []\n\t\tfor i in range(len(lst)):\n\t\t\trow = []\n\t\t\tfor j in range(len(lst[0])):\n\t\t\t\tif lst[i][j] == 0:\n\t\t\t\t\trow.append([0, 0])\n\t\t\t\telif lst[i][j] == 1:\n\t\t\t\t\trow.append([1, 0])\n\t\t\t\telif lst[i][j] == 2:\n\t\t\t\t\trow.append([0, 1])\n\t\t\tnew_lst.append(row)\n\t\treturn new_lst\n\t\n\n\tdef get_tuples(self, data):\n\t\ttuples = []\n\t\tfor d in data:\n\t\t\tflat_board = sum(d[0], [])\n\t\t\tplayer = d[1]\n\t\t\tflat_board.insert(0, player)\n\t\t\ttuple = 
flat_board\n\t\t\ttuples.append(self.one_hot_tuple(tuple))\n\t\treturn tuples\n\n\n\tdef fit(self, x, y, epochs=10, batch_size=1):\n\t\ttuples = self.get_tuples(x)\n\t\tx_processed = np.array(tuples)\n\t\t#x_processed = np.array(self.one_hot_tuple_list(tuples))\n\t\ty_processed = np.array(y)\n\t\tprint(f'x shape: {x_processed.shape}')\n\t\tprint(f'y shape: {y_processed.shape}')\n\t\tself.model.fit(x_processed, y_processed, epochs, batch_size)\n\t\t#self.convert_to_tflite()\n\n\tdef predict(self, x):\n\t\tx_processed = np.array([self.one_hot_tuple(x)])\n\t\t#return self.predict_with_tflite(x)\n\t\tresult = self.model(x_processed)\n\t\t#result = self.model.predict(x_processed, verbose=0)\n\t\treturn result.numpy()\n\n\tdef save(self, name):\n\t\tself.model.save(name)\n\n\n\tdef convert_to_tflite(self):\n\t\t# Apply pruning\n\t\t\"\"\"\n\t\tpruning_params = {\n 'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,\n final_sparsity=0.80,\n begin_step=0,\n end_step=1000)\n }\n\t\tmodel_for_pruning = tfmot.sparsity.keras.prune_low_magnitude(self.model, **pruning_params)\n\n # Apply quantization\n\t\tconverter = tf.lite.TFLiteConverter.from_keras_model(model_for_pruning)\n\t\tconverter.optimizations = [tf.lite.Optimize.DEFAULT]\n\t\tconverter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n\t\tconverter.inference_input_type = tf.int8\n\t\tconverter.inference_output_type = tf.int8\n\n\t\t\"\"\"\n \t# Convert the Keras model to a TensorFlow Lite model\n\t\tconverter = tf.lite.TFLiteConverter.from_keras_model(self.model)\n\t\ttflite_model = converter.convert()\n\n \t# Save the TensorFlow Lite model to a file\n\t\tpath = os.path.join(dirname, 'current_ANET.tflite')\n\t\twith open(path, 'wb') as f:\n\t\t\tf.write(tflite_model)\n\n\n\tdef predict_with_tflite(self, x):\n\t\tx_processed = np.array(self.one_hot_tuple(x)).astype(np.float32)\n\t\t# Load the TensorFlow Lite model\n\t\tpath = os.path.join(dirname, 'current_ANET.tflite')\n\t\tinterpreter = tf.lite.Interpreter(model_path=path)\n\t\tinterpreter.allocate_tensors()\n\n\t\t# Get input and output tensors\n\t\tinput_details = interpreter.get_input_details()\n\t\toutput_details = interpreter.get_output_details()\n\n\t\t# Make a prediction\n\t\tinterpreter.set_tensor(input_details[0]['index'], x_processed)\n\t\tinterpreter.invoke()\n\t\toutput_data = interpreter.get_tensor(output_details[0]['index'])\n\n\t\treturn output_data","repo_name":"Jonathanhelgesen/Hex-AI","sub_path":"NeuralNetworks/ANET.py","file_name":"ANET.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6095148788","text":"import math\r\nclass rdh_class:\r\n def __init__(self):\r\n self.x = 100\r\n self.y = 140\r\n self.speed = 3\r\n self.jump = False\r\n self.jump_vel = 5\r\n self.move_left = False\r\n self.move_right = False\r\n self.left = False\r\n self.right = True\r\n\r\n #HEALTH\r\n self.health = 8\r\n self.death = False\r\n #COMBAT\r\n self.player_dmg = 100\r\n self.player_get_dmg = False\r\n self.cmb1 = False\r\n\r\n\r\n self.camera_x = 0\r\n self.camera_y = 0\r\n #=-=\r\n self.run_left_list = []\r\n self.run_right_list = []\r\n self.r_frame_left = 0\r\n self.r_frame_right = 0\r\n #=-=\r\n self.idle_left_list = []\r\n self.idle_right_list = []\r\n self.i_frame_left = 0\r\n self.i_frame_right = 0\r\n #=-=\r\n self.jump_left_list = []\r\n self.jump_right_list = []\r\n self.j_frame_left = 0\r\n self.j_frame_right = 0\r\n #=-=\r\n 
self.cmb1_left_list = []\r\n self.cmb1_right_list = []\r\n self.c1_frame_left = 0\r\n self.c1_frame_right = 0\r\n #=-=\r\n self.dmg_left_list = []\r\n self.dmg_right_list = []\r\n self.d_frame_left = 0\r\n self.d_frame_right = 0\r\n #=-=\r\n self.death_left_list = []\r\n self.death_right_list = []\r\n self.death_frame_left = 0\r\n self.death_frame_right = 0\r\n\r\n self.r,self.g,self.b = (255,0,0),(0,255,0),(0,0,255)\r\n self.rect = ((self.x - self.camera_x + 2, self.y + 16 , 16,20))\r\n\r\n #self.cmb_rect_left = ((self.x - self.camera_x - 15, self.y + 20 , 15,5))\r\n #self.cmb_rect_right = ((self.x - self.camera_x + 25, self.y + 20 , 15,5))\r\n\r\n self.show_rect = False\r\n def update(self, pg, window):\r\n self.move_left = False\r\n self.move_right = False\r\n\r\n\r\n #RECTS\r\n\r\n self.rect = pg.Rect(self.x - self.camera_x + 2, self.y + 16 , 16,20)\r\n self.cmb_rect_left = pg.Rect(self.x - self.camera_x - 13, self.y + 20 , 15,5)\r\n self.cmb_rect_right = pg.Rect(self.x - self.camera_x + 18, self.y + 20 , 15,5)\r\n\r\n if self.show_rect == True:\r\n self.rect = pg.draw.rect(window, (self.b),(self.rect),1)\r\n\r\n if self.left == True:\r\n self.cmb_rect_left = pg.draw.rect(window, (self.r),(self.cmb_rect_left),1)\r\n\r\n if self.right == True:\r\n self.cmb_rect_right = pg.draw.rect(window, (self.r),(self.cmb_rect_right),1)\r\n\r\n\r\n self.keyinput = pg.key.get_pressed()\r\n #X,Y MOVEMENT\r\n\r\n\r\n #print(self.c1_frame_right)\r\n #print(self.x,self.speed)\r\n\r\n #RUN ANIMATION\r\n def run_left_anim(self, pg, window):\r\n for num in range(1,24 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_run{num}.png\")\r\n image.set_colorkey((255,0,255))\r\n self.run_left_list.append(image)\r\n window.blit(self.run_left_list[int(self.r_frame_left)], (self.x - self.camera_x, self.y))\r\n self.r_frame_left += 00.5\r\n\r\n def run_right_anim(self, pg, window):\r\n for num in range(1,24 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_run{num}.png\")\r\n image = pg.transform.flip(image, True, False)\r\n image.set_colorkey((255,0,255))\r\n self.run_right_list.append(image)\r\n window.blit(self.run_right_list[int(self.r_frame_right)], (self.x - self.camera_x, self.y))\r\n self.r_frame_right += 00.5\r\n\r\n #IDLE ANIMATION\r\n def idle_left_anim(self, pg, window):\r\n for num in range(1,18 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_idle{num}.png\")\r\n image = pg.transform.flip(image, True, False)\r\n image.set_colorkey((255,0,255))\r\n self.idle_left_list.append(image)\r\n window.blit(self.idle_left_list[int(self.i_frame_left)],(self.x - self.camera_x, self.y))\r\n self.i_frame_left += 00.3\r\n\r\n def idle_right_anim(self, pg, window):\r\n for num in range(1,18 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_idle{num}.png\")\r\n image.set_colorkey((255,0,255))\r\n self.idle_right_list.append(image)\r\n window.blit(self.idle_right_list[int(self.i_frame_right)],(self.x - self.camera_x, self.y))\r\n self.i_frame_right += 00.3\r\n\r\n #JUMP ANIMATION\r\n def jump_left_anim(self, pg,window):\r\n for num in range(1,18 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_jump{num}.png\")\r\n image = pg.transform.flip(image, True, False)\r\n image.set_colorkey((255,0,255))\r\n self.jump_left_list.append(image)\r\n window.blit(self.jump_left_list[int(self.j_frame_left)],(self.x - self.camera_x, self.y))\r\n self.j_frame_left += 00.60\r\n\r\n\r\n def jump_right_anim(self, pg,window):\r\n for num in range(1,18 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_jump{num}.png\")\r\n 
image.set_colorkey((255,0,255))\r\n self.jump_right_list.append(image)\r\n window.blit(self.jump_right_list[int(self.j_frame_right)],(self.x - self.camera_x, self.y))\r\n self.j_frame_right += 00.60\r\n\r\n #COMBO 1 ANIMATION\r\n def cmb1_left_anim(self, pg,window):\r\n for num in range(1,22 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_firstcmb{num}.png\")\r\n image.set_colorkey((255,0,255))\r\n self.cmb1_left_list.append(image)\r\n window.blit(self.cmb1_left_list[int(self.c1_frame_left)],(self.x - self.camera_x - 10, self.y))\r\n self.c1_frame_left += 00.3\r\n\r\n def cmb1_right_anim(self, pg,window):\r\n for num in range(1,22 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_firstcmb{num}.png\")\r\n image = pg.transform.flip(image, True, False)\r\n image.set_colorkey((255,0,255))\r\n self.cmb1_right_list.append(image)\r\n window.blit(self.cmb1_right_list[int(self.c1_frame_right)],(self.x - self.camera_x - 10, self.y))\r\n self.c1_frame_right += 00.3\r\n\r\n #DAMAGE ANIMATION\r\n def dmg_left_anim(self, pg,window):\r\n for num in range(1,6 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_getdmg{num}.png\")\r\n image.set_colorkey((255,0,255))\r\n self.dmg_left_list.append(image)\r\n window.blit(self.dmg_left_list[int(self.d_frame_left)],(self.x - self.camera_x, self.y))\r\n self.d_frame_left += 00.3\r\n\r\n def dmg_right_anim(self, pg,window):\r\n for num in range(1,6 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_getdmg{num}.png\")\r\n image = pg.transform.flip(image, True, False)\r\n image.set_colorkey((255,0,255))\r\n self.dmg_right_list.append(image)\r\n window.blit(self.dmg_right_list[int(self.d_frame_right)],(self.x - self.camera_x, self.y))\r\n self.d_frame_right += 00.3\r\n\r\n #DEATH ANIMATION\r\n def death_left_anim(self, pg,window):\r\n for num in range(1,21 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_death{num}.png\")\r\n image = pg.transform.flip(image, True, False)\r\n image.set_colorkey((255,0,255))\r\n self.death_left_list.append(image)\r\n window.blit(self.death_left_list[int(self.death_frame_left)],(self.x - self.camera_x, self.y + 15))\r\n self.death_frame_left += 00.4\r\n\r\n def death_right_anim(self, pg,window):\r\n for num in range(1,21 + 1):\r\n image = pg.image.load(f\"data/rdh_anim/rdh_death{num}.png\")\r\n image.set_colorkey((255,0,255))\r\n self.death_right_list.append(image)\r\n window.blit(self.death_right_list[int(self.death_frame_right)],(self.x - self.camera_x, self.y + 15))\r\n self.death_frame_right += 00.4\r\n\r\n\r\nrdh = rdh_class()\r\n\r\n\r\nclass healthbar_class():\r\n def __init__(self, x, y):\r\n self.pos_x = x\r\n self.pos_y = y\r\n\r\n #ANIMATION\r\n self.bar_list_animation = []\r\n self.hb_frame = 0\r\n\r\n self.heart_list = []\r\n self.heart_frame = 0\r\n\r\n\r\n def hb_animation(self, pg, window):\r\n for i in range(1, 17 + 1):\r\n image = pg.image.load(f\"data/bin/hb_animation/healthbar{i}.png\")\r\n self.bar_list_animation.append(image)\r\n window.blit(self.bar_list_animation[int(self.hb_frame)], (self.pos_x, self.pos_y))\r\n self.hb_frame += 0.5\r\n #print(rdh.health)\r\n\r\n\r\n def heart_animation(self, pg, window):\r\n for i in range(1, 7 + 1):\r\n image = pg.image.load(f\"data/bin/hb_animation/heart_anim{i}.png\")\r\n self.heart_list.append(image)\r\n\r\n for j in range(rdh.health):\r\n window.blit(self.heart_list[int(self.heart_frame)], (self.pos_x + j * 9 + 24, self.pos_y + 19))\r\n\r\n self.heart_frame += 0.2\r\n\r\nhealthbar = 
healthbar_class(5,5)\r\n","repo_name":"ragej4x/RedRidingHood-Mobile","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":8886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33466046802","text":"from pathlib import Path\nimport random\nimport shutil\n\nimport click\nimport lazycon\nimport numpy as np\nimport torch\n\nfrom core.learner import Learner\n\n\n@click.command()\n@click.option(\n \"-config\",\n \"--config_path\",\n required=True,\n type=click.Path(exists=True),\n help=\"path to .config file\",\n)\n@click.option(\n \"-exp_path\",\n \"--exp_path\",\n required=True,\n type=click.Path(),\n help=\"path to dump experiment\",\n)\ndef train_model(config_path, exp_path):\n exp_path = Path(exp_path)\n model_name = exp_path.name\n cfg = lazycon.load(config_path)\n base_path = cfg.base_path\n assert (\n base_path.exists()\n ), f\"{base_path} doesn't exist. Correct base_path in configs/data.config\"\n\n exp_path.mkdir(parents=True, exist_ok=True)\n\n # dump params\n # save compiled config\n cfg.dump(exp_path / \"train.config\")\n\n # dump jsonls\n shutil.copy(cfg.train_manifest_path, exp_path / \"train.jsonl\")\n shutil.copy(cfg.val_manifest_path, exp_path / \"val.jsonl\")\n\n model = cfg.model\n\n # load pretrained model\n if cfg.pt_model_path is not None:\n model.load_state_dict(torch.load(cfg.pt_model_path, map_location=\"cuda:0\"))\n shutil.copy(cfg.pt_model_path, exp_path / \"pt_model\")\n\n # init learner\n learner = Learner(\n train_dataset=cfg.train_dataset,\n val_dataset=cfg.val_dataset,\n dataloaders=cfg.dataloaders,\n exp_path=exp_path,\n model_name=model_name,\n model=model,\n batch_size=cfg.batch_size,\n dump_best_checkpoints=cfg.DUMP_BEST_CHECKPOINTS,\n dump_last_checkpoints=cfg.DUMP_LAST_CHECKPOINTS,\n best_checkpoints_warmup=cfg.BEST_CHECKPOINTS_WARMUP,\n )\n\n # train\n best_model_wts = learner.train(\n num_epochs=cfg.epoch_count,\n lr=cfg.learning_rate,\n step_size=cfg.optimizer_step,\n gamma=cfg.optimizer_gamma,\n weight_decay=cfg.weight_decay,\n clip_grad=cfg.clip_grad,\n )\n\n # dump best model\n torch.save(best_model_wts, exp_path / model_name)\n\n\nif __name__ == \"__main__\":\n # fix seeds for reproducibility\n torch.manual_seed(0)\n random.seed(0)\n np.random.seed(0)\n torch.backends.cudnn.benchmark = False\n torch.use_deterministic_algorithms(True)\n\n train_model()\n","repo_name":"salute-developers/golos","sub_path":"dusha/experiments/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"61"} +{"seq_id":"34515173704","text":"# Python3 program to solve\n# Traveling Salesman Problem using\n# Branch and Bound.\nimport math\nimport os\nimport sys\nimport time\nfrom configparser import ConfigParser\n\nfrom wmi import WMI\n\nimport my_writer\n\nmaxsize = float('inf')\n# Adjacency matrix for the given graph\nadj = [[0, 10, 15, 20],\n [10, 0, 35, 25],\n [15, 35, 0, 30],\n [20, 25, 30, 0]]\n\nN = 4\n\n# final_path[] stores the final solution\n# i.e. 
the // path of the salesman.\nfinal_path = [None] * (N + 1)\n\n# visited[] keeps track of the already\n# visited nodes in a particular path\nvisited = [False] * N\n\n# Stores the final minimum weight\n# of shortest tour.\nfinal_res = maxsize\n\n\n# Function to copy temporary solution\n# to the final solution\ndef copyToFinal(curr_path):\n final_path[:N + 1] = curr_path[:]\n final_path[N] = curr_path[0]\n\n\n# Function to find the minimum edge cost\n# having an end at the vertex i\ndef firstMin(adj, i):\n min = maxsize\n for k in range(N):\n if adj[i][k] < min and i != k:\n min = adj[i][k]\n\n return min\n\n\n# function to find the second minimum edge\n# cost having an end at the vertex i\ndef secondMin(adj, i):\n first, second = maxsize, maxsize\n for j in range(N):\n if i == j:\n continue\n if adj[i][j] <= first:\n second = first\n first = adj[i][j]\n\n elif (adj[i][j] <= second and\n adj[i][j] != first):\n second = adj[i][j]\n\n return second\n\n\n# function that takes as arguments:\n# curr_bound -> lower bound of the root node\n# curr_weight-> stores the weight of the path so far\n# level-> current level while moving\n# in the search space tree\n# curr_path[] -> where the solution is being stored\n# which would later be copied to final_path[]\ndef TSPRec(adj, curr_bound, curr_weight,\n level, curr_path, visited):\n global final_res\n\n # base case is when we have reached level N\n # which means we have covered all the nodes once\n if level == N:\n\n # check if there is an edge from\n # last vertex in path back to the first vertex\n if adj[curr_path[level - 1]][curr_path[0]] != 0:\n\n # curr_res has the total weight\n # of the solution we got\n curr_res = curr_weight + adj[curr_path[level - 1]] \\\n [curr_path[0]]\n if curr_res < final_res:\n copyToFinal(curr_path)\n final_res = curr_res\n return\n\n # for any other level iterate for all vertices\n # to build the search space tree recursively\n for i in range(N):\n\n # Consider next vertex if it is not same\n # (diagonal entry in adjacency matrix and\n # not visited already)\n if (adj[curr_path[level - 1]][i] != 0 and\n visited[i] == False):\n temp = curr_bound\n curr_weight += adj[curr_path[level - 1]][i]\n\n # different computation of curr_bound\n # for level 2 from the other levels\n if level == 1:\n curr_bound -= ((firstMin(adj, curr_path[level - 1]) +\n firstMin(adj, i)) / 2)\n else:\n curr_bound -= ((secondMin(adj, curr_path[level - 1]) +\n firstMin(adj, i)) / 2)\n\n # curr_bound + curr_weight is the actual lower bound\n # for the node that we have arrived on.\n # If current lower bound < final_res,\n # we need to explore the node further\n if curr_bound + curr_weight < final_res:\n curr_path[level] = i\n visited[i] = True\n\n # call TSPRec for the next level\n TSPRec(adj, curr_bound, curr_weight,\n level + 1, curr_path, visited)\n\n # Else we have to prune the node by resetting\n # all changes to curr_weight and curr_bound\n curr_weight -= adj[curr_path[level - 1]][i]\n curr_bound = temp\n\n # Also reset the visited array\n visited = [False] * len(visited)\n for j in range(level):\n if curr_path[j] != -1:\n visited[curr_path[j]] = True\n\n\n# This function sets up final_path\ndef TSP(adj):\n global N\n global final_res\n global final_path\n global visited\n\n N = len(adj)\n # Calculate initial lower bound for the root node\n # using the formula 1/2 * (sum of first min +\n # second min) for all edges. 
Also initialize the\n # curr_path and visited array\n curr_bound = 0\n curr_path = [-1] * (N + 1)\n visited = [False] * N\n\n # Compute initial bound\n for i in range(N):\n curr_bound += (firstMin(adj, i) +\n secondMin(adj, i))\n\n # Rounding off the lower bound to an integer\n curr_bound = math.ceil(curr_bound / 2)\n\n # We start at vertex 1 so the first vertex\n # in curr_path[] is 0\n visited[0] = True\n curr_path[0] = 0\n\n # Call to TSPRec for curr_weight\n # equal to 0 and level 1\n TSPRec(adj, curr_bound, 0, 1, curr_path, visited)\n\n\ndef best_search_init(matrix):\n global N\n global final_res\n global final_path\n global visited\n global adj\n adj = matrix\n N = len(adj)\n final_path = [None] * (N + 1)\n visited = [False] * N\n final_res = maxsize\n\n\ndef memory():\n w = WMI('.')\n result = w.query(\"SELECT WorkingSet FROM Win32_PerfRawData_PerfProc_Process WHERE IDProcess=%d\" % os.getpid())\n return int(result[0].WorkingSet)\n\n\ndef work(name, matrix):\n global N\n global final_res\n global final_path\n global visited\n\n print(\"Liczę dla: \" + name)\n\n start_time = time.time()\n best_search_init(matrix)\n TSP(matrix)\n end_time = time.time() - start_time\n\n tsp_result = [name, len(matrix), end_time, memory(), final_res, final_path]\n # tsp_result = [name, len(matrix), end_time, min(paths_costs), paths[low_cost_index]]\n return tsp_result\n\n\ndef from_txt_to_matrix(file_name):\n with open(file_name) as f:\n lines = f.readlines()\n n = int(lines[0])\n\n matrix = []\n for each in lines[1:]:\n matrix.append(each.split())\n\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n matrix[i][j] = int(matrix[i][j])\n\n return matrix\n\n\ndef main():\n # tab = [(4, 0), (6, 1), (7, 2), (8, 3), (2, 4), (12, 5), (231, 6), (1, 7)]\n # mini, parent = min(tab)\n # print(\"min: \" + str(mini))\n # print(\"parent: \" + str(parent))\n\n try:\n # Otworzenie pliku .ini i wczytanie jego parametrów\n file = 'config.ini'\n config = ConfigParser()\n config.read(file)\n\n # Zczytanie parametrów\n result_out_file_bs = 'tsp_bs3.csv'\n data0 = config['data']['tsp_6_1']\n data1 = config['data']['tsp_6_2']\n data2 = config['data']['tsp_10']\n data3 = config['data']['tsp_12']\n data4 = config['data']['tsp_13']\n data5 = config['data']['tsp_14']\n data6 = config['data']['tsp_15']\n data7 = config['data']['tsp_17']\n data8 = config['data']['gr24']\n data9 = config['data']['bays29']\n # data10 = config['data']['att48']\n # data11 = config['data']['eil51']\n # data12 = config['data']['berlin52']\n data13 = config['data']['br17']\n data14 = config['data']['ftv33']\n data15 = config['data']['ftv35']\n data16 = config['data']['ftv38']\n data17 = config['data']['p43']\n data18 = config['data']['gr21']\n data19 = config['data']['ulysses22']\n data20 = config['data']['fri26']\n\n matrix6_1_opti = config['data']['tsp_6_1_opti']\n matrix6_2_opti = config['data']['tsp_6_2_opti']\n matrix10_opti = config['data']['tsp_10_opti']\n matrix12_opti = config['data']['tsp_12_opti']\n matrix13_opti = config['data']['tsp_13_opti']\n matrix14_opti = config['data']['tsp_14_opti']\n matrix15_opti = config['data']['tsp_15_opti']\n matrix17_opti = config['data']['tsp_17_opti']\n matrix_gr24_opti = config['data']['gr24_opti']\n matrix_bays29_opti = config['data']['bays29_opti']\n # matrix_att48_opti = config['data']['att48_opti']\n # matrix_eil51_opti = config['data']['eil51_opti']\n # matrix_berlin52_opti = config['data']['berlin52_opti']\n matrix_br17_opti = config['data']['br17_opti']\n matrix_ftv33_opti = 
config['data']['ftv33_opti']\n matrix_ftv35_opti = config['data']['ftv35_opti']\n matrix_ftv38_opti = config['data']['ftv38_opti']\n matrix_p43_opti = config['data']['p43_opti']\n matrix_gr21_opti = config['data']['gr21_opti']\n matrix_ulysses22_opti = config['data']['ulysses22_opti']\n matrix_fri26_opti = config['data']['fri26_opti']\n\n matrix6_1 = from_txt_to_matrix(data0)\n matrix6_2 = from_txt_to_matrix(data1)\n matrix10 = from_txt_to_matrix(data2)\n matrix12 = from_txt_to_matrix(data3)\n matrix13 = from_txt_to_matrix(data4)\n matrix14 = from_txt_to_matrix(data5)\n matrix15 = from_txt_to_matrix(data6)\n matrix17 = from_txt_to_matrix(data7)\n matrix_gr24 = from_txt_to_matrix(data8)\n matrix_bays29 = from_txt_to_matrix(data9)\n # matrix_att48 = from_txt_to_matrix(data10)\n # matrix_eil51 = from_txt_to_matrix(data11)\n # matrix_berlin52 = from_txt_to_matrix(data12)\n matrix_br17 = from_txt_to_matrix(data13)\n matrix_ftv33 = from_txt_to_matrix(data14)\n matrix_ftv35 = from_txt_to_matrix(data15)\n matrix_ftv38 = from_txt_to_matrix(data16)\n matrix_p43 = from_txt_to_matrix(data17)\n matrix_gr21 = from_txt_to_matrix(data18)\n matrix_ulysses22 = from_txt_to_matrix(data19)\n matrix_fri26 = from_txt_to_matrix(data20)\n\n # Make analysis\n result_out_file = result_out_file_bs\n # tsp_result = work(\"matrix_my_2\", matrix_my_2)\n # my_writer.simple_write_to_csv(result_out_file, tsp_result)\n my_writer.simple_write_to_csv(result_out_file, ['', '', '', '', '', ''], '')\n\n tsp_result = work(\"matrix_my_4\", matrix_my_4)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, tsp_my_4_opti)\n\n tsp_result = work(\"matrix_my_5\", matrix_my_5)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, tsp_my_5_opti)\n\n tsp_result = work(\"matrix6_1\", matrix6_1)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix6_1_opti)\n\n tsp_result = work(\"matrix6_2\", matrix6_2)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix6_2_opti)\n\n tsp_result = work(\"matrix10\", matrix10)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix10_opti)\n #\n # tsp_result = work(\"matrix_my_11\", matrix_my_11)\n # my_writer.simple_write_to_csv(result_out_file, tsp_result, tsp_my_11_opti)\n\n tsp_result = work(\"matrix12\", matrix12)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix12_opti)\n\n tsp_result = work(\"matrix13\", matrix13)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix13_opti)\n\n tsp_result = work(\"matrix14\", matrix14)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix14_opti)\n\n tsp_result = work(\"matrix15\", matrix15)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix15_opti)\n\n # tsp_result = work(\"matrix17\", matrix17)\n # my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix17_opti)\n\n # ==========================================================================\n tsp_result = work(\"gr21\", matrix_gr21)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_gr21_opti)\n\n tsp_result = work(\"ulysses22\", matrix_ulysses22)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_ulysses22_opti)\n\n tsp_result = work(\"gr24\", matrix_gr24)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_gr24_opti)\n\n tsp_result = work(\"fri26\", matrix_fri26)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_fri26_opti)\n\n tsp_result = work(\"bays29\", matrix_bays29)\n my_writer.simple_write_to_csv(result_out_file, 
tsp_result, matrix_bays29_opti)\n\n # tsp_result = work(\"att48\", matrix_att48)\n # my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_att48_opti)\n #\n # tsp_result = work(\"eil51\", matrix_eil51)\n # my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_eil51_opti)\n #\n # tsp_result = work(\"berlin52\", matrix_berlin52)\n # my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_berlin52_opti)\n\n tsp_result = work(\"br17\", matrix_br17)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_br17_opti)\n\n tsp_result = work(\"ftv33\", matrix_ftv33)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_ftv33_opti)\n\n tsp_result = work(\"ftv35\", matrix_ftv35)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_ftv35_opti)\n\n tsp_result = work(\"ftv38\", matrix_ftv38)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_ftv38_opti)\n\n tsp_result = work(\"p43\", matrix_p43)\n my_writer.simple_write_to_csv(result_out_file, tsp_result, matrix_p43_opti)\n\n input('Done, press any key to shut me down...')\n sys.exit(0)\n\n except OSError:\n print(\"Something went wrong - check whether the init file exists\")\n\n\n\nmatrix_my_2 = [\n [0, 10],\n [10, 0]\n]\ntsp_my_2_opti = '20; [0, 1, 0]'\n\nmatrix_my_4 = [\n [0, 10, 12, 8],\n [10, 0, 15, 18],\n [12, 15, 0, 5],\n [8, 18, 5, 0]\n]\ntsp_my_4_opti = '38; [0, 1, 2, 3, 0]'\n\nmatrix_my_5 = [\n [0, 10, 12, 8, 9],\n [10, 0, 15, 18, 10],\n [12, 15, 0, 5, 11],\n [8, 18, 5, 0, 5],\n [9, 10, 11, 5, 0]\n]\ntsp_my_5_opti = '42; [0, 1, 4, 3, 2, 0]'\n\n# matrix10 = [\n# [0, 10, 12, 8, 9, 31],\n# [10, 0, 15, 18, 10, 50],\n# [12, 15, 0, 5, 11, 4],\n# [8, 18, 5, 0, 5, 19],\n# [9, 10, 11, 5, 0, 21],\n# [31, 50, 4, 19, 21, 0]\n# ]\n#\n# matrix11 = [\n# [0, 10, 12, 8, 9, 31, 2],\n# [10, 0, 15, 18, 10, 50, 4],\n# [12, 15, 0, 5, 11, 4, 8],\n# [8, 18, 5, 0, 5, 19, 16],\n# [9, 10, 11, 5, 0, 21, 20],\n# [31, 50, 4, 19, 21, 0, 30],\n# [2, 4, 8, 16, 20, 30, 0]\n# ]\n#\n# matrix12 = [\n# [0, 20, 30, 31, 28, 40, 8, 21],\n# [20, 0, 10, 14, 20, 44, 9, 20],\n# [30, 10, 0, 10, 22, 50, 5, 31],\n# [31, 14, 10, 0, 14, 42, 3, 22],\n# [28, 20, 22, 14, 0, 28, 11, 20],\n# [40, 44, 50, 42, 28, 0, 14, 3],\n# [8, 9, 5, 3, 11, 14, 0, 3],\n# [21, 20, 31, 22, 20, 3, 3, 0]\n# ]\n#\n# matrix13 = [\n# [0, 20, 30, 31, 28, 40, 8, 21, 50],\n# [20, 0, 10, 14, 20, 44, 9, 20, 3],\n# [30, 10, 0, 10, 22, 50, 5, 31, 10],\n# [31, 14, 10, 0, 14, 42, 3, 22, 4],\n# [28, 20, 22, 14, 0, 28, 11, 20, 9],\n# [40, 44, 50, 42, 28, 0, 14, 3, 2],\n# [28, 9, 5, 3, 11, 14, 0, 3, 10],\n# [21, 20, 31, 22, 20, 3, 3, 0, 11],\n# [50, 3, 10, 4, 9, 2, 10, 11, 0]\n# ]\n#\nmatrix_my_11 = [\n [-1, 29, 82, 46, 68, 52, 72, 42, 51, 55, 29],\n [29, -1, 55, 46, 42, 43, 43, 23, 23, 31, 41],\n [82, 55, -1, 68, 46, 55, 23, 43, 41, 29, 79],\n [46, 46, 68, -1, 82, 15, 72, 31, 62, 42, 21],\n [68, 42, 46, 82, -1, 74, 23, 52, 21, 46, 82],\n [52, 43, 55, 15, 74, -1, 61, 23, 55, 31, 33],\n [72, 43, 23, 72, 23, 61, -1, 42, 23, 31, 77],\n [42, 23, 43, 31, 52, 23, 42, -1, 33, 15, 37],\n [51, 23, 41, 62, 21, 55, 23, 33, -1, 29, 62],\n [55, 31, 29, 42, 46, 31, 31, 15, 29, -1, 51],\n [29, 41, 79, 21, 82, 33, 77, 37, 62, 51, -1]\n]\ntsp_my_11_opti = '251; [0, 1, 8, 4, 6, 2, 9, 7, 5, 3, 10, 0]'\n\nif __name__ == '__main__':\n 
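# A note on the reference optima passed to simple_write_to_csv above: each *_opti string encodes \"cost; tour\". For example tsp_my_4_opti = '38; [0, 1, 2, 3, 0]' records the optimal cycle 0 -> 1 -> 2 -> 3 -> 0 for matrix_my_4, whose cost is 10 + 15 + 5 + 8 = 38 (and likewise '20; [0, 1, 0]' for the 2x2 matrix: 10 + 10 = 20).\n 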
main()\n","repo_name":"Karolina606/PEA","sub_path":"kom3/tsp_bs3.py","file_name":"tsp_bs3.py","file_ext":"py","file_size_in_byte":15845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11889551488","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def print_list(self):\n cur_node = self.head\n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next\n\n def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node\n \n def len_iterative(self):\n\n count = 0\n cur_node = self.head\n\n while cur_node:\n count += 1\n cur_node = cur_node.next\n return count\n\n def len_recursive(self, node):\n if node is None:\n return 0\n return 1 + self.len_recursive(node.next)\n \n def print_nth_from_last(self, n):\n\n # Method 2:\n p = self.head\n q = self.head\n\n count = 0\n while q and count < n:\n q = q.next\n count += 1\n\n if not q:\n print(str(n) + \" is greater than the number of nodes in list.\")\n return\n\n while p and q:\n p = p.next\n q = q.next\n return p.data\n\n \nllist = LinkedList()\nllist.append(\"A\")\nllist.append(\"B\")\nllist.append(\"C\")\nllist.append(\"D\")\n\nprint(llist.print_nth_from_last(3))\n\n","repo_name":"bipulhstu/Data-Structures-and-Algorithms-Through-Python-in-Depth","sub_path":"1. Single Linked List/16. Print nth to last node in Singly Linked List - Method 2.py","file_name":"16. Print nth to last node in Singly Linked List - Method 2.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25652430703","text":"print(\"00 - TWO D8\\n\")\n\"\"\"\nCreate a simulation that shows what happens when two eight-sided dice\nare rolled 1000 times.\n\"\"\"\n\nfrom plotly.graph_objs import Bar, Layout\nfrom plotly import offline\n\nfrom die import Die\n\n# create two D8\ndie_1 = Die(8)\ndie_2 = Die(8)\n\n# make 1000 rolls and store results in a list\nresults = []\n\nfor roll_num in range(1000):\n result = die_1.roll() + die_2.roll()\n results.append(result)\n\n# analyze results\nfrequencies = []\nmax_value = die_1.num_sides + die_2.num_sides\n\nfor value in range(2, max_value + 1):\n frequency = results.count(value)\n frequencies.append(frequency)\n\n# visualize the results\nx_values = list(range(2, max_value + 1))\ndata = [Bar(x=x_values, y=frequencies)]\n\nx_axis_config = {\"title\": \"Result\"}\ny_axis_config = {\"title\": \"Frequency of Result\"}\n\nmy_layout = Layout(\n title=\"Results of rolling two D8s 1000 times\",\n xaxis=x_axis_config,\n yaxis=y_axis_config\n)\n\noffline.plot({\"data\": data, \"layout\": my_layout}, filename=\"../../EJERCICIOS/13_visualizing_data/02_dice/plots/2_d8.html\")","repo_name":"nlarrea/apuntes-de-python","sub_path":"EJERCICIOS/13_visualizing_data/02_dice/00_two_d8.py","file_name":"00_two_d8.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72979222594","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n if not my_list:\n return 0\n else:\n sum_weight = 0\n for item in my_list:\n sum_weight += item[1]\n total = sum(x * y for x, y in my_list)\n return total / 
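sum_weight\n\n\n# A quick worked check of weight_average (hypothetical input). Each pair is (value, weight), so weight_average([(1, 2), (2, 1), (3, 3)]) returns (1*2 + 2*1 + 3*3) / (2 + 1 + 3) = 13/6 - that is, the weighted total divided by 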
sum_weight\n","repo_name":"Madele-theron/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12030566664","text":"# -*- coding: utf-8 -*-\n\n###########################################################################\n#\n# Author: Mark Olson 2018\n#\n# This is to help me manage my personal photo and video collection.\n# \n###########################################################################\n\n\n# import sys\nimport os\nimport time\nfrom time import sleep\n\n###########################################################################\n## Python code generated with wxFormBuilder (version Oct 26 2018)\n## http://www.wxformbuilder.org/\n###########################################################################\n\nimport wx\nimport wx.xrc\nimport wx.media\nimport gettext\n_ = gettext.gettext\n\n###########################################################################\n## some globals - need use by everyone\n###########################################################################\n\n# for historical reasons Weird numbering is\n# leftmost digit: _01...9A...Z re: [_0-9A-Z]\n# next digit: 01...9A...Z re: [0-9A-Z]\n# next 3 digits: 01...9 re: [0-9]\n# (example: _0001 to _Z999 to 00000 to 99999 to 9A000 to 9Z999 to A0000 to ZZ999)\nMarksWeirdDigits = [\"_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\",\n \"0123456789\",\n \"0123456789\",\n \"0123456789\"\n ]\n\ndef fromMarksWeirdNumbers(theNumberText, quiet=False):\n if not isinstance(theNumberText, type('_A123')):\n if quiet is False:\n ignore = wx.MessageBox(\n \"ERROR: cannot convert input type %s - inside fromMarksWeirdNumbers()\" % (type(theNumberText)),\n \"Bad Inputs\", wx.ICON_EXCLAMATION | wx.CENTRE)\n elif len(theNumberText) < 5:\n if quiet is False:\n ignore = wx.MessageBox(\n \"ERROR: string |%s| is too short; must be >= 5 - inside fromMarksWeirdNumbers()\" % theNumberText,\n \"Bad Inputs\", wx.ICON_EXCLAMATION | wx.CENTRE)\n theNumberText = theNumberText.upper()\n good = True\n converted = 0\n if len(theNumberText) < len(MarksWeirdDigits):\n good = False\n converted = -1\n if good:\n for idx in range(len(MarksWeirdDigits)):\n converted *= len(MarksWeirdDigits[idx])\n tmp = MarksWeirdDigits[idx].find(theNumberText[idx])\n if -1 != tmp:\n converted += tmp\n else:\n good = False\n converted = -1\n break\n return good, converted\n\ndef toMarksWeirdNumbers(theNumber, quiet=False):\n # see MarksWeirdDigits[] for description of strange numbering scheme\n good = True\n converted = \"_0000\" # zero\n if isinstance(theNumber, type(\"123\")): # if it is a string\n theNumber = int(theNumber)\n if not isinstance(theNumber, type(123)): # if it is not a number\n if quiet is False:\n ignore = wx.MessageBox(\n \"ERROR: cannot convert %s type %s inside toMarksWeirdNumbers()\" % (str(theNumber), type(theNumber)),\n \"Bad Inputs\", wx.ICON_EXCLAMATION | wx.CENTRE)\n good = False\n else:\n # TODO FIXME let's cheat a little; we know some things about this...\n last3 = \"%03d\" % (theNumber % 1000)\n first2int = int(theNumber // 1000)\n firstint = int(first2int // len(MarksWeirdDigits[1]))\n secondint = first2int - firstint * len(MarksWeirdDigits[1])\n if firstint > len(MarksWeirdDigits[0]):\n theMax = ((len(MarksWeirdDigits[0]) - 1) * len(MarksWeirdDigits[1]) + (\n 
len(MarksWeirdDigits[1]) - 1)) * 1000 + 999\n ignore = wx.MessageBox(\n \"ERROR: cannot convert %d: larger than max %d inside toMarksWeirdNumbers()\" % (theNumber, theMax),\n \"Bad Inputs\", wx.ICON_EXCLAMATION | wx.CENTRE)\n good = False\n else:\n converted = MarksWeirdDigits[0][firstint] + MarksWeirdDigits[1][secondint] + last3\n return good, converted\n\n\n###########################################################################\n# Class DlgEnterVidNum - retrieve hand-entered Weird media number\n# local non-control variables\n# self.l_returnNumberDec - decimal number of media entered or else -1\n# self.l_returnNumberWeird - weird number of media entered or else \"UNKNOWN\"\n# local control variables\n# self.m_sdbtn_Sizer1Apply - to hold the \"Apply\" button\n# self.m_sdbtn_Sizer1Cancel - to hold the \"Cancel\" button\n# self.m_staticTextDlgEnterVidNum - static text telling how to enter video number\n# self.m_staticTextDlgEnterVidNumStatus - static text telling what dialog is doing or waiting for\n# self.m_textCtrlEnterNum - text control that user types Weird number into\n###########################################################################\n\nclass DlgEnterVidNum ( wx.Dialog ):\n\n def __init__( self, parent ):\n wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition,\n size = wx.Size( 474,187 ), style = wx.DEFAULT_DIALOG_STYLE )\n\n self.l_returnNumberWeird = \"UNKNOWN\"\n self.l_returnNumberDec = -1\n\n self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )\n\n bSizer4 = wx.BoxSizer( wx.VERTICAL )\n\n self.m_staticTextDlgEnterVidNum =\\\n wx.StaticText(self, wx.ID_ANY,\n _(u\"Enter last 5 digits below. Example: for IMG_P085 enter _P085, for MVI0C123 enter 0C123\"),\n wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTER_HORIZONTAL )\n self.m_staticTextDlgEnterVidNum.Wrap( -1 )\n\n bSizer4.Add( self.m_staticTextDlgEnterVidNum, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )\n\n self.m_textCtrlEnterNum = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, size=wx.DefaultSize, style = wx.TE_PROCESS_ENTER)\n bSizer4.Add( self.m_textCtrlEnterNum, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )\n\n self.m_staticTextDlgEnterVidNumStatus =\\\n wx.StaticText( self, wx.ID_ANY, _(u\"Status: waiting for entry...\"),\n wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTER_HORIZONTAL )\n self.m_staticTextDlgEnterVidNumStatus.Wrap( -1 )\n\n bSizer4.Add( self.m_staticTextDlgEnterVidNumStatus, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 )\n\n # m_sdbtn_ is standard dialog button\n m_sdbSizer1 = wx.StdDialogButtonSizer()\n self.m_sdbtn_Sizer1Apply = wx.Button( self, wx.ID_APPLY )\n m_sdbSizer1.AddButton( self.m_sdbtn_Sizer1Apply )\n self.m_sdbtn_Sizer1Cancel = wx.Button( self, wx.ID_CANCEL )\n m_sdbSizer1.AddButton( self.m_sdbtn_Sizer1Cancel )\n m_sdbSizer1.Realize()\n\n bSizer4.Add( m_sdbSizer1, 1, wx.ALIGN_CENTER_HORIZONTAL, 5 )\n\n\n self.SetSizer( bSizer4 )\n self.Layout()\n\n self.Centre( wx.BOTH )\n\n # Connect Events\n self.m_sdbtn_Sizer1Apply.Bind( wx.EVT_BUTTON, self.onDlgBtnEnterVidNumApply )\n self.m_sdbtn_Sizer1Cancel.Bind( wx.EVT_BUTTON, self.onDlgBtnEnterVidNumCancel )\n self.Bind(wx.EVT_TEXT_ENTER, self.onDlgBtnEnterVidNumApply)\n # end class DlgEnterVidNum().__init__()\n\n def __del__( self ):\n pass\n # end class DlgEnterVidNum().__del__()\n\n # Virtual event handlers\n def onDlgBtnEnterVidNumApply( self, event ):\n typedNumWeird = self.m_textCtrlEnterNum.GetValue()\n # see if this is a valid number; should be one of my weird numbers\n good, typedNumDec = 
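fromMarksWeirdNumbers(typedNumWeird)\n # A round-trip sketch of the weird numbering (values follow from the digit tables above; shown for illustration only):\n # toMarksWeirdNumbers(1) -> (True, \"_0001\")\n # fromMarksWeirdNumbers(\"_Z999\") -> (True, 35999) # the last \"_\"-prefixed number\n # toMarksWeirdNumbers(36000) -> (True, \"00000\") # rollover to plain digits\n good, typedNumDec = 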
fromMarksWeirdNumbers(typedNumWeird)\n if good:\n self.l_returnNumberWeird = typedNumWeird\n self.l_returnNumberDec = typedNumDec\n self.Destroy()\n else:\n message = wx.MessageDialog(self, _(\"%s is not a valid weird number\" % typedNumWeird),\n _(\"ERROR\"), wx.OK | wx.ICON_ERROR)\n message.ShowModal()\n message.Destroy()\n # end class DlgEnterVidNum().onDlgBtnEnterVidNumApply()\n\n def onDlgBtnEnterVidNumCancel( self, event ):\n self.l_returnNumberWeird = \"UNKNOWN\"\n self.Destroy()\n # end class DlgEnterVidNum().onDlgBtnEnterVidNumCancel()\n # end class DlgEnterVidNum()\n\n\n\n###########################################################################\n# Class MainFrame - main class\n# local non-control variables\n# self.l_absRunPath - absolute path we were run from; used to find resources such as the icon\n# self.l_dbDirName - path to directory for database (db) text file\n# self.l_dbFileName - filename (no path) for database (db) text file\n# self.l_dbMediaDescripIdx - if no db or no line select -1, else line number (starting @ 0) currently displayed\n# self.l_dbMediaDescripLines - lines for open db text file, stripped\n# self.l_dbMediaDescripModified - will be True if db text file edits not yet saved to l_dbFileName\n# self.l_dbMediaDescripPath - absolute path to the open db text file\n# self.l_infoMaxImgMvi - maximum numbers for pix and mvi media in decimal\n# self.l_listCtrlInfo - where display version of db text file is stored; not exact txt format\n# self.l_listCtrlSlctd - prev and curr lines; TODO candidate for deletion, maybe use when editing\n# self.l_mediaCurrentWeirdNum - mediaWeirdNum of currently displayed file\n# self.l_mediaLength - self.m_mediactrl.Length() (millisec) of currently displayed video or else None\n# self.l_mediaMtime - time.ctime(os.path.getmtime(mediaFile)) date/time file modified\n# self.l_mediaStartStopDisplay - set True to load movie video to a bit past the start (otherwise blank)\n# self.l_rootDir - absolute path to the root directory: has db text file and dirs for all media\n# self.l_textCtrlEntry_unchanged - copy of single db text file entry to compare to see if user changed it\n# self.l_useMdoMediaCtrls - True to use my clunky controls; set false when ShowPlayerControls() works\n#\n# local control variables\n# self.m_buttonEnterVidNum - button to call DlgEnterVidNum() and manually enter video number\n# self.m_buttonLouder - video control - louder\n# self.m_buttonPause - video control - pause video play\n# self.m_buttonPlay - video control - play video\n# self.m_buttonSofter - video control - softer\n# self.m_buttonNextNum - video select - move to next numerical mediafile\n# self.m_buttonPrevNum - video select - move to previous numerical mediafile\n# self.m_buttonNextDbFile - video select - move to next mediafile listed in media db text file\n# self.m_buttonPrevDbFile - video select - move to previous mediafile listed in media db text file\n# self.m_listCtrlVidComments - list control to hold media db text file\n# self.m_mediactrl - media control to play video\n# self.m_menubarMainFrame - the \"menu\" bar\n# self.m_menuFile - collection for \"FILE\" menu\n# self.m_menuItemFileOpen - menu - open media db text file\n# self.m_menuItemFileSave - menu - save media db text file using current name\n# self.m_menuItemFileSaveAs - menu - save media db text file using new name\n# self.m_menuItemQuit - menu - quit without save\n# self.m_menuItemExit - menu - exit and save\n# self.m_menuHelp - collection for \"HELP\" menu\n# self.m_menuItemHelpAbout - menu - 
help/about\n# self.m_panel1 - video is displayed here\n# self.m_staticTextStatus - info about video being displayed\n# self.m_textCtrlEntry - enter/edit video comment here\n# self.m_timerMedia - timer needed to move a few milliseconds into movie so not blank screen\n# see also self.l_mediaStartStopDisplay\n#\n# to get list of l_ or m_\n# grep \"self.l_.*[=]\" MarkMedia.py | sed \"s?self.l_?\\nself.l_?g\" | grep self.l_ | sed \"s?[ ,\\[\\()].*??\" | sort | uniq\n# grep \"self.m_.*[=]\" MarkMedia.py | sed \"s?self.m_?\\nself.m_?g\" | grep self.m_ | sed \"s?[ ,\\[\\()].*??\" | sort | uniq\n###########################################################################\n\nclass MainFrame ( wx.Frame ):\n def __init__( self, parent, absRunPath = \"\" ):\n wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition,\n size = wx.Size( 1245,1193 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )\n\n self.l_absRunPath = absRunPath # absolute path we were run from\n self.l_dbMediaDescripPath = \"UNKNOWN\" # absolute path to open text file\n self.l_dbMediaDescripLines = [] # lines for open text file, stripped\n self.l_dbMediaDescripIdx = -1 # which line for open text file or -1\n self.l_dbMediaDescripModified = False # will be true when modifications are made\n self.l_mediaLength = None # the length of media file; appears to be in milliseconds\n self.l_mediaStartStopDisplay = False # yet another media load flag\n self.l_mediaCurrentWeirdNum = \"_0001\"\n self.l_mediaMtime = \"\" # time.ctime(os.path.getmtime(<>))\n self.l_infoMaxImgMvi = {} # filled with max nums, example {'THE_MAX_IMGNUM': 45065, 'THE_MAX_MVINUM': 3441}\n self.l_listCtrlInfo = {} # [validWeirdNum] = {\"line\": -1} , others TBD\n self.l_listCtrlSlctd = {\"prev\": \"_0001\", \"curr\": \"_0001\"}\n self.l_textCtrlEntry_unchanged = \"\"\n self.l_useMdoMediaCtrls = True # ShowPlayerControls does not seem to work\n self.l_dbFileName = \"\"\n self.l_dbDirName = \"\"\n self.l_rootDir = \"\"\n\n self.SetIcon(wx.Icon(os.path.join(self.l_absRunPath,\"MadScience_256.ico\"))) # Mark: set icon\n\n bSizerFrame = wx.BoxSizer( wx.VERTICAL )\n\n self.m_panel1 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )\n self.m_panel1.SetToolTip( u\"Video is displayed here\" )\n\n bSizerPanel = wx.BoxSizer( wx.VERTICAL )\n\n self.m_staticTextStatus = wx.StaticText( self.m_panel1, wx.ID_ANY, u\"Status: ...\",\n wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_staticTextStatus.Wrap( -1 )\n\n self.m_staticTextStatus.SetToolTip( u\"Status is displayed here\" )\n\n bSizerPanel.Add( self.m_staticTextStatus, 0, wx.ALL|wx.EXPAND, 5 )\n self.m_mediactrl = wx.media.MediaCtrl(self, style=wx.SIMPLE_BORDER, size=wx.Size( 800,800 ))\n # self.m_mediactrl = wx.media.MediaCtrl(self, style=wx.SIMPLE_BORDER, size=wx.Size( 800,800 ), szBackend=wx.media.MEDIABACKEND_DIRECTSHOW)\n # self.m_mediactrl = wx.media.MediaCtrl(self, style=wx.SIMPLE_BORDER, size=wx.Size( 800,800 ), szBackend=wx.media.MEDIABACKEND_WMP10)\n if not self.l_useMdoMediaCtrls:\n # Currently only implemented on the QuickTime and DirectShow backends. 
The function returns True on success\n # if not self.m_mediactrl.ShowPlayerControls(flags=wx.media.MEDIACTRLPLAYERCONTROLS_STEP):\n if not self.m_mediactrl.ShowPlayerControls(flags=wx.media.MEDIACTRLPLAYERCONTROLS_DEFAULT): # does not seem to work\n # it claims that it worked but there are no working controls\n print(\"ShowPlayerControls() failed\")\n bSizerPanel.Add( self.m_mediactrl, 1, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5 )\n\n bSizer3 = wx.BoxSizer( wx.HORIZONTAL )\n\n self.m_buttonPrevDbFile = wx.Button(self.m_panel1, wx.ID_ANY, u\"|\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonPrevDbFile.SetToolTip( u\"Previous in text file\" )\n\n bSizer3.Add( self.m_buttonPrevDbFile, 0, wx.ALL, 5 )\n\n self.m_buttonPrevNum = wx.Button( self.m_panel1, wx.ID_ANY, u\"|<\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonPrevNum.SetToolTip( u\"Previous media file\" )\n\n bSizer3.Add( self.m_buttonPrevNum, 0, wx.ALL, 5 )\n\n self.m_buttonPlay = wx.Button( self.m_panel1, wx.ID_ANY, u\">\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonPlay.SetToolTip( u\"Play\" )\n\n bSizer3.Add( self.m_buttonPlay, 0, wx.ALL, 5 )\n\n self.m_buttonPause = wx.Button( self.m_panel1, wx.ID_ANY, u\">][<\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonPause.SetToolTip( u\"Pause\" )\n\n bSizer3.Add( self.m_buttonPause, 0, wx.ALL, 5 )\n\n self.m_buttonNextNum = wx.Button( self.m_panel1, wx.ID_ANY, u\">|\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonNextNum.SetToolTip( u\"Next media file\" )\n\n bSizer3.Add( self.m_buttonNextNum, 0, wx.ALL, 5 )\n\n self.m_buttonNextDbFile = wx.Button(self.m_panel1, wx.ID_ANY, u\"textfile>|\", wx.DefaultPosition, wx.DefaultSize, 0)\n self.m_buttonNextDbFile.SetToolTip( u\"Next in text file\" )\n\n bSizer3.Add( self.m_buttonNextDbFile, 0, wx.ALL, 5 )\n\n self.m_buttonEnterVidNum = wx.Button(self.m_panel1, wx.ID_ANY, u\"Enter #...\", wx.DefaultPosition, wx.DefaultSize, 0)\n self.m_buttonEnterVidNum.SetToolTip( u\"Enter media number\" )\n\n bSizer3.Add( self.m_buttonEnterVidNum, 0, wx.ALL, 5 )\n\n self.m_buttonLouder = wx.Button( self.m_panel1, wx.ID_ANY, u\"Louder\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonLouder.SetToolTip( u\"Louder\" )\n\n bSizer3.Add( self.m_buttonLouder, 0, wx.ALL, 5 )\n\n self.m_buttonSofter = wx.Button( self.m_panel1, wx.ID_ANY, u\"Softer\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_buttonSofter.SetToolTip( u\"Softer\" )\n\n bSizer3.Add( self.m_buttonSofter, 0, wx.ALL, 5 )\n\n\n bSizerPanel.Add( bSizer3, 0, wx.ALIGN_CENTER_HORIZONTAL, 5 )\n\n self.m_textCtrlEntry = wx.TextCtrl( self.m_panel1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition,\n wx.DefaultSize, wx.TE_NOHIDESEL | wx.TE_NO_VSCROLL | wx.TE_PROCESS_ENTER )\n self.m_textCtrlEntry.SetToolTip( u\"Enter/Edit Comment\" )\n\n bSizerPanel.Add( self.m_textCtrlEntry, 0, wx.ALL | wx.EXPAND, 5 )\n\n self.m_listCtrlVidComments = wx.ListCtrl( self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ),\n wx.LC_REPORT | wx.BORDER_SUNKEN )\n self.m_listCtrlVidComments.SetToolTip( u\"List of existing Video txt comments\" )\n self.m_listCtrlVidComments.SetMinSize( wx.Size( -1,300 ) )\n# start copying here\n self.m_listCtrlVidComments.InsertColumn(0, 'MagicNum')\n self.m_listCtrlVidComments.InsertColumn(1, 'Comment')\n self.m_listCtrlVidComments.SetColumnWidth(0, 50)\n self.m_listCtrlVidComments.SetColumnWidth(1, 1500)\n\n\n bSizerPanel.Add( self.m_listCtrlVidComments, 0, wx.ALL | wx.EXPAND, 5 )\n\n\n self.m_panel1.SetSizer( bSizerPanel )\n self.m_panel1.Layout()\n bSizerPanel.Fit( self.m_panel1 )\n bSizerFrame.Add( self.m_panel1, 1, wx.EXPAND | wx.ALL, 5 )\n\n\n self.SetSizer( bSizerFrame )\n self.Layout()\n self.m_menubarMainFrame = wx.MenuBar( 0 )\n self.m_menuFile = wx.Menu()\n self.m_menuItemFileOpen = wx.MenuItem( self.m_menuFile, wx.ID_ANY, u\"Open Video txt...\",\n wx.EmptyString, wx.ITEM_NORMAL )\n self.m_menuFile.Append( self.m_menuItemFileOpen )\n\n self.m_menuItemFileSave = wx.MenuItem( 
self.m_menuFile, wx.ID_ANY, u\"Save Video txt...\",\n wx.EmptyString, wx.ITEM_NORMAL )\n self.m_menuFile.Append( self.m_menuItemFileSave )\n\n self.m_menuItemFileSaveAs = wx.MenuItem( self.m_menuFile, wx.ID_ANY, u\"Save As Video txt...\"\n , wx.EmptyString, wx.ITEM_NORMAL )\n self.m_menuFile.Append( self.m_menuItemFileSaveAs )\n\n self.m_menuItemQuit = wx.MenuItem( self.m_menuFile, wx.ID_ANY, u\"Quit without save...\",\n wx.EmptyString, wx.ITEM_NORMAL )\n self.m_menuFile.Append( self.m_menuItemQuit )\n\n self.m_menuItemExit = wx.MenuItem( self.m_menuFile, wx.ID_ANY, u\"Exit and save\",\n wx.EmptyString, wx.ITEM_NORMAL )\n self.m_menuFile.Append( self.m_menuItemExit )\n\n self.m_menubarMainFrame.Append( self.m_menuFile, u\"File\" )\n\n self.m_menuHelp = wx.Menu()\n self.m_menuItemHelpAbout = wx.MenuItem( self.m_menuHelp, wx.ID_ANY, u\"About...\",\n wx.EmptyString, wx.ITEM_NORMAL )\n self.m_menuHelp.Append( self.m_menuItemHelpAbout )\n\n self.m_menubarMainFrame.Append( self.m_menuHelp, u\"Help\" )\n\n self.SetMenuBar( self.m_menubarMainFrame )\n\n self.m_timerMedia = wx.Timer()\n self.m_timerMedia.SetOwner( self, wx.ID_ANY )\n self.m_timerMedia.Start( 125 )\n\n self.Centre( wx.BOTH )\n\n # Connect Events\n self.m_buttonPrevDbFile.Bind( wx.EVT_BUTTON, self.onBtnPrevFile )\n self.m_buttonPrevNum.Bind( wx.EVT_BUTTON, self.onBtnPrev )\n if self.l_useMdoMediaCtrls:\n self.m_buttonPlay.Bind( wx.EVT_BUTTON, self.onBtnPlay )\n self.m_buttonPause.Bind( wx.EVT_BUTTON, self.onBtnPause )\n self.m_buttonNextNum.Bind( wx.EVT_BUTTON, self.onBtnNext )\n self.m_buttonNextDbFile.Bind( wx.EVT_BUTTON, self.onBtnNextFile )\n self.m_buttonEnterVidNum.Bind( wx.EVT_BUTTON, self.onBtnEnterVidNum )\n self.m_buttonLouder.Bind( wx.EVT_BUTTON, self.onBtnLouder )\n self.m_buttonSofter.Bind( wx.EVT_BUTTON, self.onBtnSofter )\n self.m_textCtrlEntry.Bind( wx.EVT_TEXT_ENTER, self.onTextCtrlEntry )\n self.m_listCtrlVidComments.Bind( wx.EVT_LIST_ITEM_ACTIVATED, self.onListCtrlActivated )\n self.Bind( wx.EVT_MENU, self.onFileOpen, id = self.m_menuItemFileOpen.GetId() )\n self.Bind( wx.EVT_MENU, self.onFileSave, id = self.m_menuItemFileSave.GetId() )\n self.Bind( wx.EVT_MENU, self.onFileSaveAs, id = self.m_menuItemFileSaveAs.GetId() )\n self.Bind( wx.EVT_MENU, self.onFileQuit, id = self.m_menuItemQuit.GetId() )\n self.Bind( wx.EVT_MENU, self.onFileExit, id = self.m_menuItemExit.GetId() )\n self.Bind( wx.EVT_MENU, self.onHelpAbout, id = self.m_menuItemHelpAbout.GetId() )\n self.Bind( wx.EVT_TIMER, self.onTimerMedia, id=wx.ID_ANY )\n # end class MainFrame().__init__()\n\n def __del__( self ):\n del self.m_mediactrl\n pass\n # end class MainFrame().__del__()\n\n\n # Virtual event handlers\n def onBtnPrevFile( self, event ):\n possibleIdx = self.doFindDirecTextFileUsableLine(-1)\n if possibleIdx >= 0:\n self.l_dbMediaDescripIdx = possibleIdx\n ignore = self.doLoadupNumMediaFile( mediaWeirdNum = self.l_dbMediaDescripLines[self.l_dbMediaDescripIdx][:5],\n statusText= self.l_dbMediaDescripLines[self.l_dbMediaDescripIdx] )\n # end class MainFrame().onBtnPrevFile()\n\n def onBtnPrev( self, event ):\n ignore, mediaDecNum = fromMarksWeirdNumbers(self.l_mediaCurrentWeirdNum)\n # print(\"fromMarksWeirdNumbers m_mediaCurrentWeirdNum: |%s| ignore: |%s| mediaDecNum: |%s|\" % (self.l_mediaCurrentWeirdNum, ignore, mediaDecNum))\n if mediaDecNum > 1:\n mediaDecNum -= 1\n # print(\"decrement m_mediaCurrentWeirdNum: |%s| ignore: |%s| mediaDecNum: |%s|\" % (self.l_mediaCurrentWeirdNum, ignore, mediaDecNum))\n ignore, weirdMediaNum = 
toMarksWeirdNumbers(mediaDecNum)\n # print(\"toMarksWeirdNumbers weirdMediaNum: |%s| ignore: |%s| mediaDecNum: |%s|\" % (weirdMediaNum, ignore, mediaDecNum))\n self.doLoadupNumMediaFile( mediaWeirdNum = weirdMediaNum, statusText = \"non-text-file %d\" % mediaDecNum )\n # end class MainFrame().onBtnPrev()\n\n def onBtnPlay( self, event ):\n if not self.m_mediactrl.Play():\n wx.MessageBox(\"Unable to Play media : Unsupported format?\",\n \"ERROR\", wx.ICON_ERROR | wx.OK)\n else:\n self.m_mediactrl.SetInitialSize()\n self.GetSizer().Layout()\n # end class MainFrame().onBtnPlay()\n\n def onBtnPause( self, event ):\n self.m_mediactrl.Pause()\n # end class MainFrame().onBtnPause()\n\n\n def onBtnNext( self, event ):\n ignore, mediaDecNum = fromMarksWeirdNumbers(self.l_mediaCurrentWeirdNum)\n # TODO FIXME Pix vs Movies\n if mediaDecNum < self.l_infoMaxImgMvi[\"THE_MAX_MVINUM\"]:\n mediaDecNum += 1\n ignore, weirdMediaNum = toMarksWeirdNumbers(mediaDecNum)\n self.doLoadupNumMediaFile( mediaWeirdNum = weirdMediaNum, statusText = \"non-text-file %d\" % mediaDecNum )\n # end class MainFrame().onBtnNext()\n\n\n\n def onBtnNextFile( self, event ):\n possibleIdx = self.doFindDirecTextFileUsableLine(+1)\n if possibleIdx >= 0:\n self.l_dbMediaDescripIdx = possibleIdx\n ignore = self.doLoadupNumMediaFile( mediaWeirdNum = self.l_dbMediaDescripLines[self.l_dbMediaDescripIdx][:5],\n statusText= self.l_dbMediaDescripLines[self.l_dbMediaDescripIdx] )\n # end class MainFrame().onBtnNextFile()\n\n\n def onBtnEnterVidNum( self, event ):\n tmp = DlgEnterVidNum(self)\n tmp.ShowModal()\n if \"UNKNOWN\" != tmp.l_returnNumberWeird:\n self.doLoadupNumMediaFile(mediaWeirdNum=tmp.l_returnNumberWeird,\n statusText=\"non-text-file %d\" % tmp.l_returnNumberDec)\n # end class MainFrame().onBtnEnterVidNum()\n\n def onBtnLouder( self, event ):\n # increase volume by 5 percent; makes assumption that current volume is valid\n self.m_mediactrl.SetVolume(min(1.0, self.m_mediactrl.GetVolume()+0.05))\n # end class MainFrame().onBtnLouder()\n\n def onBtnSofter( self, event ):\n # decrease volume by 5 percent; makes assumption that current volume is valid\n self.m_mediactrl.SetVolume(max(0.0, self.m_mediactrl.GetVolume()-0.05))\n # end class MainFrame().onBtnSofter()\n\n def onTextCtrlEntry( self, event ):\n event.Skip() # TODO need to write this one\n # end class MainFrame().onTextCtrlEntry()\n\n def onListCtrlActivated( self, event ):\n # print(\"OnItemActivated: %s - %s TopItem: %s\" % (event.Index, self.m_listCtrlVidComments.GetItemText(event.Index), self.m_listCtrlVidComments.GetTopItem()))\n # print(\" GetColumnWidth() [0] [1]: [%s] [%s]\" % (self.m_listCtrlVidComments.GetColumnWidth(0), self.m_listCtrlVidComments.GetColumnWidth(1)))\n # print(\" GetItemPosition(event.Index): %s\" % self.m_listCtrlVidComments.GetItemPosition(event.Index))\n # print(\" GetItemRect(event.Index): %s\" % self.m_listCtrlVidComments.GetItemRect(event.Index))\n myRow = event.Index\n myCol = -1\n myX, myY = self.m_listCtrlVidComments.ScreenToClient(wx.GetMousePosition())\n # print(\"myX=%d\" % myX)\n numCols = self.m_listCtrlVidComments.GetColumnCount() # actually I know there are just two\n # print(\"numCols=%d\" % numCols)\n for idx in range(numCols):\n # print(\" idx=%d myX=%d\" % (idx, myX))\n colWidth = self.m_listCtrlVidComments.GetColumnWidth(idx)\n # print(\" colWidth=%d\" % colWidth)\n if myX < colWidth:\n myCol = idx\n break\n myX -= colWidth\n print(\"myRow=%d myCol=%d\" % (myRow, myCol))\n textNum = 
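self.m_listCtrlVidComments.GetItemText(myRow,0)\n # Hit-test walkthrough (illustrative numbers): with the column widths set in __init__ (50 and 1500), a click at myX = 40 falls in column 0 since 40 < 50, while myX = 60 resolves to column 1 (60 >= 50, then 60 - 50 = 10 < 1500).\n textNum = 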
self.m_listCtrlVidComments.GetItemText(myRow,0)\n textString = self.m_listCtrlVidComments.GetItemText(myRow,1)\n # print(\"item=|%s| |%s|\" % (textNum, textString))\n good = False\n if len(textNum) >= 5:\n good, ignore = fromMarksWeirdNumbers(textNum)\n if good:\n self.doLoadupNumMediaFile( mediaWeirdNum = textNum, statusText = \"load %s\" % textNum )\n else:\n good, ignore = fromMarksWeirdNumbers(textString[1:6])\n if good:\n self.doLoadupNumMediaFile(mediaWeirdNum=textString[1:6], statusText=\"alt-load %s\" % textString[1:6])\n # end class MainFrame().onListCtrlActivated()\n\n def doAddListCtrlLine( self, line = \"\", posn = 0 ):\n # adds line to list control before specified position\n mediaFlag, ignore = fromMarksWeirdNumbers(line, quiet=True)\n if mediaFlag:\n # media line\n self.m_listCtrlVidComments.InsertItem(posn, line[:5])\n self.m_listCtrlVidComments.SetItem(posn, 1, line[5:].strip())\n if line[:5] in self.l_listCtrlInfo.keys():\n self.l_listCtrlInfo[line[:5]][\"line\"].append(posn)\n else:\n self.l_listCtrlInfo[line[:5]] = {\"line\": [posn]}\n else:\n # comment\n self.m_listCtrlVidComments.InsertItem(posn, \" \")\n self.m_listCtrlVidComments.SetItem(posn, 1, line)\n if \" \" in self.l_listCtrlInfo.keys():\n self.l_listCtrlInfo[\" \"][\"line\"].append(posn)\n else:\n self.l_listCtrlInfo[\" \"] = {\"line\": [posn]}\n # alternating colors\n if posn % 2:\n self.m_listCtrlVidComments.SetItemBackgroundColour(posn, \"white\")\n else:\n self.m_listCtrlVidComments.SetItemBackgroundColour(posn, \"yellow\")\n # end class MainFrame().doAddListCtrlLine()\n\n def onFileOpen(self, event):\n # TODO FIXME Pix vs Movies\n dlg = wx.FileDialog(self, message=\"Choose a NewMovie.txt file in complete Olson www folder\",\n defaultDir=r'X:\\OlsonMedia\\DigitalCamera\\www_html', defaultFile=\"*.txt\",\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n if dlg.ShowModal() == wx.ID_OK:\n loadOK = True\n lcltmp_pathTxtFile = os.path.abspath(dlg.GetPath())\n self.l_dbFileName = dlg.GetFilename()\n self.l_dbDirName = os.path.dirname(lcltmp_pathTxtFile)\n lcltmp_pathTxtFile = os.path.join(self.l_dbDirName, self.l_dbFileName)\n l_pathMediaMaxInfo = os.path.join(self.l_dbDirName, \"OlsonPictureInfo.php\")\n if False == os.path.exists(l_pathMediaMaxInfo):\n wx.MessageBox(\"Unable to load required Info file %s: file does not exist\" % l_pathMediaMaxInfo,\n \"ERROR\", wx.ICON_ERROR | wx.OK)\n loadOK = False\n if loadOK:\n retn = self.doLoadMediaMaxFile(l_pathMediaMaxInfo)\n if \"OK\" != retn:\n wx.MessageBox(\"Unable to load %s: %s\" % (lcltmp_pathTxtFile, retn), \"ERROR\", wx.ICON_ERROR | wx.OK)\n loadOK = False\n if loadOK:\n retn = self.doLoadMediaDescripFile(lcltmp_pathTxtFile)\n if \"OK\" != retn:\n wx.MessageBox(\"Unable to load %s: %s\" % (lcltmp_pathTxtFile, retn), \"ERROR\", wx.ICON_ERROR | wx.OK)\n loadOK = False\n if loadOK:\n loadOK = self.doLoadupNumMediaFile(mediaWeirdNum=self.l_dbMediaDescripLines[self.l_dbMediaDescripIdx][:5],\n statusText=self.l_dbMediaDescripLines[self.l_dbMediaDescripIdx])\n # end class MainFrame().onFileOpen()\n\n def doGetTxtFileLines(self, fname):\n \"\"\"Opens text file and reads the lines\"\"\"\n retn = \"OK\"\n absName = \"UNKNOWN\"\n fp = None\n theLines = []\n try:\n absName = os.path.abspath(fname)\n fp = open(absName, 'rt')\n while True:\n line = fp.readline()\n if len(line) <= 0:\n break\n theLines.append(line.strip())\n fp.close()\n except OSError as err:\n try:\n fp.close()\n except:\n pass\n retn = \"Error: %s\" % err\n theLines = []\n except: # I know, too broad, but 
this is just for me\n try:\n fp.close()\n except:\n pass\n retn = \"Unexpected Error\"\n theLines = []\n if (\"OK\" == retn) and (len(theLines) < 1):\n retn = \"ERROR: %s has no lines\" % fname\n theLines = []\n return retn, theLines\n # end class MainFrame()doGetTxtFileLines()\n\n def doLoadMediaMaxFile(self, fname):\n \"\"\"gets last filenum for MVI and IMG\"\"\"\n # should contain lines that look like this:\n # define(\"THE_MAX_IMGNUM\",\"45065\");\n # define(\"THE_MAX_MVINUM\",\"3441\");\n self.l_infoMaxImgMvi = {}\n lcl_infoMediaMax = {\"THE_MAX_IMGNUM\": -1, \"THE_MAX_MVINUM\": -1}\n retn, lcltmp_infoLines = self.doGetTxtFileLines( fname )\n if \"OK\" != retn:\n return retn\n for line in lcltmp_infoLines:\n for key in lcl_infoMediaMax.keys():\n if -1 != line.find(key):\n lsplit = line.split('\"')\n try:\n lcl_infoMediaMax[key] = int(lsplit[-2])\n except:\n lcl_infoMediaMax[key] = -1\n retn = \"ERROR: file %s not in infoFile format\" % fname\n if (\"OK\" == retn) and (lcl_infoMediaMax[\"THE_MAX_IMGNUM\"] > 0) and (lcl_infoMediaMax[\"THE_MAX_MVINUM\"] > 0):\n # lcl_infoMediaMax is good; store it\n self.l_infoMaxImgMvi = lcl_infoMediaMax\n # let's just see if the info in lcl_infoMediaMax is up to date, shall we?\n # TODO FIXME Pix vs Movies\n ignore, lcl_mediaWeirdNum = toMarksWeirdNumbers(lcl_infoMediaMax[\"THE_MAX_MVINUM\"])\n lcl_mediaDirList = sorted(os.listdir((os.path.join(os.path.dirname(fname), 'movies', lcl_mediaWeirdNum[:2]))))\n lcl_mp4List = []\n for mDirFile in lcl_mediaDirList:\n if mDirFile.endswith(\".MP4\"):\n lcl_mp4List.append(mDirFile)\n # TODO FIXME Pix vs Movies TODO\n lcl_expectedLastFile = \"MVI\"+lcl_mediaWeirdNum+\".MP4\"\n lcl_found = -1\n if lcl_expectedLastFile in lcl_mp4List:\n lcl_found = lcl_mp4List.index(lcl_expectedLastFile)\n if lcl_found < 0:\n # TODO FIXME Pix vs Movies\n wx.MessageBox(\"Warning: directory structure does not include \\\"%s\\\" media directory files per infoFile %s\" % (\"movie\", fname), \"ERROR\", wx.ICON_ERROR | wx.OK)\n elif (lcl_found + 1) != len(lcl_mp4List):\n ignore, lcl_decNum = fromMarksWeirdNumbers(lcl_mp4List[-1][3:3+5], quiet=True)\n wx.MessageBox(\"Warning: \\\"%s\\\" media directory files per infoFile %s\\nExpected last file was %s (%d)\\nActual last file in directory was %s (%d)\" % (\"movie\", fname, lcl_expectedLastFile, lcl_infoMediaMax[\"THE_MAX_MVINUM\"], lcl_mp4List[-1], lcl_decNum), \"ERROR\", wx.ICON_ERROR | wx.OK)\n return retn\n # end class MainFrame().doLoadMediaMaxFile()\n\n def doLoadMediaDescripFile(self, fname):\n \"\"\"Opens text file\"\"\"\n self.l_dbMediaDescripIdx = -1\n retn, self.l_dbMediaDescripLines = self.doGetTxtFileLines( fname )\n if \"OK\" != retn:\n self.l_dbMediaDescripLines = []\n return retn\n if (\"#\" != self.l_dbMediaDescripLines[-1][0]) or (-1 == self.l_dbMediaDescripLines[-1].find(\"END OF FILE\")):\n self.l_dbMediaDescripLines = []\n return \"ERROR: file %s last line is not # ... 
END OF FILE\" % fname\n self.l_dbMediaDescripPath = os.path.abspath(fname)\n self.l_rootDir = os.path.dirname(self.l_dbMediaDescripPath)\n for line in self.l_dbMediaDescripLines:\n self.doAddListCtrlLine(line, self.m_listCtrlVidComments.GetItemCount())\n self.l_dbMediaDescripIdx = len(self.l_dbMediaDescripLines)-1\n self.l_dbMediaDescripIdx = self.doFindDirecTextFileUsableLine(-1)\n if self.l_dbMediaDescripIdx < 0:\n self.l_dbMediaDescripLines = []\n retn = \"ERROR: file %s has no non-comment lines\" % fname\n self.m_listCtrlVidComments.EnsureVisible(self.l_dbMediaDescripIdx)\n return retn\n # end class MainFrame().doLoadMediaDescripFile()\n\n def doLoadupNumMediaFile( self, mediaWeirdNum = \"_0001\", statusText = \"Status: ...\" ):\n loadOK = True\n nowStatus = self.m_staticTextStatus.GetLabel()\n nowTextCtrlEntry = self.m_textCtrlEntry.GetValue()\n self.m_staticTextStatus.SetLabel(\"Status: loading %s ...\" % mediaWeirdNum)\n # TODO FIXME Pix vs Movies\n self.l_mediaLength = None\n mediaFile = os.path.join(self.l_rootDir, 'movies', mediaWeirdNum[:2], \"MVI\"+mediaWeirdNum+\".MP4\")\n if False == os.path.exists(mediaFile):\n txt = \"Media file %s does not exist\" % mediaFile\n self.m_staticTextStatus.SetLabel(\"Status: %s\" % txt)\n wx.MessageBox(txt, \"ERROR\", wx.ICON_ERROR | wx.OK)\n loadOK = False\n else:\n if not self.m_mediactrl.Load(mediaFile):\n txt = \"Unable to load media file %s: Unsupported format?\" % mediaFile\n self.m_staticTextStatus.SetLabel(\"Status: %s\" % txt)\n wx.MessageBox(txt, \"ERROR\", wx.ICON_ERROR | wx.OK)\n loadOK = False\n else:\n # print(\" m_bLoaded=%d\" % self.m_mediactrl.m_bLoaded) # not in wxPython\n self.m_staticTextStatus.SetLabel(\"Status: %s (%s)\" % (statusText, self.l_mediaMtime))\n self.m_mediactrl.SetInitialSize()\n self.GetSizer().Layout()\n self.l_mediaMtime = time.ctime(os.path.getmtime(mediaFile))\n self.l_mediaCurrentWeirdNum = mediaWeirdNum\n self.l_mediaStartStopDisplay = loadOK\n if not loadOK:\n self.m_staticTextStatus.SetLabel(nowStatus)\n self.m_textCtrlEntry.ChangeValue(nowTextCtrlEntry) # avoid generating wxEVT_TEXT with SetValue\n else:\n if nowTextCtrlEntry != self.l_textCtrlEntry_unchanged:\n pass # TODO FIXME save edited text\n self.l_listCtrlSlctd[\"prev\"] = self.l_listCtrlSlctd[\"curr\"]\n self.l_listCtrlSlctd[\"curr\"] = mediaWeirdNum\n theListCtrlLine = self.getListCtrlLine(self.l_listCtrlSlctd[\"prev\"])\n if theListCtrlLine >= 0:\n self.m_listCtrlVidComments.Select(theListCtrlLine, 0)\n bestMatchLine = self.getListCtrlLine(mediaWeirdNum)\n if bestMatchLine >= 0: # landed on a line in the text file\n self.l_dbMediaDescripIdx = bestMatchLine\n self.m_listCtrlVidComments.Select(self.l_dbMediaDescripIdx, 1)\n self.m_listCtrlVidComments.EnsureVisible(self.l_dbMediaDescripIdx)\n self.l_textCtrlEntry_unchanged = self.l_dbMediaDescripLines[bestMatchLine]\n self.m_textCtrlEntry.ChangeValue(self.l_textCtrlEntry_unchanged) # avoid generating wxEVT_TEXT with SetValue\n else: # did not land on a line in the text file\n self.l_textCtrlEntry_unchanged = \"\"\n self.m_textCtrlEntry.ChangeValue(\"\") # avoid generating wxEVT_TEXT with SetValue\n pass\n return loadOK # we already notified the user as needed\n # end class MainFrame().doLoadupNumMediaFile()\n\n def getListCtrlLine(self, mediaWeirdNum):\n lineNum = -1\n # TODO FIXME make it choose the same one the user clicked if more than one of same in listctrl\n if mediaWeirdNum in self.l_listCtrlInfo.keys():\n lineNum = self.l_listCtrlInfo[mediaWeirdNum][\"line\"][0]\n return lineNum\n # 
end class MainFrame().getListCtrlLine()\n\n def doFindDirecTextFileUsableLine(self, direc):\n \"\"\"finds m_txtFileLines Usable Line idx in direction direc or -1\"\"\"\n foundit = -1\n good = False\n idx = self.l_dbMediaDescripIdx\n while True:\n idx += direc\n if (idx < 0) or (idx >= len(self.l_dbMediaDescripLines)):\n break\n good, ignore = fromMarksWeirdNumbers(self.l_dbMediaDescripLines[idx], quiet=True)\n if good != True:\n continue\n else:\n foundit = idx\n break\n return foundit\n # end class MainFrame().doFindDirecTextFileUsableLine()\n\n def writeMediaFile(self):\n fPtr = None\n try:\n fPtr = open(os.path.join(self.l_dbDirName, self.l_dbFileName), 'wt')\n for myLnum in range(self.m_listCtrlVidComments.GetItemCount()):\n textNum = self.m_listCtrlVidComments.GetItemText(myLnum, 0)\n textString = self.m_listCtrlVidComments.GetItemText(myLnum, 1)\n mediaFlag, ignore = fromMarksWeirdNumbers(textNum, quiet=True)\n if mediaFlag:\n fPtr.write(\"%s %s\\n\" % (textNum, textString))\n else:\n fPtr.write(\"%s\\n\" % textString)\n fPtr.close()\n fPtr = None\n except:\n message = wx.MessageDialog(self, _(\"Could not save file; file not saved. Try again.\"),\n _(\"ERROR\"), wx.OK | wx.ICON_ERROR)\n message.ShowModal()\n message.Destroy()\n if fPtr is not None:\n fPtr.close()\n # end MainFrame().writeMediaFile(self):\n\n def onFileSave( self, event ):\n event.Skip() # TODO need to write this one\n # end class MainFrame().onFileSave()\n\n def onFileSaveAs( self, event ):\n dlg = None\n dlg = wx.FileDialog(self, \"Save to file:\", \".\", \"\", \"Text (*.txt)|*.txt\", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n if dlg.ShowModal() == wx.ID_OK:\n self.l_dbFileName = dlg.GetFilename()\n self.l_dbDirName = dlg.GetDirectory()\n self.writeMediaFile()\n dlg.Destroy()\n dlg = None\n # end class MainFrame().onFileSaveAs()\n\n def onFileQuit( self, event ):\n dlgRslt = wx.MessageBox(\"Are you sure? 
This will lose changes\", \"Are you sure?\",\n wx.YES|wx.NO|wx.CANCEL|wx.ICON_EXCLAMATION|wx.CENTRE)\n if wx.NO == dlgRslt:\n event.Skip() # OK; just keep going\n else:\n self.Close()\n # end class MainFrame().onFileQuit()\n\n def onFileExit( self, event ):\n event.Skip() # TODO need to write this one\n # end class MainFrame().onFileExit()\n\n def onHelpAbout( self, event ):\n event.Skip() # TODO need to write this one\n # end class MainFrame().onHelpAbout()\n\n \"\"\"TODO FIXME TODO start/stop movie on click in movie window\n def onMouseUpLeftUp( self, event): # if click in movie window, toggle play-stop for movie\n myX, myY = self.m_mediactrl.ScreenToClient(wx.GetMousePosition())\n print(\"myX=%d, myY=%d\" % (myX, myY))\n event.Skip() # TODO need to write this one\n # end class MainFrame().onMouseUpLeftUp()\n \"\"\"\n\n def onTimerMedia( self, event ):\n # the length is -1 if nothing is loaded\n # after the load it is length None\n # some time after loading it goes to 0\n # some time after that it goes to number of millisecs (ex: 9637)\n # then some time later it rounds off to the seconds (ex: 9000); I don't know why\n # we want to keep the 9637 from the example above\n if self.l_mediaStartStopDisplay: # set True to load to a bit past the start\n tmp = self.m_mediactrl.Length()\n if ((None == self.l_mediaLength) or (self.l_mediaLength <= 0)) and ((tmp > 0) and (self.l_mediaLength != tmp)):\n self.l_mediaLength = self.m_mediactrl.Length()\n # print(\"timer length %s\" % self.l_mediaLength)\n if self.m_mediactrl.Play():\n # print(\"timer: Play worked\")\n self.m_mediactrl.SetInitialSize()\n self.GetSizer().Layout()\n sleep(0.05) # sleep seconds\n self.m_mediactrl.Pause()\n self.l_mediaStartStopDisplay = False\n # end class MainFrame().onTimerMedia()\n\n\n\n###########################################################################\n## MAIN PROGRAM\n###########################################################################\n\nif __name__ == \"__main__\":\n app = wx.App()\n frame = MainFrame(None, os.path.dirname(os.path.realpath(__file__))).Show()\n app.MainLoop()\n","repo_name":"Mark-MDO47/MarkMedia","sub_path":"PyMarkMedia/MarkMedia.py","file_name":"MarkMedia.py","file_ext":"py","file_size_in_byte":44460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14572788823","text":"import pandas as pd\nimport numpy as np\nimport re\nimport scipy.sparse as sps\nimport codecs\nfrom scipy.sparse import identity,coo_matrix, hstack\nfrom collections import Counter\nimport pickle as pkl\n\n\ndef train_model(local_path):\n\n #read_user_files_from_local_and_merge(local_path)\n\n # Reading all relevant files from csv / txt\n doc = codecs.open(local_path + '/Courses_09202017.txt', 'rU', 'UTF-16')\n courses = pd.read_csv(doc, sep = '\\t')\n\n doc = codecs.open(local_path + '/Products_09202017.txt', 'rU', 'UTF-8')#16-->8\n products = pd.read_csv(doc, sep = '\\t')\n\n doc = codecs.open(local_path + '/ProdOrderSummary_09202017.txt', 'rU', 'UTF-8')\n products_order_summary = pd.read_csv(doc, sep = '\\t')\n\n doc = codecs.open(local_path + '/UserEnrollmentsCompletions_09222017.txt', 'rU', 'UTF-8')\n course_completion = pd.read_csv(doc, sep = '\\t')\n\n doc = codecs.open(local_path + '/UserSubscriptions_09202017.txt', 'rU', 'UTF-8')\n user_subscription = pd.read_csv(doc, sep = '\\t')\n\n demographics = pd.read_csv(local_path + '/user__masterlist.csv', sep = '\\t', encoding='ISO-8859-1')\n\n\n courses_club = courses['CourseCode']\n courses_club_unique = 
courses_club.unique()\n\n courses_enrolments_club = course_completion['CourseCode']\n courses_enrolments_club_unique = courses_enrolments_club.unique()\n\n all_course_club = np.concatenate((courses_club_unique,courses_enrolments_club_unique))\n all_course_club_unique = np.unique(all_course_club)\n\n products_club = products['SkuID']\n products_club_unique = products_club.unique()\n\n products_order_club = products_order_summary['SkuID']\n products_order_club_unique = products_order_club.unique()\n\n all_products_club = np.concatenate((products_club_unique, products_order_club_unique))\n all_products_club_unique = np.unique(all_products_club)\n\n course_products_club = np.concatenate((courses_club_unique,courses_enrolments_club_unique, products_club_unique, products_order_club_unique))\n course_products_club_unique = np.unique(course_products_club)\n\n users_from_demographics = demographics['EncryptedRecordID']\n users_from_demographics_unique = users_from_demographics.unique()\n\n users_from_subscription = user_subscription['EncryptedUserID']\n users_from_subscription_unique = users_from_subscription.unique()\n\n users_from_course_completion = course_completion['EncryptedRecordID']\n users_from_course_completion_unique = users_from_course_completion.unique()\n\n users_from_products_order_summary = products_order_summary['EncryptedRecordID']\n users_from_products_order_summary_unique = users_from_products_order_summary.unique()\n\n users_club = np.concatenate((users_from_demographics_unique,users_from_subscription_unique,users_from_course_completion_unique,users_from_products_order_summary_unique))\n users_club_unique = np.unique(users_club)\n\n\n\n # Build a dict with the user index in ulist\n d_ui = {}\n for i in range(len(users_club_unique)):\n user = users_club_unique[i]\n d_ui[user] = i\n\n\n # Build a dict with the course/product index in ulist\n d_ci = {}\n for i in range(len(all_course_club_unique)):\n course = all_course_club_unique[i]\n d_ci[course] = i\n\n course_number = i + 1\n for i in range(len(all_products_club_unique)):\n course = all_products_club_unique[i]\n d_ci[course] = i + course_number\n\n # Build the user x item matrices using scipy lil_matrix\n Mui_tr = sps.lil_matrix((len(users_club_unique), (len(all_course_club_unique) + len(all_products_club_unique))), dtype=np.int8)\n\n # Now fill Mui_tr with the info from cpdtr\n for i in range(len(course_completion)):\n user = course_completion[\"EncryptedRecordID\"].values[i]\n course = course_completion[\"CourseCode\"].values[i]\n ui, ci = d_ui[user], d_ci[course]\n Mui_tr[ui, ci] = 1.0\n\n\n # Now fill Mui_tr with the info from cpdtr\n for i in range(len(products_order_summary)):\n user = products_order_summary[\"EncryptedRecordID\"].values[i]\n course = products_order_summary[\"SkuID\"].values[i]\n ui, ci = d_ui[user], d_ci[course]\n Mui_tr[ui, ci] = 1.0\n\n for i in range(len(user_subscription)):\n user = user_subscription[\"EncryptedUserID\"].values[i]\n course = user_subscription[\"SkuID\"].values[i]\n ui, ci = d_ui[user], d_ci[course]\n Mui_tr[ui, ci] = 1.0\n\n\n user_cred = demographics['Credential'].unique()\n user_unique_cred = []\n for i in range(0, len(user_cred)):\n user_cred[i] = str(user_cred[i]).replace(' ', '')\n fields = str(user_cred[i]).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n user_unique_cred.append(fields[j])\n\n user_unique_cred = list(set(user_unique_cred))\n\n user_expert = demographics['Expertise'].unique()\n user_unique_expert = 
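[]\n # The demographic cells are comma-joined strings: a hypothetical Credential cell \"CPA, CFE\" first has its spaces stripped, is then split on \",\" into [\"CPA\", \"CFE\"], and each non-empty piece joins the unique-feature vocabulary built in these loops.\n user_unique_expert = 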
[]\n for i in range(0, len(user_expert)):\n user_expert[i] = str(user_expert[i]).replace(' ', '')\n fields = str(user_expert[i]).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n user_unique_expert.append(fields[j])\n\n user_unique_expert = list(set(user_unique_expert))\n\n user_interst = demographics['Interest'].unique()\n user_unique_interest = []\n for i in range(0, len(user_interst)):\n user_interst[i] = str(user_interst[i]).replace(' ', '')\n user_interst[i] = str(user_interst[i]).replace('\\\\', ',')\n fields = str(user_interst[i]).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n user_unique_interest.append(fields[j])\n\n user_unique_interest = list(set(user_unique_interest))\n\n user_judi = demographics['Jurisdiction'].unique()\n user_unique_judi = []\n for i in range(0, len(user_judi)):\n user_judi[i] = str(user_judi[i]).replace(' ', '')\n fields = str(user_judi[i]).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n user_unique_judi.append(fields[j])\n\n user_unique_judi = list(set(user_unique_judi))\n\n user_state = demographics['State'].unique()\n user_unique_state = []\n for i in range(0, len(user_state)):\n user_state[i] = str(user_state[i]).replace(' ', '')\n fields = str(user_state[i]).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan' and (not ('.' in fields[j])):\n user_unique_state.append(fields[j])\n\n user_unique_state = list(set(user_unique_state))\n\n\n\n # Build a dict with the user features in ulist\n d_uf = {}\n feature_count = 0\n for i in range(len(user_unique_cred)):\n feature = user_unique_cred[i]\n d_uf['%s_credential' %feature] = i\n\n feature_count = feature_count + i + 1\n for i in range(len(user_unique_state)):\n feature = user_unique_state[i]\n d_uf['%s_state' %feature] = i + feature_count\n\n feature_count = feature_count + i + 1\n for i in range(len(user_unique_expert)):\n feature = user_unique_expert[i]\n d_uf['%s_expert' %feature] = i + feature_count\n\n feature_count = feature_count + i + 1\n for i in range(len(user_unique_interest)):\n feature = user_unique_interest[i]\n d_uf['%s_interest' %feature] = i + feature_count\n\n feature_count = feature_count + i + 1\n for i in range(len(user_unique_judi)):\n feature = user_unique_judi[i]\n d_uf['%s_jurisdiction' %feature] = i + feature_count\n\n #d_ui has list of users\n\n # Build the user x feature matrix using scipy lil_matrix\n Mui_uf = sps.lil_matrix((len(users_club_unique), len(d_uf)), dtype=np.int8)\n\n # Now fill Mui_uf with the user demographic features\n for i in range(len(demographics)):\n if demographics[\"EncryptedRecordID\"].values[i] == '#Name?':\n continue # skip rows whose record ID failed to export\n user = demographics[\"EncryptedRecordID\"].values[i]\n ui = d_ui[user]\n credential = demographics['Credential'].values[i]\n credential = str(credential).replace(' ', '')\n fields = str(credential).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n uf = d_uf[fields[j] + '_credential']\n Mui_uf[ui, uf] = 1.0\n\n\n state = demographics['State'].values[i]\n state = str(state).replace(' ', '')\n fields = str(state).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan' and (not ('.' 
in fields[j])):\n uf = d_uf[fields[j] + '_state']\n Mui_uf[ui, uf] = 1.0\n\n expert = demographics['Expertise'].values[i]\n expert = str(expert).replace(' ', '')\n fields = str(expert).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n uf = d_uf[fields[j] + '_expert']\n Mui_uf[ui, uf] = 1.0\n\n interest = demographics['Interest'].values[i]\n interest = str(interest).replace(' ', '')\n interest = str(interest).replace('\\\\', ',')\n fields = str(interest).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n uf = d_uf[fields[j] + '_interest']\n Mui_uf[ui, uf] = 1.0\n\n jurisdiction = demographics['Jurisdiction'].values[i]\n jurisdiction = str(jurisdiction).replace(' ', '')\n fields = str(jurisdiction).split(',')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n uf = d_uf[fields[j] + '_jurisdiction']\n Mui_uf[ui, uf] = 1.0\n\n Mui_uf = hstack([Mui_uf, identity(len(users_club_unique))])\n\n\n\n required_columns = ['CourseCode', 'AllFieldOfStudy', 'TotalCreditHours', 'Topic', 'Type', 'PriceRange', 'Keywords']\n courses = pd.DataFrame(courses, columns=required_columns)\n\n course_FOS = courses['AllFieldOfStudy'].unique()\n course_unique_FOS = []\n for i in range(0, len(course_FOS)):\n course_FOS[i] = str(course_FOS[i]).replace(';', ',')\n fields = str(course_FOS[i]).split(', ')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n course_unique_FOS.append(fields[j])\n\n course_unique_FOS = list(set(course_unique_FOS))\n\n set_topics = []\n course_topic = courses['Topic'].unique()\n for i in range(0, len(course_topic)):\n set_topics.append([x.strip() for x in (str((course_topic[i]))).split('|')])\n course_unique_topics = []\n for topics in course_topic:\n topic = str(topics).split('|')\n for top in topic:\n if top != '' and top !='None' and top !='nan':\n course_unique_topics.append(str(top))\n\n course_unique_topics = list(set(course_unique_topics))\n\n course_unique_type = list(courses['Type'].unique())\n course_unique_type = [x for x in course_unique_type if str(x) != 'nan']\n\n def get_top_Keywords(keyword_unique, N):\n keyword_set = []\n for topics in keyword_unique:\n topic = str(topics).split(', ')\n for top in topic:\n if top != '':\n keyword_set.append(str(top))\n count = Counter(keyword_set)\n common_list = count.most_common(N)\n keyword_list = [ seq[0] for seq in common_list ]\n return keyword_list\n course_unique_keywords = get_top_Keywords(courses['Keywords'].unique(), 100)\n\n course_price = courses['PriceRange'].unique()\n average_course_price_unique =[]\n for i in range(0, len(course_price)):\n if not (pd.isnull(course_price[i])):\n priceends = course_price[i].split(' - ')\n for j in range(0, len(priceends)):\n priceends[j] = float(priceends[j].replace('$', '').replace(',',''))\n average_course_price_unique.append(int(np.mean(priceends)))\n\n course_bins = np.linspace(0, max(average_course_price_unique), 10)\n\n required_columns = ['ProductCode', 'SkuID', 'Type', 'AllFieldOfStudy', 'TotalCreditHours', 'PriceRange', 'Topic', 'Keywords']\n products = pd.DataFrame(products, columns=required_columns)\n\n product_unique_type = list(products['Type'].unique())\n product_unique_type = [x for x in product_unique_type if str(x) != 'nan']\n\n product_FOS = products['AllFieldOfStudy'].unique()\n product_unique_FOS = []\n for i in range(0, len(product_FOS)):\n product_FOS[i] = 
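str(product_FOS[i]).replace(';', ',')\n # get_top_Keywords sketch (hypothetical cells): the unique keyword strings [\"tax, audit\", \"tax, ethics\"] split into [\"tax\", \"audit\", \"tax\", \"ethics\"]; Counter(...).most_common(2) yields [(\"tax\", 2), (\"audit\", 1)], so the helper returns [\"tax\", \"audit\"].\n product_FOS[i] = 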
str(product_FOS[i]).replace(';', ',')\n fields = str(product_FOS[i]).split(', ')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n product_unique_FOS.append(fields[j])\n\n product_unique_FOS = list(set(product_unique_FOS))\n\n set_topics = []\n product_topic = products['Topic'].unique()\n for i in range(0, len(product_topic)):\n set_topics.append([x.strip() for x in (str((product_topic[i]))).split('|')])\n product_unique_topics = []\n for topics in product_topic:\n topic = str(topics).split('|')\n for top in topic:\n if top != '' and top !='None' and top !='nan':\n product_unique_topics.append(str(top))\n\n product_unique_topics = list(set(product_unique_topics))\n\n product_unique_keywords = get_top_Keywords(products['Keywords'].unique(), 100)\n\n product_price = products['PriceRange'].unique()\n average_product_price_unique =[]\n for i in range(0, len(product_price)):\n if not (pd.isnull(product_price[i])):\n priceends = product_price[i].split(' - ')\n for j in range(0, len(priceends)):\n priceends[j] = float(priceends[j].replace('$', '').replace(',',''))\n average_product_price_unique.append(int(np.mean(priceends)))\n\n product_bins = np.linspace(0, max(average_product_price_unique), 10)\n\n\n\n # Build a dict with the item features in ulist\n d_if = {}\n unique_type = list(set().union(product_unique_type,course_unique_type))\n feature_count = 0\n for i in range(len(unique_type)):\n feature = unique_type[i]\n d_if['%s_type' %feature] = i\n\n unique_FOS = list(set().union(product_unique_FOS,course_unique_FOS))\n feature_count = feature_count + i + 1\n for i in range(len(unique_FOS)):\n feature = unique_FOS[i]\n d_if['%s_FOS' %feature] = i + feature_count\n\n feature_count = feature_count + i + 1\n d_if['TotalCreditHours'] = feature_count\n\n feature_count = feature_count + 1\n unique_topic = list(set().union(product_unique_topics,course_unique_topics))\n for i in range(len(unique_topic)):\n feature = unique_topic[i]\n d_if['%s_topic' %feature] = i + feature_count\n\n unique_keywords = list(set().union(product_unique_keywords,course_unique_keywords))\n feature_count = feature_count + i + 1\n\n for i in range(len(unique_keywords)):\n feature = unique_keywords[i]\n d_if['%s_keyword' %feature] = i + feature_count\n\n price_range = max(max(average_product_price_unique), max(average_course_price_unique))\n price_bins = np.linspace(0, price_range, 10)\n feature_count = feature_count + i + 1\n\n for i in range(len(product_bins)):\n d_if['price Range %s' %i] = i + feature_count\n feature_count = feature_count + i + 1\n #d_ci has list of courses/products\n\n\n\n # Build the user x item matrices using scipy lil_matrix\n Mui_if = sps.lil_matrix((len(course_products_club_unique), len(d_if)), dtype=np.int8)\n\n # Now fill Mui_tr with the info from course features\n for i in range(len(courses)):\n course = courses[\"CourseCode\"].values[i]\n ci = d_ci[course]\n FOS = courses['AllFieldOfStudy'].values[i]\n FOS = str(FOS).replace(';', ',')\n fields = str(FOS).split(', ')\n for j in range(0, len(fields)):\n if fields[j] !='' and fields[j] !='None'and fields[j] !='nan':\n fi = d_if[fields[j] + '_FOS']\n Mui_if[ci, fi] = 1.0\n\n credit_hours = courses[\"TotalCreditHours\"].values[i]\n if not np.isnan(credit_hours):\n fi = d_if['TotalCreditHours']\n Mui_if[ci, fi] = credit_hours\n\n typec = courses['Type'].values[i] + '_type'\n fi = d_if[typec]\n Mui_if[ci, fi] = 1.0\n\n pricerange = courses['PriceRange'].values[i]\n if (not (pd.isnull(pricerange))):\n 
priceends = str(pricerange).split(' - ')\n for j in range(0, len(priceends)):\n priceends[j] = float(priceends[j].replace('$', '').replace(',',''))\n price_avg = int(np.mean(priceends))\n course_price_bin = int(np.digitize(price_avg, price_bins))\n pricerange = 'price Range %s' %course_price_bin\n fi = d_if[pricerange]\n Mui_if[ci, fi] = 1.0\n\n topic = str(products['Topic'].values[0]).split('|')\n for top in topic:\n if top != '' and top !='None' and top !='nan':\n fi = d_if[str(top) + '_topic']\n Mui_if[ci, fi] = 1.0\n\n\n Mui_if = hstack([Mui_if, identity(len(course_products_club_unique))])\n\n\n\n doc = codecs.open(local_path + '/Courses_09202017.txt', 'rU', 'UTF-16')\n courses = pd.read_csv(doc, sep = '\\t')\n doc = codecs.open(local_path + '/Products_09202017.txt', 'rU', 'UTF-16')\n products = pd.read_csv(doc, sep = '\\t')\n\n\n\n from lightfm import LightFM\n from lightfm.evaluation import auc_score, precision_at_k\n\n\n model = LightFM(no_components=30, loss='warp')\n model.fit_partial(\n Mui_tr, item_features = Mui_if, user_features=Mui_uf,\n epochs=30,\n verbose=True)\n \n return(model, d_ui, all_course_club_unique, all_products_club_unique, courses, products)\n\n","repo_name":"abhi-infrrd/init","sub_path":"hybrid_recommender.py","file_name":"hybrid_recommender.py","file_ext":"py","file_size_in_byte":18147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28047899851","text":"import os.path\nfrom collections import defaultdict\nimport pandas as pd\nfrom ckg.graphdb_builder import builder_utils\nfrom ckg.graphdb_builder import mapping as mp\n\n\n#########################\n# Pfam #\n#########################\n\ndef parser(databases_directory, import_directory, download=True, updated_on=None):\n config = builder_utils.get_config(config_name=\"pfamConfig.yml\", data_type='databases')\n entity_header = config['entity_header']\n relationship_headers = config['relationship_headers']\n\n directory = os.path.join(databases_directory, 'Pfam')\n builder_utils.checkDirectory(directory)\n protein_mapping = mp.getMappingForEntity(entity=\"Protein\")\n valid_proteins = list(set(protein_mapping.values()))\n\n ftp_url = config['ftp_url']\n filename = config['full_uniprot_file']\n # url = config['test']\n\n if not os.path.exists(os.path.join(directory, filename)):\n if download:\n builder_utils.downloadDB(ftp_url+filename, directory)\n\n stats = set()\n if os.path.exists(os.path.join(directory, filename)):\n fhandler = builder_utils.read_gzipped_file(os.path.join(directory, filename))\n identifier = None\n description = []\n lines = []\n missed = 0\n entities = set()\n relationships = defaultdict(set)\n is_first = True\n i = 0\n read_lines = 0\n num_entities = 0\n num_relationships = {}\n try:\n for line in fhandler:\n i += 1\n read_lines += 1\n if line.startswith(\"# STOCKHOLM\"):\n if identifier is not None:\n entities.add((identifier, 'Functional_region', name, \" \".join(description), \"PFam\"))\n if len(entities) == 100:\n print_files(entities, entity_header, outputfile=os.path.join(import_directory, 'Functional_region.tsv'), is_first=is_first)\n num_entities += len(entities)\n if 'mentioned_in_publication' in relationships:\n print_files(relationships['mentioned_in_publication'], relationship_headers['mentioned_in_publication'], outputfile=os.path.join(import_directory, 'Functional_region_mentioned_in_publication.tsv'), is_first=is_first)\n if 'mentioned_in_publication' not in num_relationships:\n 
num_relationships['mentioned_in_publication'] = 0\n num_relationships['mentioned_in_publication'] += len(relationships['mentioned_in_publication'])\n if 'found_in_protein' in relationships:\n print_files(relationships['found_in_protein'], relationship_headers['found_in_protein'], outputfile=os.path.join(import_directory, 'Functional_region_found_in_protein.tsv'), is_first=is_first, filter_for=('END_ID', valid_proteins))\n if 'found_in_protein' not in num_relationships:\n num_relationships['found_in_protein'] = 0\n num_relationships['found_in_protein'] += len(relationships['found_in_protein'])\n entities = set()\n relationships = defaultdict(set)\n is_first = False\n identifier = None\n description = []\n elif line.startswith(\"#=GF\"):\n data = line.rstrip('\\r\\n').split()\n if 'AC' in data:\n identifier = data[2].split('.')[0]\n elif 'DE' in data:\n name = \" \".join(data[2:])\n elif 'RM' in data:\n relationships['mentioned_in_publication'].add((identifier, data[2], \"MENTIONED_IN_PUBLICATION\", \"PFam\"))\n elif 'CC' in data:\n description.append(\" \".join(data[2:]))\n elif not line.startswith('//'):\n data = line.rstrip('\\r\\n').split()\n protein, positions = data[0].split('/')\n protein = protein.replace('.', '-')\n start, end = positions.split('-')\n sequence = data[1]\n relationships['found_in_protein'].add((identifier, protein, \"FOUND_IN_PROTEIN\", start, end, sequence, \"PFam\"))\n if protein.split('-')[0] != protein:\n relationships['found_in_protein'].add((identifier, protein.split('-')[0], \"FOUND_IN_PROTEIN\", start, end, sequence, \"PFam\"))\n except UnicodeDecodeError:\n lines.append(i)\n missed += 1\n\n fhandler.close()\n\n if len(entities) > 0:\n print_files(entities, entity_header, outputfile=os.path.join(import_directory,'Functional_region.tsv'), is_first=is_first)\n num_entities += len(entities)\n print_files(relationships['mentioned_in_publication'], relationship_headers['mentioned_in_publication'], outputfile=os.path.join(import_directory,'Functional_region_mentioned_in_publication.tsv'), is_first=is_first)\n num_relationships['mentioned_in_publication'] += len(relationships['mentioned_in_publication'])\n print_files(relationships['found_in_protein'], relationship_headers['found_in_protein'], outputfile=os.path.join(import_directory,'Functional_region_found_in_protein.tsv'), is_first=is_first)\n num_relationships['found_in_protein'] += len(relationships['found_in_protein'])\n\n stats.add(builder_utils.buildStats(num_entities, \"entity\", \"Functional_region\", \"Pfam\", 'Functional_region.tsv', updated_on))\n\n for rel in num_relationships:\n stats.add(builder_utils.buildStats(num_relationships[rel], \"relationship\", rel.upper(), \"Pfam\", 'Functional_region_'+rel+'.tsv', updated_on))\n\n builder_utils.remove_directory(directory)\n\n return stats\n\n\ndef print_files(data, header, outputfile, is_first, filter_for=None):\n df = pd.DataFrame(list(data), columns=header)\n if filter_for is not None:\n df = df[df[filter_for[0]].isin(filter_for[1])]\n if not df.empty:\n with open(outputfile, 'a', encoding='utf-8') as f:\n df.to_csv(path_or_buf=f, sep='\\t',\n header=is_first, index=False, quotechar='\"',\n line_terminator='\\n', escapechar='\\\\')\n\n\nif __name__ == \"__main__\":\n base = os.path.dirname(os.path.abspath(__file__))\n db_path = os.path.join(base, \"../../../../data/databases\")\n import_path = os.path.join(base, \"../../../../data/imports/databases\")\n\n parser(databases_directory=db_path, import_directory=import_path, 
download=False)\n","repo_name":"MannLabs/CKG","sub_path":"ckg/graphdb_builder/databases/parsers/pfamParser.py","file_name":"pfamParser.py","file_ext":"py","file_size_in_byte":6780,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"61"} +{"seq_id":"28499424419","text":"import sys\nimport csv\nimport os\n\n\n#################################################\nusage = \"python3 annotGenes.py file.gff3 file_diamond_SP.tsv file_diamond_NR.tsv file_eggnog.tsv file_interpro.tsv '[pref keyword1];keyowordNR2; ...'\"\n#################################################\n\ntry: _, file_gff,file_diamond_SP,file_diamond_NR,file_eggnog,file_interpro,prefs=sys.argv\nexcept: sys.exit(\"Correct usage is:\\n\"+usage)\n\nout_file = 'genes_anotados.csv'\nUNIPROT_API_PARALLEL=100\n\n\ndef bestName(names, pref=['['], filterPs = ['[', ']', 'protein', '_', 'PREDIC', 'PUTAT', 'putat', '.', 'hypothetic']):\n names = list(names)\n if len(names) < 1:\n return ''\n if len(names) == 1:\n return names[0]\n txt = ' '.join(names)\n pScore = {p: txt.count(p) for p in set(txt.split()) if not p.isdigit() and not any([fp in p for fp in filterPs])}\n def pontuar(name):\n a = min([(pref.index(p) if p in name else len(pref)) for p in pref]) ### best pref < melhor\n b = -sum([pScore[p] if p in pScore else 0 for p in name.split()])### melhor concenso > melhor\n c = -len(name) ### maior texto\n return (a, b, c)\n return sorted(names, key=pontuar)[0]\n\n\ndef parseDiamondNR(file, mrna2gene, pref=['[']):\n ## Needs Seq Title in Last Column!\n dt = [ x for x in list(csv.reader(open(file), delimiter='\\t')) if len(x) > 2]\n res = {}\n for hit in dt:\n g = mrna2gene[hit[0]]\n if g in res:\n res[g][0].add(hit[1])\n res[g][1].add(hit[-1])\n else:\n res[g] = [set([hit[1]]), set([hit[-1]])]\n for k, v in res.items():\n res[k][1] = bestName(res[k][1], pref)\n return res\n\n\ndef parseDiamond(file, mrna2gene):\n dt = [ x for x in list(csv.reader(open(file), delimiter='\\t')) if len(x) > 2]\n res = {}\n for hit in dt:\n g = mrna2gene[hit[0]]\n if g in res:\n res[g].add(hit[1])\n else:\n res[g] = set([hit[1]])\n return res\n\n\ndef parseInterpro(file, mrna2gene):\n dt = [ x for x in list(csv.reader(open(file), delimiter='\\t')) if len(x) > 2 and not x[0].startswith('#')]\n res = {}\n #0 Protein Accession (e.g. P51587)\n #1 Sequence MD5 digest (e.g. 14086411a2cdf1c4cba63020e1622579)\n #2 Sequence Length (e.g. 3418)\n #3 Analysis (e.g. Pfam / PRINTS / Gene3D)\n #4 Signature Accession (e.g. PF09103 / G3DSA:2.40.50.140)\n #5 Signature Description (e.g. BRCA2 repeat profile)\n #6 Start location\n #7 Stop location\n #8 Score - is the e-value of the match reported by member database method (e.g. 3.1E-52)\n #9 Status - is the status of the match (T: true)\n #10 Date - is the date of the run\n #11 (InterProScan annotations - accession (e.g. IPR002093) - optional column; only displayed if -iprscan option is switched on)\n #12 (InterProScan annotations - description (e.g. BRCA2 repeat) - optional column; only displayed if -iprscan option is switched on)\n #13 (GO annotations (e.g. GO:0005515) - optional column; only displayed if --goterms option is switched on)\n #14 (Pathways annotations (e.g. 
REACT_71) - optional column; only displayed if --pathways option is switched on)\n def f(x):\n interesse = [3, 4, 5]\n return [[x[i] for i in interesse] + [x[13] if len(x) > 13 else '', x[14] if len(x) > 14 else ''], \n ['interpro', x[11], x[12], '', ''] if len(x) > 12 else ['', '', '', '', '']]\n \n for hit in dt:\n g = mrna2gene[hit[0]]\n if g in res:\n res[g].extend(f(hit))\n else:\n res[g] = f(hit)\n return res\n\ndef parseEggNog(file, mrna2gene):\n dt = [ x for x in list(csv.reader(open(file), delimiter='\\t')) if len(x) > 2 and not x[0].startswith('#')]\n res = {}\n #0 query_name\n #1 seed_eggNOG_ortholog\n #2 seed_ortholog_evalue\n #3 seed_ortholog_score\n #4 best_tax_level\n #5 Preferred_name\n #6 GOs\n #7 EC\n #8 KEGG_ko\n #9 KEGG_Pathway\n #10 KEGG_Module\n #11 KEGG_Reaction\n #12 KEGG_rclass\n #13 BRITE\n #14 KEGG_TC\n #15 CAZy\n #16 BiGG_Reaction\n def f(x):\n interesse = [5, 6, 8, 9, 16]\n return [x[i] for i in interesse]\n \n for hit in dt:\n g = mrna2gene[hit[0]]\n if g in res:\n res[g].append(f(hit))\n else:\n res[g] = [f(hit)]\n return res\n\n\ndef getUniprotReg(ids, limi=100, dbs_interesse=['GO','KEGG','PFAM','Pfam','Reactome','SMART','STRING','UniPathway']):\n baixados = {}\n total = len(ids)\n interesse = ['ID ', 'DE ', 'OS ', 'OC ', 'OX ', 'DR ', 'CC -!- ']\n \n def parseReg(rows):\n \n def getByPrefix(p):\n l = len(p)\n return [ln[l:].strip() for ln in rows if ln.startswith(p)]\n tID = getByPrefix('ID ')\n idR = tID[0] if len(tID) > 0 else ''\n _ent = idR.split(\" \")[0]\n _isRev = ' Reviewed;' in idR\n os = getByPrefix('OS ')\n _os = os[0] if len(os) > 0 else ''\n ox = getByPrefix('OX ')\n _ox = (ox[0].split(\"NCBI_TaxID=\")[1].split(\";\")[0] if 'NCBI_TaxID=' in ox[0] else None) if len(ox) > 0 else None\n\n _oc = '; '.join(getByPrefix('OC ')).replace(\";;\", ';')\n\n names = getByPrefix('DE ')\n _name = (names[0].split('RecName: Full=')[1] if 'RecName: Full=' in names[0] else names[0]) if len(names) > 0 else ''\n\n _dbs = [(y[0], y[1], y[2]) for y in [x.split(';') for x in getByPrefix('DR ')] if y[0] in dbs_interesse] + [('Uniprot', _ent, 'Reviwed' if _isRev else 'Unreviwed')]\n _coments = '|'.join(getByPrefix('CC -!- '))\n\n return _name, _os, _oc, _ox if not _ox is None else '', _dbs, _coments\n \n \n def fParse(lines):\n return parseReg([x for x in lines if any([x.startswith(y) for y in interesse])])\n \n while len(ids) > 0:\n atual = ids[:limi]\n fs = [x+'.txt' for x in atual]\n os.system(' '.join(['wget https://www.uniprot.org/uniprot/%s.txt 1>/dev/null 2>/dev/null &' % k for k in atual]) + ' wait')\n ids = ids[limi:]\n files = set(os.listdir()).intersection(fs)\n baixados.update({b.replace('.txt', ''): fParse([x.strip() for x in open(b).readlines()]) for b in files})\n for f in files:\n os.remove(f)\n print('faltam: %d...' 
% len(ids))\n print('%d de %d obtidos' % (len(baixados), total))\n return baixados\n\n\ndef mesclar(mult, king='Viridiplantae'):\n if len([x for x in mult if king in x[2]]) > 0:\n mult = [x for x in mult if king in x[2]]\n ret = list(mult[0])\n if len(mult) > 1:\n ret[0] = max([x[0] for x in mult])\n idsj = [x[1] for x in ret[4]]\n for o in mult[1:]:\n for dbE in o[4]:\n if not dbE[1] in idsj:\n ret[4].append(dbE)\n for com in o[5]:\n if not com in ret[5]:\n ret[5] + '|' + com\n return tuple(ret)\n\n\ndef addInterpro(gene_mesclado, ipro_data):\n for gene, datas in ipro_data.items():\n for data in datas:\n dbKs = [(data[0], data[1], data[2]), ('GO', data[3], '')]\n ## fix pathways\n if gene in gene_mesclado:\n \n try:\n ks = [x[1] for x in gene_mesclado[gene][4]]\n except:\n print(gene_mesclado[gene])\n for dbK in dbKs:\n if not dbK in ks:\n gene_mesclado[gene][4].extend([dbK])\n else:\n gene_mesclado[gene] = ('','','','',dbKs,'')\n return gene_mesclado\n\n\ndef addEggnog(gene_mesclado, eggnog_data):\n for gene, datas in eggnog_data.items():\n if gene in gene_mesclado:\n for data in datas:\n if len(data[0]) > 1:\n gene_mesclado[gene] = list(gene_mesclado[gene])\n gene_mesclado[gene][0] += '|' + data[0]\n if 'GO' in data[1]:\n entryes = [x[1] for x in gene_mesclado[gene][4]]\n for go in data[1].split(\",\"):\n if not go in entryes:\n gene_mesclado[gene][4].append(('GO', go, ''))\n if ':' in data[2]:\n ko = data[2].split(\":\")[1]\n if not ko in entryes:\n gene_mesclado[gene][4].append(('KO', ko, ''))\n if len(data[3]) > 1:\n gene_mesclado[gene][4].append(('KEGG_Pathway', data[3], ''))\n if len(data[4]) > 1:\n gene_mesclado[gene][4].append(('BiGG_Reaction', data[4], ''))\n else:\n dbks = []\n if 'GO' in data[1]:\n dbks = [('GO', x, '') for x in data[1].split(\",\")]\n if len(data[3]) > 1:\n dbks.append(('KEGG_Pathway', data[3], ''))\n if len(data[4]) > 1:\n dbks.append(('BiGG_Reaction', data[4], ''))\n gene_mesclado[gene] = (data[0],'','','',dbks,'')\n return gene_mesclado\n\n\ndef regs2table(data, nrData):\n ret = []\n for gene, records in data.items():\n txt = [x for x in (records[0] + ' ' + records[5]).replace('|', ' ').replace('SubName: Full=', '').replace('Full=', '').split(' ') if not x in [\n 'MISCELLANEOUS:',\n 'uncharacterized',\n 'protein.',\n 'PROTEIN.',\n 'Putative',\n 'PUTATIVE',\n 'PROTEIN-LIKE.',\n 'hypothetical',\n 'Hypothetical',\n 'Uncharacterized'\n ]]\n \n old_name = records[0]\n nrN = ''\n if gene in nrData:\n nm = nrN = (nrData[gene][1] + ' ')\n if len(nm) > 1: \n old_name = nm + ' | ' + old_name\n nome = nrN + '[%s] %s' % (sorted(txt, key=lambda e: (-len(e), txt.count(e)))[0], ' :: '.join(set(old_name.split(\"|\"))))\n \n ks = ','.join(set([x[0]+':'+x[1] for x in records[4] if len(x[1]) > 3])).replace('|', ',').split(\",\")\n gos = ','.join(set([x.replace(' ', '').replace(\"GO:GO:\", 'GO:') for x in ks if x.startswith('GO:') and len(x) > 6]))\n dbs = ','.join(set([x for x in ks if not x.startswith('GO:')]))\n ret.append((\n gene.replace('\\t', ' ').replace(';', ','), \n nome.replace('\\t', ' ').replace(';', ','), \n records[1].replace('\\t', ' ').replace(';', ','), records[2].replace('\\t', ' ').replace(';', ','), \n gos.replace('\\t', ' ').replace(';', ','),\n ','.join(nrData[gene][0] if gene in nrData else []),\n dbs.replace('\\t', ' ').replace(';', ','),\n (records[5] + ' | '.join(set([x[0]+':'+x[2] for x in records[4] if len(x[2]) > 3]))).replace('\\t', ' ').replace(';', ',')\n ))\n for gene, records in nrData.items():\n if not gene in data:\n ret.append((\n 
gene.replace('\\t', ' ').replace(';', ','), \n records[1], \n '-', \n '-',\n ','.join(nrData[gene][0]),\n dbs.replace('\\t', ' ').replace(';', ','),\n '-'\n )) \n return ret\n\n\n\nprint('[1/7] parseando gff3 ...')\nmrna2gene = {\n x.split(\"ID=\")[1].split(\";\")[0]: x.split(\"Parent=\")[1].split(\";\")[0] for x in\n [m[8] for m in list(csv.reader(open(file_gff), delimiter='\\t')) if len(m) > 2 and m[2] == 'mRNA']\n}\n\nprint('[2/7] parseando diamond NR ...')\ndiamond_NR = parseDiamondNR(file_diamond_NR, mrna2gene, prefs.split(';'))\ngenes_NR = set(diamond_NR)\n\nprint('[3/7] parseando diamond swiss prot ...')\ndiamond_SP = parseDiamond(file_diamond_SP, mrna2gene)\ngenes_SP = set(diamond_SP)\nswps = list(set(','.join([','.join(v) for v in diamond_SP.values()]).split(',')))\nregs = getUniprotReg(swps, limi=UNIPROT_API_PARALLEL)\n\nprint('[4/7] parseando EggNOG ...')\neggnog = parseEggNog(file_eggnog, mrna2gene)\ngenes_EN = set(eggnog)\n\nprint('[5/7] parseando InterproScan ...')\ninterpro = parseInterpro(file_interpro, mrna2gene)\ngenes_IP = set(interpro)\n\nprint('[6/7] mesclando dados ...')\ng_mesc = {k: mesclar([regs[x] for x in v if x in regs]) for k, v in diamond_SP.items()} \ng_mesc_int = addInterpro(g_mesc, interpro)\ng_mesc_int_egg = addEggnog(g_mesc_int, eggnog)\nparsed_mescled_table = regs2table(g_mesc_int_egg, diamond_NR)\n\nprint('[7/7] persistindo ...')\nopen(out_file, 'w').writelines(['\\t'.join(x) + '\\n' for x in parsed_mescled_table])\nopen(out_file + '_agbase.txt', 'w').writelines(['\\n'.join([(x[0] +'\\t'+ y) for y in x[4].split(',')]) + '\\n' for x in parsed_mescled_table if 'GO:' in x[4]])\n\nall_G = genes_NR.union(genes_SP).union(genes_EN).union(genes_IP)\ngenes_N, genes_S, genes_E, genes_I = len(genes_NR), len(genes_SP), len(genes_EN), len(genes_IP)\ntotal = len(all_G)\n\nprint('%d (%.1f%%) (%d so nele) genes anotados no NR' % (genes_N, genes_N/total*100, len(genes_NR.difference((genes_SP).union(genes_EN).union(genes_IP)))))\nprint('%d (%.1f%%) (%d so nele) genes anotados no SWPROT' % (genes_S, genes_S/total*100, len(genes_SP.difference(genes_NR.union(genes_EN).union(genes_IP)))))\nprint('%d (%.1f%%) (%d so nele) genes anotados no Eggnog' % (genes_E, genes_E/total*100, len(genes_EN.difference(genes_NR.union(genes_SP).union(genes_IP)))))\nprint('%d (%.1f%%) (%d so nele) genes anotados no InterproScan' % (genes_I, genes_I/total*100, len(genes_IP.difference(genes_NR.union(genes_SP).union(genes_EN)))))\nprint('TOTAL: %d genes anotados' % total)\n\nprint('\\nall finished. 
sotored at: ' + out_file)\n\n","repo_name":"MiqueiasFernandes/bioinformatics","sub_path":"annotGenes.py","file_name":"annotGenes.py","file_ext":"py","file_size_in_byte":13421,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"18554747405","text":"import math\n\ndef isLeapYear(year):\n if year > 1917:\n return True if year%400==0 or (year%4==0 and year%100!=0) else False\n else:\n return True if year%4==0 else False\n\n#print (isLeapYear(2008))\n\n# day = 256 - X\n# month = 256 % day\ndaysWithoutFebruary = 31+31+30+31+30+31+31\nyear = int(input())\n\n\nfeb = 28\nif year==1918:\n feb = 28-14\nif isLeapYear(year):\n feb = 29\n\ndays = daysWithoutFebruary+feb\nday = 256-days\nmonth = math.ceil(days/30)\n\nprint ('{:02d}.{:02d}.{}'.format(day,month,year))\n","repo_name":"gakonst/Challenges","sub_path":"Hackerrank/WeekOfCode29/dayOfTheProgrammer.py","file_name":"dayOfTheProgrammer.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4382205208","text":"import hashlib\nimport socket\n\n\ndef server_config(a=0):\n ip1 = socket.gethostbyname(socket.gethostname())\n ip2 = ip1\n host = str(ip2) # Server configurations\n chat_port = 3000\n video_port = 4000\n audio_port = 5000\n TCP_Port = 9001\n if a == 1:\n return host\n elif a == 2:\n return video_port\n elif a == 3:\n return audio_port\n elif a == 4:\n return chat_port\n elif a == 5:\n return TCP_Port\n elif a == 6:\n return ip1\n\n\ndef split(a, b=1):\n sep = ' '\n if b == 1: # code to split username\n uname = a.split(sep, 1)[0]\n return uname.lower()\n elif b == 2:\n addr1 = format(a) # code to split ip address\n addr2 = addr1.split(sep, 1)[0]\n addr2 = addr2.replace('(', '')\n addr2 = addr2.replace(',', '')\n return addr2\n\n\ndef encrypt_code(p):\n m = hashlib.md5()\n m.update(p.encode('utf-8'))\n p2 = m.hexdigest()\n return p2\n","repo_name":"silimkhanpr/Ghannas","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3586699582","text":"def getcopieractiondescription(copyid, copieritem):\n\n\tdata = copieritem.getcopieractioninstruction(copyid)\n\n\tif data['action'] == \"Scrape TV Shows\":\n\t\toutcome = \"Request \" + copyid + \" sent to copier to scrape TV Show folders\"\n\n\telif data['action'] == \"Copy File\":\n\t\tspace = \"      \"\n\t\tarrows = space + space + \"↓\"\n\t\tindent = space + space + space + space + space\n\t\tlineofarrows = arrows + arrows + arrows + arrows + arrows\n\t\toutcome = \"Request \" + copyid + \" sent to copier to Copy File:
\" + indent + data['source'] + \"
\"\n\t\toutcome = outcome + indent + lineofarrows + \"
\" + indent + data['target']\n\n\telse:\n\t\toutcome = \"Unknown Action Type\"\n\n\treturn outcome\n\n","repo_name":"johnpcole/Download-Manager","sub_path":"codebase/manager_component/copiertracker_subcomponent/copiertracker_privatefunctions.py","file_name":"copiertracker_privatefunctions.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70889338755","text":"from pybrain3 import FeedForwardNetwork, LinearLayer, SigmoidLayer, BiasUnit, FullConnection, RecurrentNetwork, \\\r\n TanhLayer\r\n\r\n#network = RecurrentNetwork() # nowa siec\r\nnetwork=FeedForwardNetwork();\r\n\r\ninLayer = LinearLayer(35) # warstwa wejsciowa\r\n#hiddenLayer = SigmoidLayer(5) # tworze ukryta warstwe\r\nhiddenLayer=TanhLayer(5);\r\noutLayer = LinearLayer(1) # tworze warstwe wyjsciowa\r\nbias = BiasUnit() # inicjalizuje Bias\r\n\r\nnetwork.addInputModule(inLayer)\r\nnetwork.addModule(bias)\r\nnetwork.addModule(hiddenLayer)\r\nnetwork.addOutputModule(outLayer)\r\n\r\nbias_to_hidden = FullConnection(bias, hiddenLayer) # lacze warstwy\r\nin_to_hidden = FullConnection(inLayer, hiddenLayer)\r\nhidden_to_out = FullConnection(hiddenLayer, outLayer)\r\n\r\nnetwork.addConnection(bias_to_hidden) # dodaje polaczenie do sieci\r\nnetwork.addConnection(in_to_hidden)\r\nnetwork.addConnection(hidden_to_out)\r\n\r\nnetwork.sortModules()\r\n","repo_name":"Greg96/Grzegorz_Planeta_PSI_CP_GR3","sub_path":"Scenariusz_2/siec.py","file_name":"siec.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1169715752","text":"from turtle import Screen, Turtle\nimport time\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"black\")\nscreen.title(\"My Snake Game\")\nsnake_colours=[\"red\", \"green\", \"blue\"]\nx_start_position = [0,20,40]\ny_start_position = []\nturtle_list=[]\npen_y_position=250\n\ndef pen_write(text,pos):\n penTurtle = Turtle()\n penTurtle.pencolor(\"white\")\n penTurtle.penup()\n penTurtle.goto(pos)\n penTurtle.pendown()\n penTurtle.write(text)\n\n\n\nfor i in range(0,3):\n turtle = Turtle(shape=\"square\")\n turtle.shapesize(1,1,1)\n turtle.color(snake_colours[i])\n # turtle.write(i)\n turtle.penup()\n turtle.goto(x=x_start_position[i], y=0)\n print(turtle.position()[0])\n turtle_list.append(turtle)\n if i ==2:\n pen_write(\"starting\", turtle_list[2].pos())\n\nscreen.update()\n\nprint(turtle_list)\n\n\ndef forward():\n for j in range(3):\n # turtle_list[j].pendown()\n turtle_list[j].forward(5)\n screen.update()\n # pen_write(\"forward\")\n\n\n\ndef backward():\n for j in range(3):\n # turtle_list[j].pendown()\n turtle_list[j].backward(50)\n screen.update()\n\n\ndef left():\n time.sleep(0.1)\n turtle_list[2].left(90)\n pen_write(\"left\", turtle_list[2].pos())\n turtle_list[2].forward(40)\n turtle_list[1].forward(20)\n turtle_list[1].left(90)\n turtle_list[1].forward(20)\n turtle_list[0].forward(40)\n turtle_list[0].left(90)\n screen.update()\n\n\n\ndef right():\n time.sleep(0.1)\n turtle_list[2].right(90)\n pen_write(\"right\", turtle_list[2].pos())\n turtle_list[2].forward(40)\n turtle_list[1].forward(20)\n turtle_list[1].right(90)\n turtle_list[1].forward(20)\n turtle_list[0].forward(40)\n turtle_list[0].right(90)\n screen.update()\n\n\n\n\n# screen.update()\n# screen.onkey(forward,\"w\")\n# screen.onkey(backward,\"s\")\n# screen.onkey(left,\"a\")\n# 
screen.onkey(right,\"d\")\n\nscreen.listen()\n\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n forward()\n screen.onkey(backward,\"s\")\n screen.onkey(left,\"a\")\n screen.onkey(right,\"d\")\n\n\n\n\nscreen.exitonclick()\nscreen.mainloop()","repo_name":"chenricky/snakeGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2040718918","text":"#\n# @lc app=leetcode id=621 lang=python\n#\n# [621] Task Scheduler\n#\n\nfrom heapq import heappop, heappush\n# @lc code=start\nclass Solution(object):\n def leastInterval(self, tasks, n):\n \"\"\"\n :type tasks: List[str]\n :type n: int\n :rtype: int\n \"\"\"\n\n counter = {}\n for task in tasks:\n counter[task] = counter.get(task, 0) + 1\n\n heap = []\n for task, count in counter.items():\n heappush(heap, (-count, -1, task))\n\n q = []\n curr = 0\n while heap or q:\n \n if q:\n count, i, task = q[0]\n if curr - i > n or i == -1:\n heappush(heap, q.pop(0))\n\n if heap:\n count, i, task = heap[0]\n if curr - i > n or i == -1:\n heappop(heap)\n if count + 1 != 0:\n q.append((count+1, curr, task))\n\n curr += 1\n\n return curr\n\n\n\n \n# @lc code=end\nprint(Solution().leastInterval([\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"], 2))\n# print(Solution().leastInterval([\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"], 0))\n# print(Solution().leastInterval([\"A\",\"A\",\"A\",\"A\",\"A\",\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\"], 2))\n","repo_name":"aryanjain28/DSA","sub_path":"revision_150/621.task-scheduler.py","file_name":"621.task-scheduler.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33465210274","text":"from __future__ import absolute_import, unicode_literals\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r'^oneyouvariant/add/(\\w+)/(\\w+)/(\\d+)/$',\n views.create,\n name='experiments_oneyouvariant_add'\n ),\n url(\n r'^oneyouvariant/(\\d+)/edit/$',\n views.edit,\n name='experiments_oneyouvariant_edit'\n ),\n]\n","repo_name":"danmorley/nhs-example","sub_path":"oneYou2/experiments/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2558616544","text":"\nfrom unittest import TestCase\n\nfrom lepl.lexer.matchers import Token\nfrom lepl.matchers.derived import Word, Letter\nfrom lepl.offside.lexer import Indent, Eol\n\n\n# pylint: disable-msg=R0201\n# unittest convention\nclass IndentTest(TestCase):\n '''\n Test the `Indent` token.\n '''\n \n def test_indent(self):\n '''\n Test simple matches against leading spaces.\n '''\n #basicConfig(level=DEBUG)\n text = '''\nleft\n four'''\n word = Token(Word(Letter()))\n indent = Indent()\n line1 = indent('') + Eol()\n line2 = indent('') & word('left') + Eol()\n line3 = indent(' ') & word('four') + Eol()\n expr = (line1 & line2 & line3)\n expr.config.default_line_aware()\n parser = expr.get_parse_string()\n result = parser(text)\n assert result == ['', '', 'left', ' ', 'four'], result\n \n\nclass TabTest(TestCase):\n '''\n Check that tabs are expanded.\n '''\n \n def test_indent(self):\n '''\n Test simple matches against leading spaces.\n '''\n #basicConfig(level=DEBUG)\n text = '''\n onespace\n \\tspaceandtab'''\n word = Token(Word(Letter()))\n indent = Indent()\n line1 = indent('') & ~Eol()\n line2 = indent(' ') & word('onespace') & ~Eol()\n line3 = indent(' ') & word('spaceandtab') & ~Eol()\n expr = line1 & line2 & line3\n expr.config.default_line_aware(tabsize=4).trace(True)\n parser = expr.get_parse_string()\n result = parser(text)\n #print(result)\n assert result == ['', ' ', 'onespace', ' ', 'spaceandtab'], result\n \n \n","repo_name":"willtang/lyx2ebook","sub_path":"src/lepl/offside/_test/indentation.py","file_name":"indentation.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"71334798274","text":"from chat_messages.models import ChatParticipant\nfrom django.contrib.auth import get_user_model\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\n\nfrom agent.users.constants import SYSTEM_USER_ID\n\nfrom .models import Chat, ChatMessage\n\n\ndef get_system_user():\n User = get_user_model()\n system_user = User.objects.get(pk=SYSTEM_USER_ID)\n return system_user\n\n\ndef add_participant_to_chat(chat, user):\n participant = ChatParticipant(chat=chat, user=user)\n participant.save()\n return participant\n\n\ndef add_ai_bot_to_chat(chat):\n system_user = get_system_user()\n\n participant = ChatParticipant(chat=chat, user=system_user)\n participant.save()\n return participant\n\n\ndef create_chat(name, user):\n chat = Chat.objects.create(name=name)\n add_participant_to_chat(chat, user)\n add_ai_bot_to_chat(chat)\n return chat\n\n\ndef get_my_chats(user):\n if user.is_anonymous:\n raise Exception(\"Authentication required\")\n\n chats = Chat.objects.filter(participants__user=user).distinct()\n return chats\n\n\ndef write_message(id, user, chat_id, content):\n if user.is_anonymous:\n raise Exception(\"Authentication required\")\n\n chat = Chat.objects.get(id=chat_id)\n chat_participant = ChatParticipant.objects.get(chat_id=chat_id, user_id=user.id)\n 
print(chat_participant)\n if not chat_participant:\n raise Exception(\"Not authorized to send messages in this chat\")\n\n chat_message = ChatMessage(id=id, chat=chat, sender_user=user, content=content)\n chat_message.save()\n return chat_message\n\n\ndef apply_pagination(queryset, kwargs):\n paginator = Paginator(queryset, kwargs.get(\"first\", 10))\n page = kwargs.get(\"page\", 1)\n\n try:\n items = paginator.page(page)\n except PageNotAnInteger:\n items = paginator.page(1)\n except EmptyPage:\n items = paginator.page(paginator.num_pages)\n\n return items\n","repo_name":"kokokenada/agent","sub_path":"packages/server/agent/chat_messages/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14324001353","text":"import logging\nfrom typing import FrozenSet, Type\n\nfrom pymmortals.datatypes.interfaces import ValidatorInterface\nfrom pymmortals.datatypes.intermediary.deploymentmodelproperty import \\\n DeploymentModelProperty\nfrom pymmortals.datatypes.intermediary.deploymentmodelresource import \\\n DeploymentModelResource\nfrom pymmortals.datatypes.serializable import Serializable, ValuedEnum\nfrom pymmortals.generated.mil.darpa.immortals.core.api.validation.validators import Validators\nfrom pymmortals.generated.mil.darpa.immortals.core.api.validation.validatortype import ValidatorType\nfrom pymmortals.utils import load_class_by_classpath\n\n_logger = logging.getLogger('validation')\n\n\ndef load_python_validator_class(validator: Validators) -> Type[ValidatorInterface]:\n if validator.validatorType == ValidatorType.PYTHON:\n clazz = load_class_by_classpath(validator.validatorClasspath)\n assert (issubclass(clazz, ValidatorInterface))\n return clazz\n\n elif validator.validatorType == ValidatorType.JAVA:\n raise Exception('Java validators not yet supported!: Validator: \"' + validator.validatorType + '\"!')\n\n else:\n raise Exception('Unexpected validator type \"' + validator.validatorType + '\"!')\n\n\n# noinspection PyPep8Naming\nclass Coordinates(Serializable):\n @classmethod\n def _validator_values(cls):\n return dict()\n\n def __init__(self,\n latitude: float,\n longitude: float,\n altitude: float,\n accuracy: float,\n acquisitionTime: float,\n hasAltitude: bool,\n hasAccuracy: bool,\n provider: str):\n super().__init__()\n self.latitude = latitude\n self.longitude = longitude\n self.altitude = altitude\n self.accuracy = accuracy\n self.acquisitionTime = acquisitionTime\n self.hasAltitude = hasAltitude\n self.hasAccuracy = hasAccuracy\n self.provider = provider\n\n\n# TODO: Change to enum!\n\nclass AnalyticsEventType(ValuedEnum):\n MY_IMAGE_SENT = 'MyImageSent',\n FIELD_IMAGE_RECEIVED = 'FieldImageReceived',\n MY_LOCATION_PRODUCED = 'MyLocationProduced',\n FIELD_LOCATION_UPDATED = 'FieldLocationUpdated',\n CLIENT_START = 'ClientStart',\n CLIENT_SHUTDOWN = 'ClientShutdown',\n DFU_MISSMATCH_ERROR = 'DfuMissmatchError',\n UNEXEPCTED_IGNORABLE_ERROR = 'UnexepctedIgnorableError',\n TOOLING_VALIDATION_SERVER_STARTED = 'Tooling_ValidationServerStarted',\n TOOLING_VALIDATION_SERVER_STOPPED = 'Tooling_ValidationServerStopped',\n TOOLING_VALIDATION_SERVER_CLIENT_CONNECTED = 'Tooling_ValidationServerClientConnected',\n TOOLING_VALIDATION_SERVER_CLIENT_DISCONNECTED = 'Tooling_ValidationServerClientDisconnected',\n TOOLING_VALIDATION_FINISHED = 'Tooling_ValidationFinished',\n ANALYSIS_EVENT_OCCURRED = 'Analysis_EventOccurred'\n\n @property\n def tag(self) -> str:\n 
return self._value_\n\n @classmethod\n def all_tags(cls) -> FrozenSet[str]:\n return cls._values()\n\n\n\"\"\"\nThis indicates the tests that must have a specified state in order to pass an intent check.\n\"\"\"\nintent_satisfaction_tests = {\n # The baseline set of tests that must pass in order to pass the basic mission intent of sending LatestSA\n 'baseline': frozenset([\n Validators.CLIENT_LOCATION_PRODUCE,\n Validators.CLIENT_LOCATION_SHARE,\n Validators.CLIENT_IMAGE_PRODUCE,\n Validators.CLIENT_IMAGE_SHARE,\n Validators.BANDWIDTH_MAXIMUM_VALIDATOR\n ]),\n # Additional properties that can be specified to add new mission requirements to the scenario\n DeploymentModelProperty.trustedLocations: frozenset([\n Validators.CLIENT_LOCATION_TRUSTED\n ])\n}\n\n# Lists the mandatory requirements to pass each resource usage validator\nresource_test_dependencies = {\n Validators.CLIENT_LOCATION_SOURCE_TRUSTED: frozenset(\n [DeploymentModelResource.gpsSatellites, DeploymentModelResource.usb]),\n Validators.CLIENT_LOCATION_SOURCE_USB: frozenset(\n [DeploymentModelResource.gpsSatellites, DeploymentModelResource.usb]),\n Validators.CLIENT_LOCATION_SOURCE_BLUETOOTH: frozenset(\n [DeploymentModelResource.gpsSatellites, DeploymentModelResource.bluetooth]),\n Validators.CLIENT_LOCATION_SOURCE_ANDROIDGPS: frozenset(\n [DeploymentModelResource.gpsSatellites, DeploymentModelResource.internalGps]),\n Validators.CLIENT_LOCATION_SOURCE_MANUAL: frozenset([DeploymentModelResource.userInterface])\n}\n\n\"\"\"\nTest sets in which if one passes, the others must fail\n\"\"\"\nmutually_exclusive_validator_sets = {\n 'client-location-source': frozenset([\n Validators.CLIENT_LOCATION_SOURCE_TRUSTED,\n Validators.CLIENT_LOCATION_SOURCE_USB,\n Validators.CLIENT_LOCATION_SOURCE_BLUETOOTH,\n Validators.CLIENT_LOCATION_SOURCE_ANDROIDGPS,\n Validators.CLIENT_LOCATION_SOURCE_MANUAL\n ])\n}\n","repo_name":"darpa-brass/bbn-immortals","sub_path":"phase02/immortals_repo/harness/pymmortals/datatypes/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12814975984","text":"import platform\nimport sys\n\nImport('env')\n\nif sys.platform == 'win32':\n # HACK: This works around a bug in SCons.\n # subprocess.check_output will complain unless all environment\n # variables are strings.\n system_root = env['ENV']['SystemRoot']\n env['ENV']['SystemRoot'] = str(system_root)\n\n\n# libs needed for embedding\nELIBS=env['LIBS'] + env['PYTHONLIBS']\n\ndef BPLTest(env, name, sources = None, deps = None):\n run = env.BoostRunPythonScript(name + '.py')\n if sources:\n for source in sources:\n Depends(run,\n env.PythonExtension(source != name and source or (source + '_ext'), source + '.cpp')\n )\n else:\n Depends(run, env.PythonExtension(name + '_ext', name + '.cpp'))\n if deps:\n Depends(run, deps)\n return run\n \nenv.AddMethod(BPLTest)\n\nenv.AppendENVPath('PYTHONPATH', Dir('.').path)\n\ntests=[]\ntests+=env.BPLTest('crossmod_exception', ['crossmod_exception_a', 'crossmod_exception_b'])\n\nfor test in [('injected',),\n ('properties',),\n ('return_arg',),\n ('staticmethod',),\n ('boost_shared_ptr',),\n ('enable_shared_from_this',),\n ('andreas_beyer',),\n ('polymorphism',),\n ('polymorphism2',),\n ('wrapper_held_type',),\n ('minimal',),\n ('args',),\n ('raw_ctor',),\n ('exception_translator',),\n ('test_enum', ['enum_ext']),\n ('test_cltree', ['cltree']),\n ('newtest', ['m1', 'm2']),\n ('const_argument',),\n 
('keywords_test', ['keywords']),\n ('test_pointer_adoption',),\n ('operators',),\n ('operators_wrapper',),\n ('callbacks',),\n ('defaults',),\n ('object',),\n ('list',),\n ('long',),\n ('dict',),\n ('tuple',),\n ('str',),\n ('slice',),\n ('virtual_functions',),\n ('back_reference',),\n ('implicit',),\n ('data_members',),\n ('ben_scott1',),\n ('bienstman1',),\n ('bienstman2',),\n ('bienstman3',),\n ('multi_arg_constructor',),\n ('iterator', ['iterator', 'input_iterator']),\n ('stl_iterator',),\n ('extract',),\n ('crossmod_opaque', ['crossmod_opaque_a', 'crossmod_opaque_b']),\n ('opaque',),\n# ('voidptr',),\n ('pickle1',),\n ('pickle2',),\n ('pickle3',),\n ('pickle4',),\n ('nested',),\n ('docstring',),\n ('pytype_function',),\n ('vector_indexing_suite',),\n ('pointer_vector',)]:\n tests+=env.BPLTest(*test)\n\nif env['CXX11']:\n for test in [\n ('shared_ptr',),\n ]:\n tests+=env.BPLTest(*test)\nelse:\n for test in [\n ('polymorphism2_auto_ptr',),\n ('auto_ptr',),\n ]:\n tests+=env.BPLTest(*test)\n\n\ntest = env.BoostRunPythonScript('test_builtin_converters.py')\nDepends(\n test,\n env.PythonExtension('builtin_converters_ext', ['builtin_converters.cpp'])\n )\ntests+=test\ntest = env.BoostRunPythonScript('map_indexing_suite.py')\nDepends(\n test,\n env.PythonExtension('map_indexing_suite_ext', [\n 'map_indexing_suite.cpp',\n 'int_map_indexing_suite.cpp',\n 'a_map_indexing_suite.cpp'])\n )\ntests+=test\n\ntests+=env.BoostRunTest('import_', 'import_.cpp', '${SOURCES[0]} ${SOURCES[1]}', 'import_.py', LIBS=ELIBS)\n\ntests+=env.BoostCompileTest('indirect_traits_test')\ntests+=env.BoostRunTests(['destroy_test',\n 'pointer_type_id_test',\n 'bases',\n 'if_else',\n 'pointee',\n 'result'], LIBS=ELIBS)\n\ntests+=env.BoostCompileTests(['string_literal',\n 'borrowed',\n 'object_manager',\n 'copy_ctor_mutates_rhs'])\n\ntests+=env.BoostRunTest('upcast', LIBS=ELIBS)\ntests+=env.BoostCompileTest('select_holder')\ntests+=env.BoostRunTest('select_from_python_test', LIBS=ELIBS)\ntests+=env.BoostCompileTest('select_arg_to_python_test')\n\nif platform.system() == 'Windows':\n tests+=env.BPLTest('calling_conventions')\n tests+=env.BPLTest('calling_conventions_mf')\n\nif env['NUMPY']:\n numpy_env = env.Clone()\n numpy_env.BoostUseLib('numpy')\n for test in [('numpy/dtype',),\n ('numpy/ufunc',),\n ('numpy/templates',),\n ('numpy/ndarray',),\n ('numpy/indexing',),\n ('numpy/shapes',),]:\n tests+=numpy_env.BPLTest(*test)\n\n\nenv.BoostTestSummary(tests)\nAlwaysBuild(tests)\n","repo_name":"LiquidPlayer/LiquidCore","sub_path":"deps/boost_1_66_0/libs/python/test/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":989,"dataset":"github-code","pt":"61"} +{"seq_id":"29846057372","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom datasets import load_dataset\n\nbooksum = load_dataset(\"kmfoda/booksum\")\n\n\n# In[ ]:\n\n\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-neo-1.3B\")\n\n\n# In[ ]:\n\n\nprefix = \"summarize: \"\n\n\n# In[ ]:\n\n\ndef preprocess_function(examples):\n inputs = [prefix + doc for doc in examples[\"chapter\"]]\n model_inputs = tokenizer(inputs, max_length=1024, truncation=True)\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(examples[\"summary_text\"], max_length=128, truncation=True)\n \n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n\n# In[ ]:\n\n\ntokenized_booksum = booksum.map(preprocess_function, 
batched=True)\n\n\n\n# In[ ]:\n\n\nfrom transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer\n\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"EleutherAI/gpt-neo-1.3B\")\n\n\n# In[ ]:\n\n\nfrom transformers import DataCollatorForSeq2Seq\n\ndata_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)\n\n\n# In[ ]:\n\n\ntraining_args = Seq2SeqTrainingArguments(\n output_dir=\"./results_finetune_GTP-Neo-Booksum\",\n evaluation_strategy=\"epoch\",\n learning_rate=2e-5,\n per_device_train_batch_size=16,\n per_device_eval_batch_size=16,\n weight_decay=0.01,\n save_total_limit=3,\n num_train_epochs=1,\n fp16=True,\n)\n\ntrainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_booksum[\"train\"],\n eval_dataset=tokenized_booksum[\"validation\"],\n tokenizer=tokenizer,\n data_collator=data_collator,\n)\n\ntrainer.train()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"gergoe-szabo/Summarization-with-fiction-Data---SUMWISE","sub_path":"Finetune/FT_GTPNeo_SUM.py","file_name":"FT_GTPNeo_SUM.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18831534825","text":"#CIRCULAR LINKED LIST\r\n\r\nclass Node:\r\n def __init__(self,data):\r\n self.data = data\r\n self.next = None\r\n\r\nclass CList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def printList(self):\r\n if self.head is None:\r\n print(\"No Nodes\")\r\n return\r\n ptr = self.head\r\n count = 0\r\n while(True):\r\n print(ptr.data,end=\"\\t\")\r\n count += 1\r\n if (ptr.next == self.head):\r\n break\r\n ptr = ptr.next\r\n print(\"\\nNo. of Nodes :\",count)\r\n\r\n def append(self,data):\r\n new = Node(data)\r\n if self.head is None:\r\n self.head = new\r\n new.next = self.head\r\n return\r\n ptr = self.head\r\n while(ptr.next!=self.head):\r\n ptr = ptr.next\r\n ptr.next = new\r\n new.next = self.head\r\n\r\n def prepend(self,data):\r\n new = Node(data)\r\n if self.head is None:\r\n new.next = new\r\n self.head = new\r\n return\r\n ptr = self.head\r\n while(ptr.next != self.head):\r\n ptr = ptr.next\r\n new.next = self.head\r\n ptr.next = new\r\n self.head = new\r\n\r\n def add_at_ind(self,data,i):\r\n new = Node(data)\r\n if self.head is None:\r\n new.next = new\r\n self.head = new\r\n return\r\n ptr = self.head\r\n j = 0\r\n while j None:\n cache.clear()\n\n def test_pages_uses_correct_template(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n\n for template, reverse_name in self.templates_pages_names:\n with self.subTest(reverse_name=reverse_name):\n response = self.authorized_client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_page_show_correct_context_authorized(self):\n \"\"\"Шаблон index сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(reverse(self.index))\n\n page_context = response.context['page_obj'][0]\n\n self.assertEqual(page_context, self.post)\n\n def test_group_list_page_show_correct_context_authorized(self):\n \"\"\"Шаблон group_list сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(\n reverse(self.group_list, kwargs={'slug': self.group.slug}))\n\n page_context = response.context['page_obj'][0]\n group_context = response.context['group']\n\n self.assertEqual(page_context.author, self.post.author)\n self.assertEqual(page_context.group, self.post.group)\n self.assertEqual(group_context, 
self.group)\n\n def test_profile_page_show_correct_context(self):\n \"\"\"Шаблон profile сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(\n reverse(self.profile, args=[self.user]))\n\n page_context = response.context['page_obj'][0]\n author_context = response.context['author']\n\n self.assertEqual(page_context.author, self.post.author)\n self.assertEqual(page_context, self.post)\n self.assertEqual(author_context, self.user)\n\n def test_post_detail_page_show_correct_context_authorized(self):\n \"\"\"Шаблон post_detail сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(\n reverse(self.post_detail, kwargs={'post_id': self.post.pk}))\n\n post_context = response.context['post']\n\n self.assertEqual(post_context, self.post)\n\n def test_post_create_page_show_correct_context(self):\n \"\"\"Шаблон post_create сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(reverse(self.post_create))\n\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField,\n 'image': forms.ImageField\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n\n def test_post_edit_page_show_correct_context(self):\n \"\"\"Шаблон post_edit сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(\n reverse(self.post_edit, kwargs={'post_id': self.post.pk}))\n\n form_fields = {\n 'text': forms.fields.CharField,\n 'group': forms.fields.ChoiceField,\n 'image': forms.ImageField\n }\n for value, expected in form_fields.items():\n with self.subTest(value=value):\n form_field = response.context.get('form').fields.get(value)\n self.assertIsInstance(form_field, expected)\n self.assertTrue(response.context.get('is_edit'))\n\n def test_post_is_not_in_another_group(self):\n \"\"\"Созданный пост не попал в группу\n Для которой не был предназначен.\"\"\"\n Group.objects.create(\n title='Another_group',\n slug='test-another-slug',\n )\n response = self.authorized_client.get(\n reverse(self.group_list, args=['test-another-slug']))\n\n self.assertNotIn(self.post, response.context['page_obj'])\n\n def test_post_added_correctly_user2(self):\n \"\"\"Пост при создании не добавляется другому пользователю.\"\"\"\n response_profile = self.authorized_client.get(\n reverse(self.profile,\n kwargs={'username': f'{self.user.username}'}))\n\n group2 = Group.objects.create(title='Тестовая группа 2',\n slug='test_group2')\n post = Post.objects.create(\n text='Тестовый пост от другого автора',\n author=self.user2,\n group=group2)\n profile = response_profile.context['page_obj']\n\n self.assertNotIn(post, profile,\n 'поста нет в группе др��гого пользователя')\n\n def test_post_added_correctly(self):\n \"\"\"\"Пост виден на главной странице и странице группы.\"\"\"\n\n response_index = self.authorized_client.get(\n reverse(self.index))\n response_group = self.authorized_client.get(\n reverse(self.group_list,\n kwargs={'slug': f'{self.group.slug}'}))\n\n index_page = response_index.context['page_obj']\n group_page = response_group.context['page_obj']\n\n self.assertIn(self.post, index_page, 'поста нет на главной')\n self.assertIn(self.post, group_page, 'поста нет в группе')\n\n def image_in_context(self):\n url_names = (self.index, self.profile, self.group_list,\n self.post_detail)\n for url in url_names:\n with self.subTest(value=url):\n response = 
self.authorized_client.get(url)\n self.assertEqual(response.context.get('post').image,\n self.post.image)\n\n def test_cache_context(self):\n '''Проверка кэширования страницы index'''\n before_creating_post = self.authorized_client.get(\n reverse('posts:index'))\n first_item = before_creating_post.content\n Post.objects.create(\n author=self.user,\n text='Проверка кэша',\n group=self.group)\n after_creating_post = self.authorized_client.get(\n reverse('posts:index'))\n item_after = after_creating_post.content\n self.assertEqual(item_after, first_item)\n cache.clear()\n after_clear = self.authorized_client.get(reverse('posts:index'))\n self.assertNotEqual(item_after, after_clear)\n\n\nclass PaginatorViewTest(TestCase):\n\n def setUp(self):\n self.user = User.objects.create_user(username='auth')\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n self.group = Group.objects.create(\n title='Тестовая Группа',\n slug='Test_slug',\n description='тестовое описание группы'\n )\n test_nout_more = 3\n test_nout = settings.POSTS_PER_PAGE + test_nout_more\n\n self.posts = Post.objects.bulk_create(\n [\n Post(\n text=f'Тестовые посты номер {number}',\n author=self.user,\n group=self.group\n )\n for number in range(test_nout)\n ]\n )\n\n def tearDown(self) -> None:\n cache.clear()\n\n def test_second_page_contains_three_records(self):\n \"\"\"На второй странице должно быть три поста.\"\"\"\n\n response = self.client.get(reverse('posts:index') + '?page=2')\n response = self.client.get(\n reverse('posts:group_list',\n kwargs={'slug': self.group.slug}) + '?page=2')\n response = self.client.get(\n reverse('posts:profile', args=[self.user]) + '?page=2')\n\n self.assertEqual(len(response.context['page_obj']), 3)\n\n def test_first_page_contains(self):\n \"\"\"Тест Пагинатора для Первой страницы.\"\"\"\n url_names = {\n reverse('posts:index'): settings.POSTS_PER_PAGE,\n reverse('posts:group_list', kwargs={'slug': self.group.slug}):\n settings.POSTS_PER_PAGE,\n reverse('posts:profile', args=[self.user]):\n settings.POSTS_PER_PAGE,\n }\n\n for value, expected in url_names.items():\n with self.subTest(value=value):\n response = self.client.get(value + '?page=1')\n self.assertEqual(len(response.context['page_obj']), expected)\n\n\nclass CommentTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='auth1')\n cls.not_author = User.objects.create_user(\n username='not_author')\n cls.group = Group.objects.create(title='Тестовая группа',\n slug='test_group')\n cls.post = Post.objects.create(text='Тестовый текст',\n group=cls.group,\n author=cls.user)\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n cache.clear()\n\n def tearDown(self) -> None:\n cache.clear()\n\n def test_comment_authorized_client(self):\n \"\"\"Коментировать может только авторизованный пользователь.\"\"\"\n self.comment = Comment.objects.create(post_id=self.post.id,\n author=self.user,\n text='Тестовый коммент')\n response = self.authorized_client.get(\n reverse('posts:post_detail', kwargs={'post_id': self.post.id}))\n comments = {response.context['comments'][0].text: 'Тестовый коммент',\n response.context['comments'][0].author: self.user.username\n }\n for value, expected in comments.items():\n self.assertEqual(comments[value], expected)\n self.assertTrue(response.context['form'], 'форма получена')\n\n def test_comment_added_correctly(self):\n 
\"\"\"\"Комментарий появляется на странице поста.\"\"\"\n self.comment = Comment.objects.create(post_id=self.post.id,\n author=self.user,\n text='Тестовый коммент')\n\n response = self.authorized_client.get(\n reverse('posts:post_detail', kwargs={'post_id': self.post.id}))\n\n comment = response.context['comments']\n\n self.assertIn(self.comment, comment, 'коммента нет на странице поста')\n\n\nclass FollowTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.post_autor = User.objects.create(\n username='post_autor',\n )\n cls.post_follower = User.objects.create(\n username='post_follower',\n )\n cls.post = Post.objects.create(\n text='Подпишись на меня',\n author=cls.post_autor,\n )\n\n def setUp(self):\n cache.clear()\n self.author_client = Client()\n self.author_client.force_login(self.post_follower)\n self.follower_client = Client()\n self.follower_client.force_login(self.post_autor)\n\n def tearDown(self) -> None:\n cache.clear()\n\n def test_follow_on_user(self):\n \"\"\"Проверка подписки на пользователя.\"\"\"\n count_follow = Follow.objects.count()\n\n self.follower_client.post(\n reverse('posts:profile_follow',\n kwargs={'username': self.post_follower}))\n\n follow = Follow.objects.all().latest('id')\n\n self.assertEqual(Follow.objects.count(), count_follow + 1)\n self.assertTrue(\n Follow.objects.filter(\n author=follow.author_id,\n user=follow.user_id).exists()\n )\n\n def test_unfollow_on_user(self):\n \"\"\"Проверка отписки от пользователя.\"\"\"\n Follow.objects.create(\n user=self.post_autor,\n author=self.post_follower)\n count_follow = Follow.objects.count()\n\n self.follower_client.post(\n reverse('posts:profile_unfollow',\n kwargs={'username': self.post_follower}))\n\n self.assertEqual(Follow.objects.count(), count_follow - 1)\n self.assertFalse(Follow.objects.all().exists())\n\n def test_follow_on_authors(self):\n \"\"\"Проверка записей у тех кто подписан.\"\"\"\n post = Post.objects.create(\n author=self.post_autor,\n text=\"Подпишись на меня\")\n Follow.objects.create(\n user=self.post_follower,\n author=self.post_autor)\n response = self.author_client.get(\n reverse('posts:follow_index'))\n self.assertIn(post, response.context['page_obj'].object_list)\n\n def test_notfollow_on_authors(self):\n \"\"\"Проверка записей у тех кто не подписан.\"\"\"\n post = Post.objects.create(\n author=self.post_autor,\n text=\"Подпишись на меня\")\n response = self.author_client.get(\n reverse('posts:follow_index'))\n self.assertNotIn(post, response.context['page_obj'].object_list)\n","repo_name":"madina-zvezda/hw05_final","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":16780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3337280088","text":"def get_disc_loss(real_X, fake_X, disc_X, adv_criterion):\n '''\n Return the loss of the discriminator given inputs.\n Parameters:\n real_X: the real images from pile X\n fake_X: the generated images of class X\n disc_X: the discriminator for class X; takes images and returns real/fake class X\n prediction matrices\n adv_criterion: the adversarial loss function; takes the discriminator \n predictions and the target labels and returns a adversarial \n loss (which you aim to minimize)\n '''\n disc_fake_X_pred = disc_X(fake_X.detach()) # Detach generator\n disc_fake_X_loss = adv_criterion(disc_fake_X_pred, torch.zeros_like(disc_fake_X_pred))\n disc_real_X_pred = disc_X(real_X)\n disc_real_X_loss = 
adv_criterion(disc_real_X_pred, torch.ones_like(disc_real_X_pred))\n    # Average the fake and real terms so each batch carries equal weight\n    disc_loss = (disc_fake_X_loss + disc_real_X_loss) / 2\n    return disc_loss\n","repo_name":"ungarl/course-content","sub_path":"tutorials/W8_AutoEncoders_GANs/solutions/W8_Tutorial2_Solution_Ex61.py","file_name":"W8_Tutorial2_Solution_Ex61.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} {"seq_id":"429949739","text":"from collections import deque\nfrom sys import stdin\ninput = stdin.readline\n\ndef bfs(graph, visited, s_x, s_y, e_x, e_y, I):\n    # knight move offsets\n    dx = [2, 1, -1, -2, -2, -1, 1, 2]\n    dy = [1, 2, 2, 1, -1, -2, -2, -1]\n\n    queue = deque()\n    visited[s_x][s_y] = 1  # mark the start square as visited\n    queue.append((s_x, s_y))\n    while queue:\n        x, y = queue.popleft()\n        # reached the target square\n        if e_x == x and e_y == y:\n            return graph[x][y]\n        for i in range(8):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            # skip squares outside the board\n            if nx < 0 or ny < 0 or nx >= I or ny >= I:\n                continue\n            # enqueue unvisited squares\n            elif graph[nx][ny] == 0 and visited[nx][ny] == 0:\n                visited[nx][ny] = 1  # mark as visited\n                graph[nx][ny] = graph[x][y] + 1  # one more move than the previous square\n                queue.append((nx, ny))\n\n\nif __name__ == \"__main__\":\n    T = int(input())  # number of test cases\n    while T:\n        I = int(input())  # board size\n        graph = [[0] * I for _ in range(I)]  # 2-D array of knight move counts\n        visited = [[0] * I for _ in range(I)]  # 2-D array of visited flags\n        s_x, s_y = map(int, input().split())  # start square\n        e_x, e_y = map(int, input().split())  # target square\n        print(bfs(graph, visited, s_x, s_y, e_x, e_y, I))\n        T -= 1\n","repo_name":"worldbrighteststar/Boostcamp_2st_Hot6","sub_path":"Algorithm/JAEHYEON/7562.py","file_name":"7562.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"41788315065","text":"class DockersValidatorSettings(object):  # do NOT set BaseValidatorSettings as parent\n    optional_fields = ['brand', 'price']\n    ignore_fields = [\n        'is_in_store_only', 'is_out_of_stock',\n        'google_source_site', 'description', 'special_pricing',\n        'bestseller_rank', 'img_count', 'video_count'\n    ]\n    ignore_log_errors = False  # don't check logs for errors?\n    ignore_log_duplications = True  # ... duplicated requests?\n    ignore_log_filtered = True  # ... 
filtered requests?\n    test_requests = {\n        'sdfsdgdf': 0,  # should return 'no products' or just 0 products\n        'benny benassi': 0,\n        'jeans': [20, 150],\n        'red': [20, 150],\n        'black': [50, 300],\n        'green': [10, 110],\n        'blue jeans': [5, 150],\n        'black shoes': [10, 100],\n        'black fit': [20, 150],\n        'leather': [20, 200],\n    }\n\n\n# Sample product pages used for spot checks.\ntest_urls = {\n    'http://www.dockers.com/US/en_US/mens-pants/p/471870008',\n    'http://www.dockers.com/US/en_US/mens-pants/p/469650006',\n    'http://www.dockers.com/US/en_US/mens-pants/p/469670006',\n    'http://www.dockers.com/US/en_US/mens-pants/p/478600005',\n    'http://www.dockers.com/US/en_US/mens-pants/p/462350002',\n}\n","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/product_ranking/validators/dockers_validator.py","file_name":"dockers_validator.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} {"seq_id":"15018411531","text":"#!/usr/bin/python\n\nimport argparse\n\nfrom scenario_builder import manager\nfrom scenario_builder.utils import setup_custom_logger\n\nparser = argparse.ArgumentParser(description='Forensic Scenario Builder CLI')\nparser.add_argument('--bot', required=True, nargs=1, help='Location of file describing bot container or vm.')\nparser.add_argument('--bot-num', nargs=1, type=int, default=[1], help='Number of bot containers to spawn. Defaults to one.')\nparser.add_argument('--attacker', required=True, nargs=1, help='Location of file describing attacker container or vm.')\nparser.add_argument('--attacker-ip', nargs=1, default=['random'], help='Assign static ip to attacker. Defaults to random assignment.')\nparser.add_argument('--victim', required=True, nargs=1, help='Location of file describing victim container or vm.')\nparser.add_argument('--victim-ip', nargs=1, default=['random'], help='Assign static ip to victim. Defaults to random assignment.')\nparser.add_argument('--time-out', nargs=1, type=int, default=[5], help='Number of minutes to run scenario. Defaults to 5 minutes.')\nparser.add_argument('--logs', nargs='?', const='/var/log/', help='Location to pull log file(s) from on victim after scenario is done. Defaults to /var/log.')\nparser.add_argument('--disk-image', nargs='?', const='./filesystem.image.gz', help='Create disk image of victim after scenario is done')\nparser.add_argument('--memory-dump', nargs='?', const='./mem-image.lime', help='Create memory dump of victim after scenario is done')\nparser.add_argument('--pcap', nargs='?', const='./capture.pcap', help='Create packet capture of scenario traffic')\nparser.add_argument('--subnet', default='10.0.0.0/8', help='Subnet to place scenarios and containers on. 
Defaults to 10.0.0.0/8')\n\nargs = parser.parse_args()\narg_dict = {}\narg_dict['bot'] = {'dir': args.bot[0],\n 'num-ips': args.bot_num[0],\n 'manager': 'docker'}\n\narg_dict['attacker'] = {'dir': args.attacker[0],\n 'ip': args.attacker_ip[0],\n 'manager': 'docker'}\n\narg_dict['victim'] = {'dir': args.victim[0],\n 'ip': args.victim_ip[0],\n 'manager': 'vagrant'}\n\narg_dict['subnet'] = args.subnet\narg_dict['pcap'] = args.pcap\narg_dict['disk_image'] = args.disk_image\narg_dict['logs'] = args.logs\narg_dict['mem_dump'] = args.memory_dump\narg_dict['timeout'] = args.time_out[0]\n\nlogger = setup_custom_logger('root')\nlogger.debug('Starting up scenario builder')\nlogger.debug('Argument Dictionary: {}'.format(arg_dict))\nmanager.run_scenario(arg_dict)\n","repo_name":"ameserole/Forensic-Scenario-Builder","sub_path":"scenario_builder.py","file_name":"scenario_builder.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}