diff --git "a/4836.jsonl" "b/4836.jsonl" new file mode 100644--- /dev/null +++ "b/4836.jsonl" @@ -0,0 +1,725 @@ +{"seq_id":"369669858","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^seed/$', views.seed,name='seed'),\n url(r'^add/$', views.addMedicine, name='add'),\n url(r'^setAvail/$', views.setAvailability, name='setAvail'),\n]\n","sub_path":"Medicine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"377227593","text":"import subprocess\nimport sys\nimport unittest\n\n\nclass TestRequestsGevent(unittest.TestCase):\n def test_patch(self):\n # Since this test depends on import ordering it is run in a separate\n # process with a fresh interpreter.\n p = subprocess.Popen(\n [sys.executable, \"tests/contrib/requests_gevent/run_test.py\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n p.wait()\n\n assert p.stdout.read() == b\"Test succeeded\\n\", p.stderr.read()\n","sub_path":"tests/contrib/requests_gevent/test_requests_gevent.py","file_name":"test_requests_gevent.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"12666564","text":"# encoding: UTF-8\n# Autor: Siham El Khoury Caviedes, A01374764\n\n# Tarea sobre listas.\n\ndef sumarElementos(lista):\n listaAcumulada = []\n numero = len(lista)\n suma = 0\n for i in range(numero):\n suma = suma + lista[i]\n listaAcumulada.append(suma)\n return listaAcumulada\n\n\ndef quitarPrimeroUltimo(lista):\n listaSin = lista\n listaSin.remove(lista[0])\n listaSin.remove(lista[-1])\n return listaSin\n\n\ndef verificarOrdenNumeros(lista):\n i = 0\n continuar = True\n while continuar == True:\n numero1 = lista[i]\n numero2 = lista[i+1]\n if (numero1 + 1) == numero2:\n continuar == True\n while i != len(lista):\n i = i+1\n else:\n continuar == False\n veredicto = \"False\"\n return veredicto\n veredicto = \"True\"\n return veredicto\n\ndef verificarOrdenLetras(lista):\n abecedario = [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z]\n print(set(lista).issubset(abecedario))\n\n\ndef main():\n lista =[]\n numero = 0\n numero = int(input(\"Teclea el número que deseas añadir a la lista [-1 para salir]: \"))\n while numero != -1:\n lista.append(numero)\n numero = int(input(\"Teclea el número que deseas añadir a la lista [-1 para salir]: \"))\n\n print(\"Ejercicio 1.\")\n listaAcumulada = sumarElementos(lista)\n print(\"la lista\", lista, \"regresa la lista acumulada\", listaAcumulada)\n\n print(\"Ejercicio 2.\")\n listaSin = quitarPrimeroUltimo(lista)\n print(\"Lista nueva:\", listaSin)\n\n print(\"Ejercicio 3.\")\n eleccion = int(input(\"Teclea 1 si deseas trabajar con números y 2 si deseas trabajar con letras: \"))\n if eleccion == 1:\n elemento = 0\n while elemento != -1:\n elemento = int(input(\"Teclea el elemento que deseas añadir a la lista [-1 para salir]: \"))\n lista.append(elemento)\n veredicto = verificarOrdenNumeros(lista)\n print(\"¿La lista está en orden?\", veredicto)\n\n elif eleccion == 2:\n elemento = 0\n while elemento != \"z\":\n elemento = input(\"Teclea la letra minúscula que deseas añadir a la lista [z para salir]: \")\n lista.append(elemento)\n veredicto = verificarOrdenLetras(lista)\n print(\"¿La lista está en orden?\", 
veredicto)\n\n\nmain()\n","sub_path":"listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"522940336","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.utils.timezone import utc\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('testando_model', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='contato',\n name='data',\n field=models.DateTimeField(default=datetime.datetime(2015, 4, 2, 14, 51, 40, 430955, tzinfo=utc)),\n preserve_default=True,\n ),\n ]\n","sub_path":"pelo-terminal/testando_model/testando_model/migrations/0002_auto_20150402_1451.py","file_name":"0002_auto_20150402_1451.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"535749929","text":"import turtle\r\nimport random\r\nfrom turtle import *\r\nimport math\r\nimport time\r\n\r\n\r\n\r\npawn2= turtle.Turtle()\r\npawn = turtle.Turtle()\r\npawn.shape(\"turtle\")\r\npawn2.shape(\"turtle\")\r\ntime.sleep(2)\r\n\r\npawn.goto(0, 0)\r\npawn2.goto(10,10)\r\npawn.color(\"red\")\r\npawn2.color(\"blue\")\r\npawn.speed(1)\r\npawn.width(5)\r\n\r\ndef up():\r\n pawn.setheading(90)\r\n pawn.forward(20)\r\n\r\ndef down():\r\n pawn.setheading(270)\r\n pawn.forward(20)\r\n\r\ndef left():\r\n pawn.setheading(180)\r\n pawn.forward(20)\r\n\r\ndef right():\r\n pawn.setheading(0)\r\n pawn.forward(20)\r\n\r\nturtle.listen()\r\nturtle.onkey(up, 'w')\r\nturtle.onkey(down, 's')\r\nturtle.onkey(left, 'a')\r\nturtle.onkey(right, 'd')\r\nimport desenmaze\r\nturtle.done()\r\n\r\n\r\n","sub_path":"SapiensSerpentis_v2/SapiensSerpentis_v2.py","file_name":"SapiensSerpentis_v2.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"342284550","text":"import os\nimport sys\n# These have to be set before importing any mixcoatl modules\nos.environ['ES_ACCESS_KEY'] = 'abcdefg'\nos.environ['ES_SECRET_KEY'] = 'gfedcba'\nimport json\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\nfrom httpretty import HTTPretty\nfrom httpretty import httprettified\n\nimport mixcoatl.automation.tier as rsrc\nfrom mixcoatl.settings.load_settings import settings\nfrom mixcoatl.utils import camelize\n\nclass TestTier(unittest.TestCase):\n def setUp(self):\n self.cls = rsrc.Tier\n self.es_url = '%s/%s' % (settings.endpoint, self.cls.PATH)\n self.json_file = '../../tests/data/unit/automation/tier.json'\n\n @httprettified\n def test_has_all_and_is_one(self):\n '''test all() returns a list of Tier'''\n\n with open(self.json_file) as f:\n data = f.read()\n HTTPretty.register_uri(HTTPretty.GET,\n self.es_url,\n body=data,\n status=200,\n content_type=\"application/json\")\n\n s = self.cls.all()\n assert len(s) == 1\n for x in s:\n assert isinstance(x, self.cls)\n\n @httprettified\n def test_has_one(self):\n '''test Tier() returns a valid resource'''\n pk = 10429\n with open(self.json_file) as f:\n data = json.load(f)\n data[self.cls.COLLECTION_NAME][:] = [d for d in data[self.cls.COLLECTION_NAME] if\n d[camelize(self.cls.PRIMARY_KEY)] == pk]\n HTTPretty.register_uri(HTTPretty.GET,\n self.es_url + '/' + str(pk),\n body=json.dumps(data),\n status=200,\n content_type=\"application/json\")\n s = 
self.cls(pk)\n assert s.tier_id == pk\n assert s.breach_increment == 1\n assert s.breach_period_in_minutes == 5\n assert s.cooldown_period_in_minutes == 5\n assert s.deployment['deployment_id'] == 13607\n assert s.description == 'This is what we call a tier.'\n assert s.last_breach_change_timestamp == '2012-12-18T18:42:06.160+0000'\n assert s.lower_cpu_threshold == 25\n assert s.lower_ram_threshold == 25\n assert s.maximum_servers == 1\n assert s.minimum_servers == 1\n assert s.name == 'Sample Tier'\n assert s.removable is False\n assert s.scaling_rules == 'BASIC'\n assert s.status == 'BREACH_LOWER'\n assert s.upper_cpu_threshold == 75\n assert s.upper_ram_threshold == 75\n","sub_path":"tests/unit/automation/test_tier.py","file_name":"test_tier.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"424378501","text":"# Definition for a Node.\nclass Node:\n def __init__(self, val, left, right, next):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\n\nclass Solution:\n def connect(self, root):\n if not root:\n return\n node = root\n while node:\n temp = Node(0, None, None, None)\n cur = temp\n while node:\n if node.left:\n cur.next = node.left\n cur = cur.next\n if node.right:\n cur.next = node.right\n cur = cur.next\n node = node.next\n node = temp.next\n return root\n\n\nif __name__ == \"__main__\":\n node1 = Node(1, None, None, None)\n node2 = Node(2, None, None, None)\n node3 = Node(3, None, None, None)\n\n node1.left = node2\n node1.right = node3\n Solution().connect(node1)\n\n","sub_path":"Populating Next Right Pointers in Each Node II.py","file_name":"Populating Next Right Pointers in Each Node II.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"379081558","text":"import requests\nfrom operator import itemgetter\n\n# 执行API调用并存储响应\nurl = 'https://hacker-news.firebaseio.com/v0/topstories.json'\nresult = requests.get(url, timeout = 10)\nprint(\"Status code:\", result.status_code)\n\n# 处理有关每篇文章的信息\nsubmission_ids = result.json()\nsubmission_dicts = []\nfor submission_id in submission_ids:\n # 对每篇文章都执行一个API调用\n url = ('https://hacker-news.firebaseio.com/v0/item/' + str(submission_id) + '.json')\n submission_result = requests.get(url)\n print(submission_result.status_code)\n response_dict = submission_result.json()\n\n submission_dict = {'title': response_dict['title'],\n 'link': 'http://news.ycombinator.com/item?id=' + str(submission_id),\n 'comments': response_dict.get('descendants',0)}\n submission_dicts.append(submission_dict)\n\nsubmission_dicts = sorted(submission_dicts, key = itemgetter('comments'), reverse = True)\n\nfor submission_dict in submission_dicts:\n print(\"\\nTitle:\", submission_dict['title'])\n print(\"Discussion link:\", submission_dict['link'])\n print(\"Comments:\", submission_dict['comments'])\n\n\n\n\n","sub_path":"python/pythonProject12/hn_submissions.py","file_name":"hn_submissions.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"371179422","text":"import grpc\nimport time\n\nimport pytest\n\nfrom tests.integration.aurorabridge_test.client import Client\nfrom tests.integration.stateless_job import query_jobs\n\n\n@pytest.fixture\ndef client():\n client = Client()\n\n yield client\n\n # Delete all jobs\n _delete_jobs()\n\n\ndef 
_delete_jobs(respool_path='/AuroraBridge',\n timeout_secs=20):\n jobs = query_jobs(respool_path)\n\n for job in jobs:\n job.delete(force_delete=True)\n\n # Wait for job deletion to complete.\n deadline = time.time() + timeout_secs\n while time.time() < deadline:\n try:\n jobs = query_jobs(respool_path)\n if len(jobs) == 0:\n return\n time.sleep(2)\n except grpc.RpcError as e:\n # Catch \"not-found\" error here because QueryJobs endpoint does\n # two db queries in sequence: \"QueryJobs\" and \"GetUpdate\".\n # However, when we delete a job, updates are deleted first,\n # there is a slight chance QueryJobs will fail to query the\n # update, returning \"not-found\" error.\n if e.code() == grpc.StatusCode.NOT_FOUND:\n time.sleep(2)\n continue\n raise\n\n assert False, 'timed out waiting for jobs to be deleted'\n","sub_path":"tests/integration/aurorabridge_test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"268045431","text":"import colorama\n\nfrom autofit import conf\nfrom autolens import runners\nfrom autolens.commands.base import Base, prepend_working_directory\n\n\ndef color(text, fore):\n \"\"\"\n Apply a color to some text.\n\n Parameters\n ----------\n text: str\n The original text\n fore: colorama.ansi.AnsiFore\n The color to be applied to the text\n\n Returns\n -------\n text: str\n Colored text\n \"\"\"\n return \"{}{}{}\".format(fore, text, colorama.Fore.RESET)\n\n\ndef blue(text):\n \"\"\"\n Make text blue\n \"\"\"\n return color(text, colorama.Fore.BLUE)\n\n\ndef red(text):\n \"\"\"\n Make text red\n \"\"\"\n return color(text, colorama.Fore.RED)\n\n\nclass Pipeline(Base):\n\n def run(self):\n name = self.options['']\n conf.instance = conf.Config(self.config_path, self.output_path)\n try:\n if self.options['--info']:\n tup = runners.pipeline_dict[name]\n print()\n pl = tup.make()\n print(red(name))\n print(tup.doc)\n print()\n print(red(\"Phases\"))\n print(\"\\n\".join([\"{}\\n {}\".format(phase.__class__.__name__, blue(phase.doc)) for phase in pl.phases]))\n return\n if name is not None:\n if name == \"test\":\n self.run_pipeline(runners.TestPipeline())\n return\n self.run_pipeline(runners.pipeline_dict[name].make())\n return\n except KeyError:\n print(\"Pipeline '{}' does not exist.\\n\".format(name))\n\n print_pipelines()\n\n def run_pipeline(self, pl):\n from autolens.imaging import image as im\n if self.is_using_hdu:\n image = im.load_imaging_from_fits(self.data_path, self.image_hdu, self.noise_hdu, self.psf_hdu,\n self.pixel_scale)\n else:\n image = im.load_imaging_from_path(self.image_path, self.noise_path, self.psf_path,\n pixel_scale=self.pixel_scale)\n pl.run(image)\n\n @property\n def is_using_hdu(self):\n \"\"\"\n Returns\n -------\n is_using_hdu: bool\n True iff --datas option is set. 
--datas is the path to a file with multiple datas layers accessible by setting\n hdus.\n \"\"\"\n return self.options[\"--datas\"] is not None\n\n @property\n def image_hdu(self):\n \"\"\"\n Returns\n -------\n str: image_hdu\n The hdu of the image datas in the datas file\n \"\"\"\n return int(self.options[\"--image-hdu\"])\n\n @property\n def noise_hdu(self):\n \"\"\"\n Returns\n -------\n str: noise_hdu\n The hdu of the noise datas in the datas file\n \"\"\"\n return int(self.options[\"--noise-hdu\"])\n\n @property\n def psf_hdu(self):\n \"\"\"\n Returns\n -------\n str: psf_hdu\n The hdu of the psf datas in the datas file\n \"\"\"\n return int(self.options[\"--psf-hdu\"])\n\n @property\n @prepend_working_directory\n def image_path(self):\n \"\"\"\n Get the relative or absolute path to the input datas_. If the path does not begin with '/' then the current\n working directory will be prepended.\n\n Returns\n -------\n str: path\n The path to the image\n \"\"\"\n return self.options['--image']\n\n @property\n @prepend_working_directory\n def data_path(self):\n \"\"\"\n Get the relative or absolute path to the input datas. Input datas includes datas_, noise and psf with different\n hdu values input by the user. If the path does not begin with '/' then the current working directory will be\n prepended.\n\n Returns\n -------\n str: path\n The path to the datas\n \"\"\"\n return self.options['--datas']\n\n @property\n @prepend_working_directory\n def noise_path(self):\n \"\"\"\n Get the relative or absolute path to the input noise. If the path does not begin with '/' then the current\n working directory will be prepended.\n\n Returns\n -------\n str: path\n The path to the noise\n \"\"\"\n return self.options['--noise']\n\n @property\n @prepend_working_directory\n def psf_path(self):\n \"\"\"\n Get the relative or absolute path to the input psf. If the path does not begin with '/' then the current\n working directory will be prepended.\n\n Returns\n -------\n str: path\n The path to the psf folder or psf.\n \"\"\"\n return self.options['--psf']\n\n @property\n def pixel_scale(self):\n \"\"\"\n Returns\n -------\n pixel_scales: float\n The size of a single pixel, in arc seconds, as input by the user\n \"\"\"\n return float(self.options['--pixel-scale'])\n\n @property\n def config_path(self):\n \"\"\"\n Returns\n -------\n config_path: str\n The path to the configuration folder. Defaults to 'config' in the current working directory.\n \"\"\"\n if '--config' in self.options:\n config_path = self.options['--config']\n else:\n config_path = 'config'\n return config_path\n\n @property\n @prepend_working_directory\n def output_path(self):\n \"\"\"\n Returns\n -------\n output_path: str\n The path to the configuration folder. 
Defaults to 'output' in the current working directory.\n \"\"\"\n return self.options['--output']\n\n\ndef print_pipelines():\n \"\"\"\n Prints a list of available runners taken from the pipeline dictionary.\n \"\"\"\n print(\"Available Pipelines:\\n\")\n print(\n \"\\n\".join(\n [\"{}\\n {}\".format(key, blue(value.short_doc)) for\n key, value\n in\n runners.pipeline_dict.items()]))\n","sub_path":"autolens/commands/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"243866256","text":"# from first import get_train_df, seed_everything\r\n# from utils import get_train_df\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time\r\nfrom collections import Counter\r\nimport pickle\r\n# from sklearn.multiclass import OneVsRestClassifier\r\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\r\n# from sklearn.metrics import balanced_accuracy_score\r\n# import xgboost as xgb\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom utils import get_train_df, OneVsRestClassifierNew, seed_everything\r\n\r\n\r\n# from optimize_thresholds import optimize_thresholds_D,get_err_count\r\n\r\ndef get_CV_folds(train, target, n_folds):\r\n x = pd.DataFrame(target)\r\n x.columns = ['target2']\r\n target_one_hot = pd.get_dummies(x, columns=['target2'])\r\n\r\n mskf = MultilabelStratifiedKFold(n_splits=n_folds)\r\n\r\n folds_out = {}\r\n for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target_one_hot)):\r\n folds_out[f] = v_idx\r\n\r\n return folds_out\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n seed_everything() # seeding the random number generators so we could reproduce the results\r\n\r\n n_folds = 8\r\n n_trees = 100\r\n\r\n train = get_train_df() # get the pre-processed train data\r\n cols = train.columns\r\n\r\n target = train['target'] # original classes\r\n target2 = train['target2'] # classes from phase 1 (4 classes)\r\n\r\n hist_target1 = Counter(target)\r\n hist_target2 = Counter(target2)\r\n\r\n train_other = train.loc[target2 == 'other'] # in the second phase we are working only on the rows of the \"other\" class\r\n target_other = target.loc[target2 == 'other']\r\n\r\n hist_target = Counter(target_other) # count the amount of samples of each class\r\n print(train_other.shape)\r\n\r\n medium_classes = [k for k in hist_target if hist_target[k] > 950] # in this phase we are working on the medium sized classes\r\n medium_classes.append('class07') # we add class07 and class14 also, since they were proved to be perfectly predicted (hence we bounced them\r\n medium_classes.append('class14') # up from the 3rd phase to the 2nd phase).\r\n\r\n target_other2 = np.array([x if x in medium_classes else 'other2' for x in target_other]) # the \"smaller classes\" are now put into the \"other2\" bucket.\r\n hist_target2 = Counter(target_other2)\r\n\r\n classes = np.unique(target_other2) # here are the classes we try to classify in this phase\r\n\r\n # split into folds\r\n train_other2 = train_other.drop(columns=['target', 'target2']) # drop columns \"target\" and \"target2\" from the data\r\n folds = get_CV_folds(train_other2, target_other2, n_folds)\r\n # prepare prediction array on the test set: (it is the same size as the complete train data. in each fold we leave a \"chunk\" out. 
train on the rest and predict the \"chunk\")\r\n pred_all = np.zeros((train_other.shape[0], len(classes)))\r\n # iterate the cv folds:\r\n models = []\r\n for f in folds:\r\n\r\n ind_train = []\r\n ind_test = list(folds[f]) # folds[f] contains the test data indices for the fold (the chunk we leave out)\r\n\r\n for f2 in folds:\r\n if f == f2:\r\n continue\r\n ind_train_temp = list(folds[f2])\r\n ind_train = ind_train + ind_train_temp # we aggregate the rest of the indices as train data\r\n\r\n assert (len(ind_train) + len(ind_test) == len(target_other2)) # check we exhaust all of the data\r\n\r\n # create Train and Test matrices\r\n X_train = train_other.iloc[ind_train]\r\n Y_train = target_other2[ind_train]\r\n Y_train_classes_orig = X_train['target'] # original classes: will be used to give weight for the samples\r\n\r\n X_test = train_other.iloc[ind_test]\r\n Y_test = target_other2[ind_test]\r\n\r\n X_train = X_train.drop(columns=['target', 'target2'])\r\n X_test = X_test.drop(columns=['target', 'target2'])\r\n\r\n print(f\"fold #{f}: just before OneVsRestClassifier\")\r\n t0 = time.time()\r\n\r\n weights = np.zeros(Y_train_classes_orig.shape) # prepare vector of sample weights\r\n hist_y_train = Counter(Y_train_classes_orig)\r\n n = 0\r\n for c in Y_train_classes_orig:\r\n weights[n] = 1 / hist_y_train[c] # the weights per row are inversely proportional to the amount of the class in the data\r\n n += 1\r\n\r\n clf_in = RandomForestClassifier(verbose=0, n_estimators=n_trees, random_state=0) # we use a RF model as the inner classifier type in OneVsRest\r\n clf = OneVsRestClassifierNew(clf_in) # we use OneVsRestClassifierNew and not OneVsRestClassifier so that we could use the sample weights\r\n clf.fit(X_train, Y_train, sample_weight=weights) # here we use the sample weights.\r\n\r\n t1 = time.time()\r\n print(f\"fold #{f}: OneVsRestClassifier elapsed = {t1 - t0}\")\r\n pred_all[ind_test] = clf.predict_proba(X_test) # predict the test data\r\n\r\n models.append(clf)\r\n\r\n # check that the models are all arranged the same way:\r\n classes = models[0].classes_\r\n for m in models:\r\n for i in range(len(classes)):\r\n assert (classes[i] == m.classes_[i])\r\n\r\n # init:\r\n y_pred_all = np.argmax(pred_all, axis=1)\r\n y_pred_classes = np.array(['abbaaaaaaaaaaaa' for _ in range(target_other2.shape[0])])\r\n err_count = {}\r\n total_count = {}\r\n n = -1\r\n for c in classes:\r\n n += 1\r\n y_pred_classes[y_pred_all == n] = c\r\n err_count[c] = 0\r\n total_count[c] = 0\r\n\r\n for (c, c2) in zip(target_other2, y_pred_classes):\r\n total_count[c] += 1\r\n if c != c2:\r\n err_count[c] += 1\r\n\r\n # here we calculate the factor to multiply the \"other2\" class prediction so as to maximize the balanced accuracy (giving a higher weight to the \"other2\" class)\r\n err_count0 = err_count\r\n\r\n f_vec = np.logspace(-2, 6, num=101) # possible factors are log spaced\r\n N = pred_all.shape[1] - 1\r\n ba_max = -np.inf\r\n factor_max = -1\r\n for factor in f_vec: # we iterate over possible factors (log spaced)\r\n pred_all_temp = pred_all.copy()\r\n\r\n pred_all_temp[:, N] = pred_all_temp[:, N] * factor # multiply the last column by a factor\r\n\r\n y_pred_all = np.argmax(pred_all_temp, axis=1) # index of predicted class\r\n y_pred_classes = np.array(['abbaaaaaaaaaaaa' for _ in range(target_other2.shape[0])])\r\n err_count = {}\r\n total_count = {}\r\n n = -1\r\n for c in classes:\r\n n += 1\r\n y_pred_classes[y_pred_all == n] = c # convert to class name\r\n err_count[c] = 0\r\n total_count[c] = 
0\r\n\r\n for (c, c2) in zip(target_other2, y_pred_classes): # calculate errors\r\n total_count[c] += 1\r\n if c != c2:\r\n err_count[c] += 1\r\n\r\n ba = 0 # calculate balanced accuracy with higher weight for \"other2\" class\r\n for c in classes:\r\n if c == \"other2\":\r\n ba = ba + (1 - err_count[c] / total_count[c] * 11) # higher weight to the \"other2\" class\r\n else:\r\n ba = ba + (1 - err_count[c] / total_count[c])\r\n ba = ba / (len(classes) + 10)\r\n print(err_count)\r\n print(ba)\r\n if ba > ba_max: # if we get higher balanced accuracy then we save the factor used.\r\n factor_max = factor\r\n ba_max = ba\r\n err_count1 = err_count\r\n\r\n print(err_count0)\r\n print(err_count1)\r\n\r\n err_rate0 = {}\r\n err_rate1 = {}\r\n for c in classes:\r\n err_rate0[c] = err_count0[c] / total_count[c]\r\n err_rate1[c] = err_count1[c] / total_count[c]\r\n\r\n print(f\"err_rate0={err_rate0}\")\r\n print(f\"err_rate1={err_rate1}\")\r\n print(f\"factor_max={factor_max}\")\r\n\r\n pickle.dump(models, open(\"./files/second_phase_models.pkl\", 'wb')) # save models\r\n pickle.dump(factor_max, open(\"./files/second_phase_models_factor_max.pkl\", 'wb')) # save factor\r\n","sub_path":"second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"587669993","text":"#!/usr/bin/env python\n# -*- encode: utf-8 -*-\nl = []\nfor c in range(33,127):\n if c in (ord('O'),ord('l')): # → 0, I\n continue\n l.append(chr(c))\n print(chr(c),end='')\n\nprint()\nprint(len(l))\n","sub_path":"scripts/base92.py","file_name":"base92.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"454774071","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright (C) 2016 Canonical\n#\n# Authors:\n# Didier Roche\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; version 3.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\n'''Release-related misc utilities'''\n\nimport logging\nimport os\nimport sys\nimport subprocess\nimport yaml\n\nfrom .settings import ROOT_DIR, RELEASES_BRANCH_MAPPING, DEVICES_MAPPING\nfrom .tools import next_relevant_line\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_supported_releases_map():\n '''Load supported release and master branch map'''\n release_map = {}\n with open(os.path.join(ROOT_DIR, RELEASES_BRANCH_MAPPING)) as f:\n try:\n for line in next_relevant_line(f):\n (release, branch_name) = line.split(\" \")\n release_map[release] = branch_name\n except ValueError:\n logger.error(\"Release file is not of valid format: \")\n sys.exit(1)\n return release_map\n\n\ndef get_releases_in_context():\n '''Prepare for one release, switching context and return release name'''\n for release, branch in _get_supported_releases_map().items():\n subprocess.check_call([\"git\", \"checkout\", \"-q\", branch])\n yield release\n\n\ndef load_device_metadata(release):\n \"\"\"Return maps of variables substitution token for each device.\n\n Adding RELEASE_VERSION and DEVICE_ID automatically to each device for convenience.\n IMAGE_FILENAME and IMAGE_UNCOMPRESSED_FILENAME are also aded if IMAGE_URL is present.\n\n Format is:\n { 'device-key':\n { 'VARIABLE_NAME': 'VALUE' },\n …\n },\n …\n }\n\n Example:\n { 'rpi2': {'IMAGE_URL': 'https://download.ubuntu.com/blablabnla/rpi2-16.04.iso',\n 'IMAGE_FILENAME': 'rpi2-16.04.iso',\n 'RELEASE_VERSION': '16.04' },\n 'dragonboard': {'FOO': 'BAR',\n 'RELEAS_VERSION': '16.04' },\n }\"\"\"\n devices_metadata = {}\n with open(os.path.join(ROOT_DIR, DEVICES_MAPPING), encoding='utf-8') as f:\n devices_metadata = yaml.load(f.read())\n for device_key in devices_metadata:\n devices_metadata[device_key]['RELEASE_VERSION'] = release\n devices_metadata[device_key]['DEVICE_ID'] = device_key\n image_url = devices_metadata[device_key].get('IMAGE_URL')\n if image_url:\n devices_metadata[device_key]['IMAGE_FILENAME'] = os.path.basename(image_url)\n devices_metadata[device_key]['IMAGE_UNCOMPRESSED_FILENAME'] = \\\n os.path.basename(os.path.splitext(image_url)[0])\n return devices_metadata\n","sub_path":"sitegenerator/releases.py","file_name":"releases.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"30851571","text":"\"\"\"Processes artifacst using a local plaso process.\"\"\"\n\n__author__ = u'jbn@google.com (Johan Berggren)'\n\nimport os\nimport subprocess\nimport tempfile\nimport uuid\n\nfrom dftimewolf.lib.processors.processors import BaseArtifactProcessor\n\n\nclass LocalPlasoProcessor(BaseArtifactProcessor):\n \"\"\"Process artifacts with plaso, begetting a new log2timeline.py process.\n\n Attributes:\n output_path: Where to store the result\n artifacts_path: Source data to process\n plaso_storage_file_name: File name for the resulting Plaso storage file\n plaso_storage_file_path: Full path to the result\n timezone: Timezone to use for Plaso processing\n \"\"\"\n\n def __init__(self, artifacts_path, timezone=None, verbose=False):\n \"\"\"Initialize the Plaso artifact processor object.\n\n Args:\n artifacts_path: Path to data to process\n timezone: Timezone name (optional)\n verbose: Boolean indicating 
if to use verbose output\n \"\"\"\n super(LocalPlasoProcessor, self).__init__(verbose=verbose)\n self.output_path = tempfile.mkdtemp()\n self.artifacts_path = artifacts_path\n self.timezone = timezone\n self.plaso_storage_file_name = u'{0:s}.plaso'.format(uuid.uuid4().hex)\n self.plaso_storage_file_path = os.path.join(self.output_path,\n self.plaso_storage_file_name)\n self.results = None\n\n def process(self):\n \"\"\"Process files with Log2Timeline from the local plaso install.\n\n Returns:\n Path to a Plaso storage file\n\n Raises:\n ValueError: If the local log2timeline.py process fails\n \"\"\"\n log_file_path = os.path.join(self.output_path, u'plaso.log')\n self.console_out.VerboseOut(u'Log file: {0:s}'.format(log_file_path))\n\n cmd = [u'log2timeline.py']\n # Since we might be running alongside another Processor, always disable\n # the status view\n cmd.extend([u'-q', u'--status_view', u'none'])\n if self.timezone:\n cmd.extend([u'-z', self.timezone])\n cmd.extend([\n u'--logfile', log_file_path, self.plaso_storage_file_path,\n self.artifacts_path\n ])\n self.console_out.VerboseOut(u'Running external command: {0:s}'.format(\n u' '.join(cmd)))\n # Running the local l2t command\n try:\n l2t_proc = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n _, errors = l2t_proc.communicate()\n l2t_status = l2t_proc.wait()\n if l2t_status:\n self.console_out.StdErr(errors)\n raise ValueError(u'The command {0:s} failed'.format(u' '.join(cmd)))\n except OSError as e:\n raise ValueError(\n 'An error occurred while attempting to run plaso: {0:s}'.format(e))\n\n @staticmethod\n def launch_processor(collector_output, timezone=None, verbose=False):\n \"\"\"Thread one or more LocalPlasoProcessor obects.\n\n Args:\n collector_output: Path to data to process\n timezone: Timezone name (optional)\n verbose: Boolean indicating if to use verbose output\n\n Returns:\n A list of LocalPlasoProcessor objects that can be join()ed from the\n caller.\n\n \"\"\"\n processors = []\n for name, path in collector_output:\n processor = LocalPlasoProcessor(path, timezone, verbose)\n processor.name = name\n processor.start()\n processors.append(processor)\n\n return processors\n\n @property\n def output(self):\n \"\"\"Dynamically generate plugin processor output.\"\"\"\n return [(self.name, self.plaso_storage_file_path)]\n\nMODCLASS = [('localplaso', LocalPlasoProcessor)]\n","sub_path":"dftimewolf/lib/processors/localplaso.py","file_name":"localplaso.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"256083258","text":"#!/usr/bin/env python\n# We apply Jackknife sampling to $C_i$, calculate the effective and make a fit of 2pt\n\nimport pandas as pd\nimport numpy as np\nimport gvar as gv\nimport lsqfit\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nmatplotlib.use('pdf')\n\n\nltS_GeV=0.197/0.12\n\n# Read the useful data\n# We then need to extract column 7 and reshape it to a matrix ($C_i$ matrix) such that each row represents a time series for observations under the corresponding configuration.\ndef read2pt(name):\n dat_ori = pd.read_table(name, delim_whitespace = True, header = None)\n Nconf = len(set(dat_ori[0])) # number of configurations\n dat = dat_ori[7].to_numpy().reshape((Nconf, 32)) # rows representing each observation, columns representing time steps\n return dat\n\n# Resampling is taken by extracting the $i$-th row and calculating the 
average along each column.\ndef Jsamp (ci):\n ci_resamp = np.zeros_like(ci)\n for nrow in range(ci.shape[0]):\n cols_extracted = np.concatenate((ci[:nrow], ci[nrow + 1:]), axis = 0)\n ci_resamp[nrow, :] = np.average(cols_extracted, axis = 0)\n return ci_resamp\n\n# Calculat the effecgtive mass\ndef eff_m_per(ci):\n Mass = ltS_GeV * np.log(ci[:, :-1]/ci[:, 1:])\n return Mass\n\n# Calculat the central value and errors without correaltion\ndef cen_err(a, jackYes):\n N_j = a.shape[0]\n if(jackYes=='jacknife'):\n return np.mean(a,axis=0), np.std(a,axis=0)*np.sqrt(N_j-1)\n else:\n return np.mean(a,axis=0), np.std(a,axis=0)/np.sqrt(N_j-1)\n \n# calcualte the covriant matrix, some change with respect to my orignal code.\ndef Covmatrix(data,jackYes):\n nf, nt = data.shape\n ave = np.broadcast_to(np.mean(data,0), (nf, nt))\n cov = data - ave\n cov = np.matmul(cov.T, cov) / nf #利用了矩阵乘法把组态求和了\n if(jackYes== 'jacknife'):\n cov=cov*(nf-1)\n return cov\n\n#### plot the effective mass\ndef plot_eff(mass_cen, mass_err,mass_cen_Jsamp, mass_err_Jsamp):\n\n xmin=2\n xmax=16\n (fig,ax) = plt.subplots(nrows=1, ncols=1, sharex=True,figsize =(8,4))\n x_range=np.arange(0,len(mass_cen))\n ax.errorbar(x_range[xmin:xmax]-0.1,mass_cen[xmin:xmax],mass_err[xmin:xmax],fmt='None',ms=3.,color='brown',ecolor='brown',label=r'$Orignal$')\n ax.errorbar(x_range[xmin:xmax]+0.1,mass_cen_Jsamp[xmin:xmax],mass_err_Jsamp[xmin:xmax],fmt='None',ms=3.,color='b',ecolor='b',label=r'$Jacknife$')\n \n plt.minorticks_on()\n ax.axhline(y=0.31, c=\"r\", ls=\"--\",lw=1, label=r'$m=0.31{\\mathrm{GeV}}$')\n plt.legend(loc='upper right')\n plt.xlabel(r'Time / Lattice Unit')\n plt.ylabel(r'Mass / GeV')\n # ax.set_ylim(0,0.9)\n \n pp = PdfPages(\"effetive_mass_self_2conf.pdf\")\n plt.savefig(pp, format='pdf')\n pp.close()\n plt.close()\n return 0\n \n#### plot and fit the 2pt\ndef plot_fit_2pt(c2pt_cen, c2pt_err,c2pt_cov):\n\n xmin=2\n xmax=30\n prior = {'c0':gv.gvar(0.02, 0.5),'m0':gv.gvar(0.3,1.),'c1':gv.gvar(1., 100.),'deltam':gv.gvar(0.5,10.)}\n \n xfit = np.arange(xmin,xmax)\n ## different errors in the fit\n# yfit= gv.gvar(c2pt_cen[xmin:xmax],c2pt_err[xmin:xmax])\n yfit= gv.gvar(c2pt_cen[xmin:xmax],np.sqrt(c2pt_cov[xmin:xmax,xmin:xmax].diagonal()))\n# yfit= gv.gvar(c2pt_cen[xmin:xmax],c2pt_cov[xmin:xmax,xmin:xmax])\n \n def fcn2pt(x, p): # fit function of x and parameters p\n ans = p['c0']*(np.exp(-x* p['m0']/ltS_GeV))*(1+p['c1']*(np.exp(-x* p['deltam']/ltS_GeV)))\n return ans\n \n fit = lsqfit.nonlinear_fit(data=(xfit, yfit),svdcut=1e-3,prior=prior, fcn=fcn2pt)\n \n print(fit.format(maxline=True))\n \n fitted_result=fcn2pt(xfit,fit.p);\n \n fitted_cen = np.array([ fitted_result[i].mean for i in range(0,len(fitted_result))])\n fitted_err = np.array([ fitted_result[i].sdev for i in range(0,len(fitted_result))])\n### We plot the 2pt and fitted results\n (fig,ax) = plt.subplots(nrows=1, ncols=1, sharex=True,figsize =(8,4))\n ax.errorbar(xfit,c2pt_cen[xmin:xmax],c2pt_err[xmin:xmax],fmt='None',ms=3.,color='brown',ecolor='brown',label=r'$Data$')\n ax.errorbar(xfit,fitted_cen,fitted_err,fmt='b',ms=3.,color='b',ecolor='b',label=r'$Jacknife$')\n ax.fill_between(xfit,fitted_cen+fitted_err,fitted_cen+(-1)*fitted_err,facecolor='b',alpha=0.4)\n plt.minorticks_on()\n plt.legend(loc='upper right')\n plt.xlabel(r'Time / Lattice Unit')\n plt.ylabel(r'$C_2$')\n \n pp = PdfPages(\"c2_self_2conf.pdf\")\n plt.savefig(pp, format='pdf')\n pp.close()\n plt.close()\n return 0\n\ndef main():\n\n c2pt = read2pt(\"pion_gamma15_p0_t0_1_self_2conf.txt\")\n c2pt_Jsamp = 
Jsamp(c2pt)\n eff_m_Jsamp = eff_m_per(c2pt_Jsamp)\n eff_m = eff_m_per(c2pt)\n \n ### effective mass\n mass_cen, mass_err = cen_err(eff_m,'No');\n mass_cen_Jsamp, mass_err_Jsamp = cen_err(eff_m_Jsamp,'jacknife');\n plot_eff(mass_cen, mass_err,mass_cen_Jsamp, mass_err_Jsamp);\n \n ### calculate 2pt and fit 2pt\n c2pt_cen, c2pt_err =cen_err(c2pt_Jsamp,'jacknife');\n c2pt_cov = Covmatrix(c2pt_Jsamp,'jacknife');\n plot_fit_2pt(c2pt_cen, c2pt_err,c2pt_cov);\n# print(c2pt_cov)\n return 0\nif __name__ == \"__main__\":\n main()\n print(\"end\")\n","sub_path":"plot_fit.py","file_name":"plot_fit.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"226342193","text":"import cv2\nimport numpy as np\nimport time\n\n# Minimim area threshold that is boxed\nAREA_THRESHHOLD = 1000\n\n# Number of frames to skip to calculate the box\nFRAME_SKIP_COUNT = 2\n\n# Title of the window\nWINDOW_TITLE = 'Video'\n\n# Define Window settings\ncv2.namedWindow(WINDOW_TITLE, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)\ncv2.resizeWindow(WINDOW_TITLE, 1500,1000)\n\n# Create a VideoCapture object and read from input file\ncap = cv2.VideoCapture('/home/pi/Documents/PDP/VID1.mp4')\n\n# Check if camera opened successfully\nif (cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n\n# Function that takes in a image and draws boxes around suspected plants\ndef box_image(img: np.array):\n # Converting image from BGR to HSV color space\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n # Generating the mask that outlines the plants\n # Method 1: Look for the color green\n mask1 = cv2.inRange(hsv, (30, 30, 30), (70, 255,255))\n # Method 2\n\n # Take the mask and clean up the holes in the mask\n # Open removes area of the holes in the mask (removes noise) and then adds area to the holes\n mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))\n # Dilate areas in the mask (Add area to the holes in the mask)\n mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3,3), np.uint8))\n\n ret,thresh = cv2.threshold(mask1, 127, 255, 0)\n contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n # List of Rectangle objects\n rect_list = []\n # Loop through each of the \"Plant\" areas\n for c in contours:\n # if the \"Plant\" is large enough draw a rectangle around it\n if cv2.contourArea(c) > AREA_THRESHHOLD:\n # get the bounding rect\n x, y, w, h = cv2.boundingRect(c)\n rect_list.append((x, y, w, h))\n # draw a green rectangle to visualize the bounding rect\n # cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 15)\n return rect_list\n\n# current frame counter\ncount = 0\nrect_list = []\n\n# Read until video is completed\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n # t0 = time.time()\n count = count + 1\n if ((count % FRAME_SKIP_COUNT) == 0):\n rect_list = box_image(frame)\n # t1 = time.time()\n\n for rects in rect_list:\n x, y, w, h = rects\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 15)\n\n # Display the resulting frame\n cv2.imshow(WINDOW_TITLE,frame)\n\n # Press Q on keyboard to exit\n if cv2.waitKey(1) == ord('q'):\n break\n\n # print(f'Frame {count} Calc Time: {t1-t0}')\n\n # Break the loop\n else:\n break\n\n# When everything done, release the video capture object\ncap.release()\n\n# Closes all the 
frames\ncv2.destroyAllWindows()\n","sub_path":"convert_video.py","file_name":"convert_video.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"348715687","text":"def captcha_sum(captcha):\n # define sum and set it to nada\n sum = 0\n #define length and half_length - not necessary but makes it look a bit nicer\n length = len(captcha)\n half_length = len(captcha)//2\n\n # add half the string again to the end since it \"wraps\"\n captcha += captcha[0:half_length]\n\n # cycle through string checking for dupes\n index = 0\n while index < length:\n if captcha[index] == captcha[index + half_length]:\n # add to sum if it's a dupe\n sum += int(captcha[index])\n index = index + 1\n\n return sum\n","sub_path":"1/1-2.py","file_name":"1-2.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"560553016","text":"import tkinter as tk\nimport numpy as np\nfrom pacman_utils import add\n\n\nclass PacManGraphics:\n\tdef __init__(self, root, maze, width=640, height=480):\n\t\tself.root = root\n\t\tself.maze = maze\n\t\tself.canvas = None\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.scoreboard = None\n\t\tself.draw_graphics()\n\t\t\n\tdef draw_graphics(self):\n\t\tcanvas = tk.Canvas(self.root, width=self.width, height=self.height)\n\t\tdx = self.width / (self.maze.n_columns - 1)\n\t\tdy = self.height / (self.maze.n_rows - 1)\n\t\tself.canvas = Canvas(canvas, dx, dy)\n\t\tcanvas.pack()\n\t\tself.add_background()\n\t\tself.draw_maze()\n\t\tself.create_scoreboard()\n\n\tdef add_background(self, color='black'):\n\t\tself.canvas.canvas.create_rectangle(0, 0, self.width, self.height, outline=color, fill=color)\n\n\tdef draw_maze(self):\n\t\tfor item in self.maze.get_items():\n\t\t\tif item is not None: item.draw_item(self.canvas)\n\n\tdef create_scoreboard(self):\n\t\tx = self.canvas.dx\n\t\ty = self.height - self.canvas.dy\n\t\tself.scoreboard = self.canvas.canvas.create_text(x, y, fill='white', anchor='w', text='Score: 0')\n\n\tdef update_scoreboard(self, score):\n\t\tself.canvas.canvas.itemconfigure(self.scoreboard, text='Score: ' + str(score))\n\n\tdef start(self):\n\t\tself.root.mainloop()\n\n\tdef quit(self):\n\t\tself.canvas.canvas.destroy()\n\t\tself.root.quit()\n\n\nclass Canvas:\n\tdef __init__(self, canvas, dx, dy):\n\t\tself.canvas = canvas\n\t\tself.dx = dx\n\t\tself.dy = dy\n\n\nclass PacMan:\n\tcolor = 'yellow'\n\tr = 0.5\n\n\tdef __init__(self, position):\n\t\tself.position = position\n\t\tself.canvas = None\n\t\tself.item = None\n\t\tself.direction = None\n\n\tdef draw_item(self, canvas):\n\t\tself.canvas = canvas\n\t\tcanvas_coordinates = get_canvas_coordinates(canvas, self.position)\n\t\tradius = self.r * min(canvas.dx, canvas.dy)\n\t\tself.item = create_circle(canvas.canvas, canvas_coordinates, radius, self.color)\n\n\tdef move_item(self, displacement):\n\t\tself.direction = tuple(np.sign(x) for x in displacement) # This attribute is needed for class ChaseGhost\n\t\tself.position = add(self.position, displacement)\n\t\tself.canvas.canvas.move(self.item, *get_canvas_displacement(self.canvas, displacement))\n\n\nclass Ghost:\n\tdef __init__(self, position, color):\n\t\tself.position = self.intial_position = position\n\t\tself.color = color\n\t\tself.item = None\n\t\tself.direction = None\n\n\tdef draw_item(self, canvas):\n\t\tself.item = GhostItem(self.position, canvas, self.color)\n\n\tdef 
move_item(self, displacement):\n\t\tself.direction = tuple(np.sign(x) for x in displacement) # This attribute is needed for class ChaseGhost\n\t\tself.position = add(self.position, displacement)\n\t\tself.item.move_item(displacement)\n\n\tdef move_to_cave(self):\n\t\tself.position = self.intial_position\n\t\tself.item.move_to_initial_position(self.position)\n\n\tdef change_color(self, color):\n\t\tself.item.set_color(color)\n\n\nclass GhostItem:\n\twidth = 0.9\n\theight = 0.9\n\teye_radius = 4\n\tpupil_radius = 2\n\tscared_color = 'blue'\n\n\tdef __init__(self, position, canvas, color):\n\t\tself.canvas = canvas\n\t\tself.color = color\n\t\tself.body = self.draw_body(position)\n\t\tself.eyes = self.draw_eyes(position)\n\n\tdef draw_body(self, position):\n\t\twidth = self.width * min(self.canvas.dx, self.canvas.dy)\n\t\theight = self.height * min(self.canvas.dx, self.canvas.dy)\n\t\treturn create_rectange(self.canvas.canvas, get_canvas_coordinates(self.canvas, position), width, height, self.color)\n\n\tdef draw_eyes(self, position):\n\t\tcanvas_coordinates = get_canvas_coordinates(self.canvas, position)\n\t\tleft_eye_position = canvas_coordinates[0] - 0.2 * self.canvas.dx, canvas_coordinates[1] - 0.15 * self.canvas.dy\n\t\tright_eye_position = canvas_coordinates[0] + 0.2 * self.canvas.dx, canvas_coordinates[1] - 0.15 * self.canvas.dy\n\t\t# eye_radius = self.eye_radius * min(self.canvas.dx, self.canvas.dy)\n\t\t# pupil_radius = self.pupil_radius * min(self.canvas.dx, self.canvas.dy)\n\t\tleft_eye = create_circle(self.canvas.canvas, left_eye_position, self.eye_radius, 'white')\n\t\tleft_pupil = create_circle(self.canvas.canvas, left_eye_position, self.pupil_radius, 'black')\n\t\tright_eye = create_circle(self.canvas.canvas, right_eye_position, self.eye_radius, 'white')\n\t\tright_pupil = create_circle(self.canvas.canvas, right_eye_position, self.pupil_radius, 'black')\n\t\treturn left_eye, right_eye, left_pupil, right_pupil\n\n\tdef set_color(self, color):\n\t\tself.color = color\n\t\tself.canvas.canvas.itemconfigure(self.body, outline=color, fill=color)\n\n\tdef move_item(self, displacement):\n\t\tcanvas_displacement = get_canvas_displacement(self.canvas, displacement)\n\t\tfor i in [self.body, *self.eyes]:\n\t\t\tself.canvas.canvas.move(i, *canvas_displacement)\n\t\t\tself.canvas.canvas.lift(i)\n\n\tdef move_to_initial_position(self, position):\n\t\tfor i in [self.body, *self.eyes]:\n\t\t\tself.canvas.canvas.delete(i)\n\t\tself.body = self.draw_body(position)\n\t\tself.eyes = self.draw_eyes(position)\n\n\nclass Wall:\n\tcolor = 'blue'\n\twidth = 10\n\n\tdef __init__(self, position, extensions):\n\t\tself.position = position\n\t\tself.extensions = extensions\n\n\tdef draw_item(self, canvas):\n\t\t# width = self.width * min(canvas.dx, canvas.dy)\n\t\tx0, y0 = get_canvas_coordinates(canvas, self.position)\n\t\tfor position in self.extensions:\n\t\t\tx1, y1 = get_canvas_coordinates(canvas, position)\n\t\t\tif y0 == y1:\n\t\t\t\t# The wall segment is horizontal\n\t\t\t\tstart = x0 - self.width / 2, y0\n\t\t\t\tstop = x1 + self.width / 2, y1\n\t\t\telse:\n\t\t\t\t# The wall segment is vertical\n\t\t\t\tstart = x0, y0 - self.width / 2\n\t\t\t\tstop = x1, y1 + self.width / 2\n\t\t\tcanvas.canvas.create_line(start, stop, fill=self.color, width=self.width)\n\n\nclass Food:\n\tcolor = 'white'\n\tr = 0.1\n\n\tdef __init__(self, position):\n\t\tself.position = position\n\t\tself.canvas = None\n\t\tself.item = None\n\n\tdef draw_item(self, canvas):\n\t\tself.canvas = canvas\n\t\tcanvas_coordinates = 
get_canvas_coordinates(canvas, self.position)\n\t\tradius = self.r * min(canvas.dx, canvas.dy)\n\t\tself.item = create_circle(canvas.canvas, canvas_coordinates, radius, self.color)\n\n\tdef delete_item(self):\n\t\tself.canvas.canvas.delete(self.item)\n\t\tself.item = None\n\n\nclass PowerPellet:\n\tcolor = 'white'\n\tr = 0.2\n\n\tdef __init__(self, position):\n\t\tself.position = position\n\t\tself.canvas = None\n\t\tself.item = None\n\n\tdef draw_item(self, canvas):\n\t\tself.canvas = canvas\n\t\tcanvas_coordinates = get_canvas_coordinates(canvas, self.position)\n\t\tradius = self.r * min(canvas.dx, canvas.dy)\n\t\tself.item = create_circle(canvas.canvas, canvas_coordinates, radius, self.color)\n\n\tdef delete_item(self):\n\t\tself.canvas.canvas.delete(self.item)\n\t\tself.item = None\n\n\ndef get_canvas_displacement(canvas, displacement):\n\treturn displacement[0] * canvas.dx, displacement[1] * canvas.dy\n\n\ndef get_canvas_coordinates(canvas, position):\n\tx, y = position\n\treturn x * canvas.dx, y * canvas.dy\n\n\ndef create_circle(canvas, center_coordinates, r, outline_color, fill_color=None):\n\tx, y = center_coordinates\n\tx0, x1 = x - r - 1, x + r\n\ty0, y1 = y - r - 1, y + r\n\tif fill_color == None: \n\t\tfill_color = outline_color\n\treturn canvas.create_oval(x0, y0, x1, y1, outline=outline_color, fill=fill_color)\n\n\ndef create_rectange(canvas, center_coordinates, width, height, outline_color, fill_color=None):\n\tif fill_color == None: \n\t\tfill_color = outline_color\n\treturn canvas.create_rectangle(get_coorner_coordinates(center_coordinates, width, height), outline=outline_color, fill=fill_color)\n\n\ndef get_coorner_coordinates(center_coordinates, width, height):\n\tx, y = center_coordinates\n\tx0, y0 = x - width / 2, y + height / 2\n\tx1, y1 = x + width / 2, y - height / 2\n\treturn x0, y0, x1, y1\n","sub_path":"pacman/pacman_graphics.py","file_name":"pacman_graphics.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"345857328","text":"import json\nfrom io import BytesIO\n\nfrom tests.base_test import BaseTest\nfrom app import db\nfrom app.models import Icon, Category, ThrivType\nfrom app.resources.schema import CategorySchema, IconSchema, ThrivTypeSchema\n\n\nclass TestIcons(BaseTest):\n def test_list_category_icons(self):\n i1 = Icon(name=\"Happy Coconuts\")\n i2 = Icon(name=\"Fly on Strings\")\n i3 = Icon(name=\"Between two Swallows\")\n i4 = Icon(name=\"otherwise unladen\")\n db.session.add_all([i1, i2, i3, i4])\n db.session.commit()\n rv = self.app.get('/api/icon', content_type=\"application/json\")\n self.assertSuccess(rv)\n response = json.loads(rv.get_data(as_text=True))\n self.assertEqual(4, len(response))\n\n def test_update_icon(self):\n i = Icon(name=\"Happy Coconuts\")\n db.session.add(i)\n db.session.commit()\n i.name = \"Happier Coconuts\"\n rv = self.app.put(\n '/api/icon/%i' % i.id,\n data=json.dumps(IconSchema().dump(i).data),\n content_type=\"application/json\")\n self.assertSuccess(rv)\n response = json.loads(rv.get_data(as_text=True))\n self.assertEqual(\"Happier Coconuts\", i.name)\n\n def test_upload_icon(self):\n i = {\"name\": \"Happy Coconuts\"}\n rv = self.app.post('/api/icon', data=json.dumps(i), content_type=\"application/json\")\n self.assertSuccess(rv)\n response = json.loads(rv.get_data(as_text=True))\n icon_id = response[\"id\"]\n\n rv = self.app.put(f'/api/icon/{icon_id}', data=dict(image=(BytesIO(b\"hi everyone\"), 'test.svg'), ))\n 
self.assertSuccess(rv)\n data = json.loads(rv.get_data(as_text=True))\n # self.assertEqual(\n # \"https://s3.amazonaws.com/edplatform-ithriv-test-bucket/\"\n # f\"ithriv/icon/{icon_id}.svg\", data[\"url\"])\n self.assertEqual(data['name'], i['name'])\n\n def test_set_category_icon(self):\n category = Category(\n name=\"City Museum\",\n description=\"A wickedly cool amazing place in St Louis\",\n color=\"blue\")\n db.session.add(category)\n icon = Icon(name=\"Cool Places\")\n db.session.add(icon)\n db.session.commit()\n category.icon_id = icon.id\n rv = self.app.post(\n '/api/category',\n data=json.dumps(CategorySchema().dump(category).data),\n content_type=\"application/json\")\n self.assertSuccess(rv)\n response = json.loads(rv.get_data(as_text=True))\n self.assertEqual(icon.id, response[\"icon_id\"])\n self.assertEqual(\"Cool Places\", response[\"icon\"][\"name\"])\n\n def test_set_type_icon(self):\n thrivtype = ThrivType(name=\"Wickedly Cool\")\n db.session.add(thrivtype)\n icon = Icon(name=\"Cool Places\")\n db.session.add(icon)\n db.session.commit()\n thrivtype.icon_id = icon.id\n rv = self.app.post(\n '/api/category',\n data=json.dumps(ThrivTypeSchema().dump(thrivtype).data),\n content_type=\"application/json\")\n self.assertSuccess(rv)\n response = json.loads(rv.get_data(as_text=True))\n self.assertEqual(icon.id, response[\"icon_id\"])\n self.assertEqual(\"Cool Places\", response[\"icon\"][\"name\"])\n","sub_path":"tests/test_icons.py","file_name":"test_icons.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"535589780","text":"\"\"\"\nAutor: GAÑAN, Tomas // CERIONI, Enrique \nEjercicio 3: Torres de Hanoi\n\"\"\"\n\n# En el siguiente algoritmo se hace uso de la recursividad\n\ndef torresHanoi(n, torre1='1', torre2='2', torre3='3'): \n if n > 0:\n torresHanoi(n-1, torre1,torre3,torre2)\n print('El disco:', n, 'se mueve de la torre:', torre1, 'a la torre:', torre3)\n torresHanoi(n-1, torre2,torre1,torre3)\n \ndiscos = int(input('Por favor, ingrese el numero de discos: '))\ntorresHanoi(discos)\n\n# n = 1 siempre va a ser el disco mas chico\n","sub_path":"TP N°2/ej3b.py","file_name":"ej3b.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"478821204","text":"from random import randrange\n\nprint('Vítej ve hře Black Jack pro jednoho hráče. Cílem hry je mít více bodů, než krupiér.')\nprint('Každé kolo máš možnost si \"líznout\" kartu s náhodnou hodnotou 2-10. Následně hraje krupiér.')\nprint('Pokud překročíš 21 bodů, prohráváš.')\nprint()\n\nbody = 0\nkrupier = 0\n\nprint('Hra proti krupiérovi.')\nprint()\n\nwhile True:\n chce = input('Chceš si líznout kartu? (ano/ne) ')\n\n # Líže hráč i krupiér\n if chce.casefold() == 'ano' and krupier < 17:\n karta = randrange(2, 11)\n body = body + karta\n print('Máš ' + str(body) + ' bodů.')\n\n karta = randrange(2, 11)\n krupier = krupier + karta\n print('Krupiér má ' + str(krupier) + ' bodů.')\n\n print()\n\n # Líže jen hráč\n if chce.casefold() == 'ano' and krupier > 16:\n print('Krupiér končí na ' + str(krupier) + ' bodech.')\n print('Máš ' + str(body) + ' bodů.')\n \n while True:\n chce = input('Chceš si líznout kartu? 
(ano/ne) ')\n if chce.casefold() == 'ano':\n karta = randrange(2, 11)\n body = body + karta\n print('Máš ' + str(body) + ' bodů.')\n else:\n break\n if body > 21:\n break\n\n # Líže jen krupiér\n if chce.casefold() != 'ano' and krupier < 17:\n karta = randrange(2, 11)\n krupier = krupier + karta\n print('Končíš na ' + str(body) + ' bodech.')\n print('Krupiér má ' + str(krupier) + ' bodů.')\n\n while True:\n input('Pokračuj stisknutím Enter.')\n karta = randrange(2, 11)\n krupier = krupier + karta\n print('Krupiér má ' + str(krupier) + ' bodů.')\n if krupier > 16:\n break\n\n # Nelíže nikdo\n if chce.casefold() != 'ano' and krupier > 16:\n break\n\n if body > 21:\n break\n if krupier > 21:\n break\n\nprint('\\n' + 'Konečný výsledek je:' + '\\n' 'Ty: ' + str(body) + ' bodů' + '\\n' 'Krupiér: ' + str(krupier) + ' bodů' )\nvitez = max(body, krupier)\nprint()\nif body > 21 and krupier > 21:\n print('Je to nerozhodně!')\nif body > 21 and krupier < 22:\n print('Prohrál jsi!')\nif body < 22 and krupier > 21:\n print('Vyhrál jsi!')\nif body == krupier:\n print('Je to nerozhodně!')\nelif body < 22 and krupier < 22:\n if vitez == body:\n print('Vyhrál jsi!')\n else:\n print('Prohrál jsi!')\nprint()\n\ninput('Ukončete stisknutím libovolné klávesy.')","sub_path":"blackjack_single_player.py","file_name":"blackjack_single_player.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"602357907","text":"#!/usr/bin/env python\n\n\"\"\" Read data from mongo collection and save it's relational model to csvs \"\"\"\n\n__author__ = \"Yaroslav Litvinov\"\n__copyright__ = \"Copyright 2016, Rackspace Inc.\"\n__email__ = \"yaroslav.litvinov@rackspace.com\"\n\nimport pprint\nimport os\nimport sys\nimport json\nimport argparse\nimport logging\nfrom logging import getLogger\nimport configparser\nfrom collections import namedtuple\nfrom sets import Set\n# profiling\nfrom pstats import Stats\nfrom cProfile import Profile\n# for data input\nfrom mongo_reader.reader import MongoReader\nfrom mongo_reader.reader import mongo_reader_from_settings\n# modules mostly used by data output functions\nfrom mongo_schema.schema_engine import SchemaEngine\nfrom mongo_schema.schema_engine import create_tables_load_bson_data\nfrom mongo_schema.schema_engine import log_table_errors\nfrom gizer.opcsv import CsvWriteManager\nfrom gizer.opcsv import NULLVAL\nfrom gizer.opcreate import generate_drop_table_statement\nfrom gizer.opcreate import generate_create_table_statement\nfrom gizer.opcreate import generate_create_index_statement\nfrom gizer.opcreate import INDEX_ID_IDXS\nfrom gizer.opinsert import table_rows_list\nfrom gizer.opinsert import ENCODE_ONLY\nfrom gizer.opmultiprocessing import FastQueueProcessor\nfrom gizer.opconfig import mongo_settings_from_config\nfrom gizer.etl_mongo_reader import EtlMongoReader\n\n\nCSV_CHUNK_SIZE = 1024 * 1024 * 100 # 100MB\nETL_PROCESS_NUMBER = 8\nETL_QUEUE_SIZE = ETL_PROCESS_NUMBER*2\n\nTablesToSave = namedtuple('TablesToSave', ['rec_id', 'rows', 'errors'])\n\ndef create_table(sqltable, psql_schema_name, table_prefix):\n \"\"\" get drop / create ddl statements \"\"\"\n drop_t = generate_drop_table_statement(sqltable, psql_schema_name,\n table_prefix)\n create_t = generate_create_table_statement(sqltable, psql_schema_name,\n table_prefix)\n create_i = generate_create_index_statement(sqltable, \n psql_schema_name,\n table_prefix,\n INDEX_ID_IDXS)\n return drop_t + '\\n' + create_t + '\\n' + create_i + 
'\\n'\n\ndef merge_dicts(store, append):\n    \"\"\" merge two dicts, return merged dict. \"\"\"\n    for index_key, index_val in append.items():\n        cached_val = 0\n        if index_key in store:\n            cached_val = store[index_key]\n        store[index_key] = index_val + cached_val\n    return store\n\ndef save_ddl_create_statements(create_statements_file,\n                               schema_engine,\n                               psql_schema_name,\n                               table_prefix):\n    \"\"\" save create table statements to file \"\"\"\n    ddls = {}\n    if not psql_schema_name:\n        psql_schema_name = ''\n    if not table_prefix:\n        table_prefix = ''\n    sqltables = create_tables_load_bson_data(schema_engine, None).tables\n    for tablename, sqltable in sqltables.items():\n        ddls[tablename] = create_table(sqltable, psql_schema_name,\n                                       table_prefix)\n    for table_name in ddls:\n        create_query = ddls[table_name]\n        create_statements_file.write(create_query)\n\ndef save_csvs(csm, tables_rows):\n    \"\"\" write relational tables to csv files.\n    tables_rows -- dict {table_name: [rows]} of tables of rows to save\"\"\"\n    written = {}\n    for table_name in tables_rows:\n        written[table_name] = csm.write_csv(table_name,\n                                            tables_rows[table_name])\n    return written\n\ndef async_worker_handle_mongo_rec(schema_engines, rec_collection):\n    \"\"\" function intended to be called by FastQueueProcessor.\n    process mongo record / bson data in separate process.\n    schema_engine -- SchemaEngine\n    rec_collection - tuple(bson record, collection name)\"\"\"\n    rows_as_dict = {}\n    collection = rec_collection[1]\n    rec = rec_collection[0]\n    schema_engine = schema_engines[collection]\n    tables_obj = create_tables_load_bson_data(schema_engine, [rec])\n    for table_name, table in tables_obj.tables.items():\n        rows = table_rows_list(table, ENCODE_ONLY, null_value=NULLVAL)\n        rows_as_dict[table_name] = rows\n    return TablesToSave(rec_id=tables_obj.rec_id(),\n                        rows=rows_as_dict,\n                        errors=tables_obj.errors)\n\n# Fast queue helpers\n\ndef getargs():\n    \"\"\" get args from cmdline \"\"\"\n    default_request = '{}'\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--config-file\", action=\"store\",\n                        help=\"Config file with settings\",\n                        type=argparse.FileType('r'), required=True)\n    parser.add_argument(\"-cn\", \"--collection-name\",\n                        help=\"Mongo collection name \", type=str, required=True)\n    parser.add_argument(\"-js-request\",\n                        help='Mongo db search request in json format. 
\\\ndefault=%s' % (default_request), type=str)\n    parser.add_argument(\"-psql-table-prefix\", help=\"\", type=str)\n    parser.add_argument(\"--ddl-statements-file\",\n                        help=\"File to save create table statements\",\n                        type=argparse.FileType('w'), required=False)\n    parser.add_argument(\"-stats-file\",\n                        help=\"File to write written record counts\",\n                        type=argparse.FileType('w'))\n    parser.add_argument(\"--csv-path\",\n                        help=\"base path for results\",\n                        type=str, required=True)\n\n    args = parser.parse_args()\n    if args.js_request is None:\n        args.js_request = default_request\n\n    return args\n\ndef print_profiler_stats(profiler):\n    \"\"\" profiling results \"\"\"\n    profiler.disable()\n    state_printer = Stats(profiler, stream=sys.stderr).sort_stats('cumulative')\n    state_printer.print_stats()\n\ndef print_etl_stats(errors, all_written, etl_recs_count):\n    \"\"\" etl summary \"\"\"\n    ppinter = pprint.PrettyPrinter(indent=4)\n    log_table_errors(\"etl errors:\", errors)\n    if len(all_written):\n        getLogger(__name__).info(\"written: \" + ppinter.pformat(all_written))\n    else:\n        getLogger(__name__).warning(\"Nothing written!\")\n    getLogger(__name__).info(\"Expected Etl records count = %d\" % etl_recs_count)\n\ndef save_etl_stats(out_file, all_written):\n    \"\"\" save list of tables with processed counts \"\"\"\n    if out_file:\n        for name, value in all_written.items():\n            out_file.write(name + \" \" + str(value) + \"\\n\")\n\ndef main():\n    \"\"\" main \"\"\"\n    #for debugging purposes\n    #profiler = Profile() # profiling\n    #profiler.enable()\n\n    args = getargs()\n\n    config = configparser.ConfigParser()\n    config.read_file(args.config_file)\n\n    schema_name = config['psql']['psql-schema-name']\n    schemas_dir = config['misc']['schemas-dir']\n    schema_path = os.path.join(schemas_dir, args.collection_name + '.json')\n    schema_file = open(schema_path, 'r')\n\n    mongo_settings = mongo_settings_from_config(config, 'mongo')\n\n    mongo_reader = mongo_reader_from_settings(mongo_settings,\n                                              args.collection_name,\n                                              json.loads(args.js_request))\n    schema_engine = SchemaEngine(args.collection_name, [json.load(schema_file)])\n    table_names = create_tables_load_bson_data(schema_engine, None).tables.keys()\n    csm = CsvWriteManager(table_names, args.csv_path, CSV_CHUNK_SIZE)\n\n    etl_mongo_reader = EtlMongoReader(ETL_PROCESS_NUMBER,\n                                      ETL_QUEUE_SIZE,\n                                      async_worker_handle_mongo_rec,\n                                      #1st worker param\n                                      {args.collection_name: schema_engine}, \n                                      {args.collection_name: mongo_reader})\n    etl_mongo_reader.execute_query(args.collection_name,\n                                   json.loads(args.js_request))\n\n    getLogger(__name__).info(\"Connecting to mongo server \" + mongo_settings.host)\n    received_rec_ids = set()\n    errors = {}\n    all_written = {}\n    while True:\n        tables_to_save = etl_mongo_reader.next()\n        if not tables_to_save:\n            break\n        # don't process duplicates that can be accidentally returned by transp\n        if tables_to_save.rec_id not in received_rec_ids:\n            received_rec_ids.add(tables_to_save.rec_id)\n            all_written = merge_dicts(all_written,\n                                      save_csvs(csm, tables_to_save.rows))\n            errors = merge_dicts(errors, tables_to_save.errors)\n        else:\n            getLogger(__name__).warning(\"Skip duplicated rec_id=%s\",\n                                        tables_to_save.rec_id)\n    \n    if args.ddl_statements_file:\n        save_ddl_create_statements(args.ddl_statements_file,\n                                   schema_engine,\n                                   schema_name,\n                                   args.psql_table_prefix)\n    # save csv files\n    csm.finalize()\n\n    #for debugging purposes\n    #print_profiler_stats(profiler)\n    print_etl_stats(errors, all_written, etl_mongo_reader.etl_recs_count)\n    save_etl_stats(args.stats_file, 
all_written)\n\n exit_code = 0\n if etl_mongo_reader.current_mongo_reader.failed or \\\n etl_mongo_reader.fast_queue.error:\n exit_code = 1\n del etl_mongo_reader\n exit(exit_code)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n stream=sys.stdout,\n format='%(asctime)s %(levelname)-8s %(message)s')\n main()\n","sub_path":"mongo_reader.py","file_name":"mongo_reader.py","file_ext":"py","file_size_in_byte":9627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"116519366","text":"#!/usr/bin/python\nimport os.path\nfrom argparse import ArgumentParser\nfrom multiprocessing import Pool\nfrom superman.file_io import parse_spectrum, write_opus, write_rruff\n\ntry:\n from itertools import imap\nexcept ImportError:\n imap = map\n\n\ndef _parse(infile):\n return infile, parse_spectrum(infile)\n\n\ndef main():\n writers = dict(opus=write_opus, rruff=write_rruff)\n extensions = dict(opus='.opus', rruff='.txt')\n ap = ArgumentParser()\n ap.add_argument('-f', '--fmt', choices=writers, help='Format to convert to.')\n ap.add_argument('-o', '--outdir', default='.', help='Output directory.')\n ap.add_argument('--procs', type=int, default=1, help='# of processes to use.')\n ap.add_argument('files', nargs='+', help='Input file(s).')\n args = ap.parse_args()\n\n if args.procs > 1:\n imap = Pool(args.procs).imap_unordered\n\n writer = writers[args.fmt]\n ext = extensions[args.fmt]\n for infile, traj in imap(_parse, args.files):\n # Note: We append instead the new file extension (instead of replacing),\n # because the original extension can carry useful information.\n outfile = os.path.join(args.outdir, os.path.basename(infile)) + ext\n writer(outfile, traj, 'Converted spectrum from file: ' + infile)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/convert_spectra.py","file_name":"convert_spectra.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"498015557","text":"from collections import namedtuple\nimport tensorflow as tf\n\n_AccumulatorTuple = namedtuple(\n \"AccumulatorTuple\", ['first_order_moment_denom_accum', 'first_order_moment_num_accum', 'second_order_moment_denom_accum', 'second_order_moment_num_accum'])\n\n\nclass GenerativeLearningEM:\n\n def __init__(self, spn, online=True, reset_per_epoch=False, with_labels=False, with_sequence_lens=False):\n \"\"\"\n Utility class for learning SPNs in generative settings. The inner loop does not apply to (x_i, y_i) pairs,\n but simply to x_i. 
Will use ``libspn_keras.optimizers.OnlineExpectationMaximization`` as the optimizer.\n\n Args:\n spn: An instance of ``tf.keras.Model`` representing the SPN to train\n \"\"\"\n self._spn = spn\n self._trainable_variable_copies = [_copy_variable(v) for v in self._spn.trainable_variables]\n self._trainable_variables_initial_state = [_copy_variable(v) for v in self._spn.trainable_variables]\n self._online = online\n self._reset_per_epoch = reset_per_epoch\n self._with_labels = with_labels\n self._with_sequence_lens = with_sequence_lens\n\n @tf.function\n def _train_one_step(self, train_batch):\n \"\"\"\n Trains one step for a ``keras.Model``\n\n Args:\n x: A batch of samples\n\n Returns:\n The log marginal likelihood\n \"\"\"\n with tf.GradientTape() as tape:\n if self._with_labels:\n if self._with_sequence_lens:\n x, seq_lens, labels = train_batch\n log_likelihood = self._spn([x, seq_lens])\n else:\n x, labels = train_batch\n log_likelihood = self._spn(x)\n elif self._with_sequence_lens:\n x, seq_lens = train_batch\n log_likelihood = self._spn([x, seq_lens])\n else:\n x = train_batch[0]\n log_likelihood = self._spn(x)\n\n grads = tape.gradient(log_likelihood, self._spn.trainable_variables)\n\n vars_to_assign = self._spn.trainable_variables if self._online else self._trainable_variable_copies\n\n for v, g in zip(vars_to_assign, grads):\n v.assign(v + g)\n\n return log_likelihood\n\n def fit(self, train_data: tf.data.Dataset, epochs, steps_per_epoch=None):\n \"\"\"\n Fits the parameters of the SPN\n\n Args:\n train_data: An instance of ``tf.data.Dataset`` from which we get batches of :math:`x_i`\n steps_per_epoch: Steps per epoch\n \"\"\"\n for epoch in range(epochs):\n log_probability_x = 0.0\n samples = 0\n step = 0\n for train_batch in train_data:\n log_probability_x += tf.reduce_sum(self._train_one_step(train_batch))\n samples += tf.shape(train_batch[0])[0]\n step += 1\n if steps_per_epoch is not None and step == steps_per_epoch:\n break\n if not self._online:\n for v, v_copy in zip(self._spn.trainable_variables, self._trainable_variable_copies):\n v.assign(v_copy)\n\n if self._reset_per_epoch:\n for v_initial, v_copy in zip(\n self._trainable_variables_initial_state, self._trainable_variable_copies):\n v_copy.assign(v_initial)\n\n log_probability_x /= tf.cast(samples, tf.float32)\n tf.print('Epoch', epoch, ': mean log(p(X)) =', log_probability_x)\n\n def evaluate(self, test_dataset):\n log_marginal_likelihood = 0.0\n samples = 0\n for test_batch in test_dataset:\n samples += tf.shape(test_batch[0])[0]\n log_marginal_likelihood += tf.reduce_sum(self._spn(test_batch))\n log_marginal_likelihood /= tf.cast(samples, tf.float32)\n tf.print(\"Eval: mean log(p(X)) =\", log_marginal_likelihood)\n return log_marginal_likelihood\n\n\ndef _copy_variable(v):\n return tf.Variable(\n trainable=v.trainable, name=v.name.rstrip(':0123456789') + \"_offline_em_copy\", dtype=v.dtype, shape=v.shape,\n initial_value=tf.identity(v)\n )\n\n","sub_path":"libspn_keras/utils/generative_learning_em.py","file_name":"generative_learning_em.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"558550271","text":"# -*- coding: utf-8 -*-\n\nimport smtplib\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\n \n\n'''\nHow to send an email with Gmail as provider using 
Python?\nhttps://stackoverflow.com/questions/10147455/how-to-send-an-email-with-gmail-as-provider-using-python\n\nHow to Send Emails with Gmail using Python\nhttps://stackabuse.com/how-to-send-emails-with-gmail-using-python/\n\npython学习通过smtp发送电子邮件\nhttps://blog.csdn.net/pengzhi5966885/article/details/74701442\n\n飄逸的python - 發送帶各種類型附件的郵件\nhttps://blog.csdn.net/handsomekang/article/details/9811355\n\n'''\n\n\n''' ------------------------------------- Mail參數設定 ------------------------------------- '''\nSMTP_Server = \"smtp.gmail.com\" # 設定SMTP伺服器位址(Gmail: \"smtp.gmail.com\")\n\nmail_user = \"Your mail address\" # 寄件人信箱帳號\nmail_password = \"Your mail password\" # 寄件人信箱密碼\nmail_port = 465 # 465 Port 必須要用SSL加密來進行\nsent_from = mail_user # 信件寄送來源位址\nmail_to = \"Send to mail address\" # 信件寄送目的位址\n\n''' ---------------------------------------------------------------------------------------- '''\n\n\n''' ------------------------------------- Mail內容設定 ------------------------------------- '''\n# -- 建立一個訊息主體 --\nmsg = MIMEMultipart()\n\nmsg['Subject'] = \"Python SMTP GMAIL 寄送信件測試\" # 信箱主題\nmsg['From'] = sent_from # 來源位址\nmsg['To'] = mail_to # 目的位址\n\n# 信件內容(主要內容請打在這)\ncontent = MIMEText(\"Python 寄送信件測試.\")\n\n# 信件附件清單\nfile_list = {'test.txt', 'test2.txt'}\n\n''' ---------------------------------------------------------------------------------------- '''\n\n\n# 取得當前所在位置\ncurrentPath = os.getcwd()\n# print('currentPath: ' + currentPath)\n\n\n# 檢查確認是否有附件,如果有就串接到訊息主體去\ndef check_filelist():\n\n if len(file_list) != 0:\n\n for read_item in file_list:\n\n print(\"附件檔名: {}\".format(read_item))\n\n # https://www.w3schools.com/python/python_file_handling.asp\n # http://www.runoob.com/python/python-func-open.html\n # open('檔名','模式') 'r' 代表指讀取檔案內容,'b'代表為二元數值(binary)) , 't' 代表為文字\n part = MIMEApplication(\n open(os.path.join(currentPath , read_item), 'rt').read())\n part.add_header('Content-Disposition',\n 'attachment', filename=read_item)\n\n msg.attach(part) # 訊息主題串接附件\n\n\nif __name__ == '__main__':\n\n try:\n\n # 訊息主體串接信件內容\n msg.attach(content)\n \n # 檢查是否有帶附件\n check_filelist() \n\n # using a Secure Connection , for 465 port (Google)\n server = smtplib.SMTP_SSL(host = SMTP_Server,port = mail_port)\n\n server.ehlo() # 詢問信箱SMTP伺服器連接\n\n server.login(mail_user, mail_password) # 登入信箱服務伺服器\n\n server.sendmail(sent_from, mail_to, msg.as_string()) # 傳送信件\n\n server.close() # 關閉信箱服務伺服器\n\n print('已傳送信件至 {} !!'.format(mail_to))\n\n except Exception as exception:\n\n print('傳送信件至 {} 失敗...!'.format(mail_to))\n\n print(\"{}\".format(exception))\n","sub_path":"SMTPDemo_sample.py","file_name":"SMTPDemo_sample.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"402285595","text":"\nfrom flask import Blueprint, json, g, make_response\nfrom os import path\nimport sys\n\n# add the parent directory for in-project imports\nbase_dir = path.abspath(path.join(path.dirname(path.abspath(__file__)), '..'))\nif base_dir not in sys.path:\n sys.path.append(base_dir)\nfrom lib import query\nfrom lib import converters\nfrom errors import errors\n\n\nhousing = Blueprint('housing_blueprint', __name__)\nTABLE = 'housing_t'\n\n\"\"\"\nDictionary of querystring parameters tied to their column names and a function\nto form a WHERE statement.\n\"\"\"\nroom_attributes = {\n 'room_location_area': {\n 'column': 'roomlocationarea',\n 'converter': converters.where_string\n },\n 'residential_area': {\n 'column': 'residentialarea',\n 'converter': 
converters.where_string\n },\n 'room_location': {\n 'column': 'roomlocation',\n 'converter': converters.where_string\n },\n 'room_location_section': {\n 'column': 'roomlocationsection',\n 'converter': converters.where_string\n },\n 'room_location_floor_suite': {\n 'column': 'roomlocationfloorsuite',\n 'converter': converters.where_string\n },\n 'is_suite': {\n 'column': 'issuite',\n 'converter': converters.where_bool\n },\n 'floor_suite_webdescription': {\n 'column': 'floorsuitewebdescription',\n 'converter': converters.where_string\n },\n 'room': {\n 'column': 'room',\n 'converter': converters.where_string\n },\n 'room_area': {\n 'column': 'roomarea',\n 'converter': converters.where_int\n },\n 'room_space': {\n 'column': 'roomspace',\n 'converter': converters.where_string\n },\n 'room_type': {\n 'column': 'roomtype',\n 'converter': converters.where_string\n },\n 'ay_12_13_rs_status': {\n 'column': 'ay1213rsstatus',\n 'converter': converters.where_string\n },\n 'point_value': {\n 'column': 'pointvalue',\n 'converter': converters.where_double\n },\n 'lottery_number': {\n 'column': 'lotterynumber',\n 'converter': converters.where_int\n },\n}\n\n\n@housing.route('/rooms/options/')\n@errors.catch_error\ndef options(attr):\n \"\"\"\n Returns all options found in the database for this attribute\n\n @param attr: an attribute of the room objects\n \"\"\"\n if attr not in room_attributes:\n raise errors.AppError('INVALID_ATTRIBUTE')\n # strip the dictionary down to the releveant attribute\n relevant_values = {attr: room_attributes[attr]}\n pg_query, values = query.build_query(TABLE, relevant_values)\n g.cursor.execute(pg_query, values)\n results = g.cursor.fetchall()\n if not len(results): # no results, shouldn't be called\n raise errors.AppError('NO_RESULTS')\n return make_response(json.dumps({\n 'results': results,\n 'status': 200\n }), 200)\n\n\n@housing.route('/rooms')\n@housing.route('/rooms/')\n@errors.catch_error\ndef rooms(page=0):\n \"\"\" Returns all rooms that match the given querystring \"\"\"\n pg_query, values = query.build_query(TABLE, room_attributes, page=page)\n g.cursor.execute(pg_query, values)\n results = g.cursor.fetchall()\n if not len(results): # no results\n raise errors.AppError('NO_RESULTS')\n return make_response(json.dumps({\n 'results': results,\n 'status': 200\n }), 200)\n","sub_path":"data/housing/housing.py","file_name":"housing.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"433995325","text":"__author__ = \"Luke Liu\"\n#encoding=\"utf-8\"\n\n# inputs are the social_media words of someone, what we want to do,\n# is by knowing these data, we can predict the gender,the income level,and the age\n\n# so the gender and the income are classification,and the age is regression\nfrom keras import Input\nfrom keras import Model\nfrom keras.optimizers import RMSprop\nfrom keras import layers\n\nvocabulary_size = 50000\nnum_income_groups = 10\n\ninput_text = Input(shape=(None,),dtype='int32',name='media_resource')\n\ninput_text_embedd = layers.Embedding(vocabulary_size,32,input_length=100)(input_text)\nx = layers.Conv1D(128, 5, activation='relu')(input_text_embedd)\nx=layers.MaxPooling1D(5)(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.MaxPooling1D(5)(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.Conv1D(256, 5, activation='relu')(x)\nx = layers.GlobalMaxPooling1D()(x)\nx = layers.Dense(128, 
activation='relu')(x)\n\nage_pred = layers.Dense(1,name='age')(x)\nincome_pred=layers.Dense(10,activation='softmax',name='income')(x)\ngender_pred=layers.Dense(1,activation='sigmoid',name='gender')(x)\n\nmodel=Model(input_text,[age_pred,income_pred,gender_pred])\nmodel.summary()\n\nmodel.compile(optimizer='rmsprop',\n loss={\n 'age':'mse',\n 'income':'categorical_crossentropy',\n 'gender':'binary_crossentropy'\n },\n loss_weights={'age': 0.25, 'income': 1., 'gender': 10.}\n )\nimport numpy as np\ninput_text = np.random.randint(1,vocabulary_size,size=(5000,100))\n\n# income\nincome_tr=np.random.randint(1,10,size=(5000,10))\n# gender\ngender_tr=np.random.randint(0,2,size=(5000,))\n\n# age\nage_tr=np.random.randint(10,70,size=(5000,))\n\nmodel.fit(input_text,[age_tr,income_tr,gender_tr],\n batch_size=64,\n epochs=10,\n validation_split=0.2)","sub_path":"Keras高级编程实践/Keras实现多输出模型.py","file_name":"Keras实现多输出模型.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"630253930","text":"from selenium import webdriver\n\ndriver = webdriver.Chrome(r\"E:\\chromedriver_win32\\chromedriver.exe\") #指定driver路径\ndriver.get(\"http://www.weather.com.cn/html/province/jiangsu.shtml\") #获取页面\nelement=driver.find_element_by_id(\"forecastID\") #通过forecastID定位元素\nprint(element.text) #打印页面文本内容信息\ncities=element.text.split(\"℃\\n\")\n#print(cities) 调试打印\nlowestcity={} #定义一个字典存放最低气温及城市信息\nlowestcity[\"lowtep\"]=int(cities[0].split(\"\\n\")[1].split(\"℃/\")[0]) #最低温度,默认先取第一个\nlowestcity[\"cities\"]=cities[0].split(\"\\n\")[0] #最低温度的城市\nfor one in cities: #遍历取每个城市元素比较,取出最低温度值及对应的城市\n if int(one.split(\"\\n\")[1].split(\"℃/\")[0]) 0:\n loss = float(v) - float(h)\n noise_induced_loss[k] = loss\n else:\n pass\n\n print (\"Noise induced loss: \")\n print (noise_induced_loss)\n\n with open(\"../variables.json\", \"r+\") as f:\n data = json.load(f)\n data[\"noise_induced_loss\"][\"250\"] = noise_induced_loss[\"250\"]\n data[\"noise_induced_loss\"][\"500\"] = noise_induced_loss[\"500\"]\n data[\"noise_induced_loss\"][\"1000\"] = noise_induced_loss[\"1000\"]\n data[\"noise_induced_loss\"][\"2000\"] = noise_induced_loss[\"2000\"]\n data[\"noise_induced_loss\"][\"4000\"] = noise_induced_loss[\"4000\"]\n data[\"noise_induced_loss\"][\"8000\"] = noise_induced_loss[\"8000\"]\n f.seek(0)\n json.dump(data, f)\n f.truncate()\n\n\n# Show the result\n\ndef show_result(mean, age_related, noise_induced):\n pass\n","sub_path":"Result/calculate_result.py","file_name":"calculate_result.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"241903795","text":"import numpy as np\n\nclass PDPolicy:\n def __init__(self, env, swingup=True, eps_expl=0.00):\n self.K = np.asarray([[10, 2.0]])\n self.eps_expl = eps_expl\n self.env = env\n self.swingup = swingup\n\n def get_action(self, x):\n\n th, thdot = x\n\n\n if th < -np.pi/2 and self.swingup:\n u = [np.sign(thdot) - .1*thdot]\n elif th > np.pi/2 and self.swingup:\n u = [np.sign(thdot) - .1*thdot]\n elif np.random.rand() < self.eps_expl:\n u = self.env.action_space.sample()\n else:\n u = np.dot(-self.K, [th, thdot])\n\n return np.clip(u,self.env.action_space.low,self.env.action_space.high)[0]\n","sub_path":"05-value-approximation/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"317086130","text":"import pandas as pd\nfrom scipy import stats\nfrom trafficintelligence import metadata\n\n\nheadway = pd.read_csv('data.csv')\n\n# Removing cases without hwt\nheadway = headway[headway['site'].isin([\"montcalm-chartwell\", \"montcalm-inverness\", \"montcalm-voie\"])]\nheadway.loc[headway[\"leading_user_type\"].isin([0, 3, 5, 6]), [\"leading_user_type\"]] = 1\n\nheadway = headway.drop(headway.loc[(headway.av==1) & (headway.leading_user_type!=7)].index)\nheadway = headway.drop(headway.loc[headway.hwt_15 > 30].index)\n\ntableLocation = \" \"\ntableD = \"D (k-s statitic)\"\ntableP = \"p-value\"\n\nlocations = [\"montcalm-inverness\", \"montcalm-chartwell\", \"montcalm-voie\"]\n# Going through all the sites (previously selected locations)\nfor site in locations:\n # Compute K-S test\n dist1 = headway.loc[(headway.leading_user_type == 1) & (headway.site == site)][\"hwt_15\"]\n dist2 = headway.loc[(headway.leading_user_type == 7) & (headway.site == site)][\"hwt_15\"]\n D_statistic, p_value = stats.ks_2samp(dist1, dist2)\n # Add values to table\n tableLocation += \" & {}\".format(site)\n tableD += \" & {:.4}\".format(D_statistic)\n tableP += \" & {:.4}\".format(p_value)\n\nprint(tableLocation + \" \\\\\\ \\hline \\n\" + tableD + \" \\\\\\ \\hline \\n\" + tableP + \" \\\\\\ \\hline\")\n","sub_path":"results/performance/headway/ks-test.py","file_name":"ks-test.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"444470032","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom datetime import date, datetime\nfrom os import path\nimport time\n\n#Finds and starts webdriver\nURL = 'https://services.recwell.wisc.edu/FacilityOccupancy'\nPATH = './chromedriver.exe'\n\nwhile(1):\n #gets the date and time\n today = date.today()\n now = datetime.now()\n\n d = today.strftime('%m/%d/%y')\n t = now.strftime('%H:%M:%S')\n\n print('Date and time: ', d, ' ' , t )\n\n driver = webdriver.Chrome(executable_path=PATH)\n driver.get(URL)\n\n #parses content\n content = driver.page_source\n soup = BeautifulSoup(content)\n driver.quit()\n\n #find data\n name = soup.findAll(attrs='occupancy-count')[0].find('strong')\n\n #add to pandas, then export to csv\n df = pd.DataFrame({'Date': d, 'Time': t, 'Occupency': [name.text]})\n if not path.exists('data.csv'):\n df.to_csv('data.csv', index=False, encoding='Utf-8')\n else:\n df.to_csv('data.csv', mode='a', header=False, index=False, encoding='Utf-8')\n\n \n time.sleep(900)\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"428968202","text":"from django.contrib import admin\nfrom .models import Product, Comments\n\nfrom .models import Review\n\n\nclass ReviewAdmin(admin.ModelAdmin):\n model = Review\n list_display = ('product', 'rating', 'user_name', 'comment', 'pub_date')\n list_filter = ['pub_date', 'user_name']\n search_fields = ['comment']\n\n\nadmin.site.register(Review, ReviewAdmin)\n\nclass ProductAdmin(admin.ModelAdmin):\n list_display = [\"__str__\", \"slug\"]\n class Meta:\n model = Product\n\nadmin.site.register(Product, 
ProductAdmin)\nadmin.site.register(Comments)\n","sub_path":"updatepr/wproject1m/ecommerce2/product/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"368907787","text":"#\r\n# Name:\r\n# File: midB.py\r\n# Date: Fall 2019\r\n\r\nfrom graphics import *\r\n\r\ndef main():\r\n\r\n    # set up graphics window \r\n\r\n    myTitle = \"Test02b - Fall 2019\"\r\n    screenWidth = 401\r\n    screenHeight = 401\r\n    win = GraphWin(myTitle, screenWidth, screenHeight)\r\n    win.setBackground(\"white\")\r\n\r\n    # click mouse twice to draw blue rectangle\r\n    corner1 = win.getMouse()\r\n    corner2 = win.getMouse()\r\n    rect = Rectangle(corner1, corner2)\r\n    rect.setOutline(\"blue\")\r\n    rect.setFill(\"blue\")\r\n    rect.draw(win)\r\n\r\n    # click mouse once to draw red circle (radius fixed at 50 pixels)\r\n    center = win.getMouse()\r\n    circle = Circle(center, 50)\r\n    circle.setOutline(\"red\")\r\n    circle.setFill(\"red\")\r\n    circle.draw(win)\r\n\r\n    # click anywhere to end\r\n    win.getMouse()\r\n    win.close()\r\n\r\nmain() \r\n\r\n","sub_path":"program deatails/midterm/midB.py","file_name":"midB.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"335381879","text":"# Company: sql_alchemy_base\n# Author: Rob McGinley\n# Date: April 30, 2011\n# Copyright: 2011\n\nfrom backend.rpc.dispatch import register_method\nfrom database_connectivity import DatbaseConnectivity\nfrom user import required_user_perm_dec\n\nimport backend.models.submitted_article as submitted_article_model\nimport backend.models.user as user_model\nimport rpc_exceptions\n\n\nclass SubmittedArticle(DatbaseConnectivity):\n    \"\"\"\n    Wrapper class for common methods to create a new article. Needed so we can easily \n    obtain an engine/session\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Create session for correct object query/update/create/delete\n        \"\"\"\n        super(SubmittedArticle, self).__init__()\n    \n    @required_user_perm_dec\n    def createArticle(self, user_dict, params):\n        info = params[0]\n        \n        if self.exists(info['url']):\n            raise rpc_exceptions.ArticleSubmissionExists\n        \n        user = self.session.query(user_model.User).filter_by(\n            username=user_dict['username']).first()\n        \n        new_submission = submitted_article_model.SubmittedArticle(\n            user=user,\n            description=info['description'],\n            title=info['title'],\n            url=info['url']\n        )\n        \n        self.session.add(new_submission)\n        self.session.commit()\n        \n        return new_submission.to_dict()\n    \n    def exists(self, url):\n        \"\"\"\n        Tests if a submission already exists. 
Throws and \"SubmissionExists\" if it does already.\n \"\"\"\n test = self.session.query(\n submitted_article_model.SubmittedArticle).filter_by(url=url).first()\n return test is not None\n\n \n def list(self):\n pass\n\nsubmitedarticle = SubmittedArticle()\nregister_method('submittedarticle.createArticle', submitedarticle.createArticle)\n","sub_path":"services/backend/rpc/submitted_article.py","file_name":"submitted_article.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"559951852","text":"# -*- coding: utf-8 -*-\n'''\n提交命令:\n/opt/spark-2.0.2/bin/spark-submit \\\n--master yarn \\\n--deploy-mode client \\\n--driver-memory 15g \\\n--driver-cores 15 \\\n--queue project.hongjing \\\nall_info_merge.py {version}\n'''\n\nimport sys\nimport os\n\nfrom pyspark.sql import functions as fun\nfrom pyspark.conf import SparkConf\nfrom pyspark.sql import SparkSession\n\n\ndef get_read_path(file_name):\n return \"{path}/{file_name}/{version}\".format(version=RELATION_VERSION,\n file_name=file_name,\n path=IN_PATH_ONE)\n\ndef get_spark_session(): \n conf = SparkConf()\n conf.setMaster('yarn-client')\n conf.set(\"spark.yarn.am.cores\", 7)\n conf.set(\"spark.executor.memory\", \"70g\")\n conf.set(\"spark.executor.instances\", 20)\n conf.set(\"spark.executor.cores\", 15)\n conf.set(\"spark.python.worker.memory\", \"3g\")\n conf.set(\"spark.default.parallelism\", 6000)\n conf.set(\"spark.sql.shuffle.partitions\", 6000)\n conf.set(\"spark.broadcast.blockSize\", 1024) \n conf.set(\"spark.shuffle.file.buffer\", '512k')\n conf.set(\"spark.speculation\", True)\n conf.set(\"spark.speculation.quantile\", 0.98)\n \n spark = SparkSession \\\n .builder \\\n .appName(\"hgongjing2_all_info_merge\") \\\n .config(conf=conf) \\\n .enableHiveSupport() \\\n .getOrCreate() \n \n return spark \n\n\ndef spark_data_flow(tidversion):\n #读取相关信息数据\n basic_df = spark.read.parquet(\n get_read_path('basic')\n )\n #失信被执行人\n dishonesty_df = spark.read.parquet(\n get_read_path('dishonesty')\n )\n #执行人数\n zhixing_df = spark.read.parquet(\n get_read_path('zhixing')\n )\n #行政处罚\n xzcf_df = spark.read.parquet(\n get_read_path('xzcf')\n )\n #经营异常\n jyyc_df = spark.read.parquet(\n get_read_path('jyyc')\n )\n #开庭公告\n ktgg_df = spark.read.parquet(\n get_read_path('ktgg')\n )\n #法院公告\n rmfygg_df = spark.read.parquet(\n get_read_path('rmfygg')\n )\n #裁判文书\n zgcpwsw_df = spark.read.parquet(\n get_read_path('zgcpwsw')\n )\n #变更信息\n bgxx_df = spark.read.parquet(\n get_read_path('bgxx')\n )\n #黑企业名单\n black_df = spark.read.parquet(\n get_read_path('black_company')\n ).withColumnRenamed(\n 'company_type', 'is_black_company'\n )\n #step three的输出结果\n all_company_info = spark.read.parquet( \n \"{path}/\"\n \"all_company_info/\"\n \"{version}\".format(path=IN_PATH_TWO, \n version=RELATION_VERSION)\n )\n tid_all_company_info = all_company_info.where(\n all_company_info.risk_rank == u'高危预警'\n ).select(\n 'bbd_qyxx_id',\n fun.when(\n all_company_info.risk_rank == u'高危预警', True\n ).alias('is_high_company')\n )\n #类金融名单\n ljr_sample_df = spark.read.parquet(\n get_read_path('ljr_sample')\n )\n tid_ljr_sample_df = ljr_sample_df.select(\n 'bbd_qyxx_id',\n fun.when(\n ljr_sample_df.company_type != u'', True\n ).alias('is_leijinrong')\n )\n #民间借贷\n lawsuit_df = spark.read.parquet(\n get_read_path('lawsuit') \n )\n #公司字号\n namefrag_df = spark.read.parquet(\n '{path}/all_namefrag/{version}'.format(path=IN_PATH_THREE,\n version=RELATION_VERSION)\n )\n #关联方\n relation_df = 
spark.read.parquet(\n '{path}/all_relation/{version}'.format(path=IN_PATH_THREE,\n version=RELATION_VERSION)\n )\n\n #合并中间数据结果\n tid_info_merge = basic_df.select(\n basic_df.bbd_qyxx_id,\n basic_df.company_name,\n basic_df.enterprise_status,\n basic_df.address,\n basic_df.company_province\n ).join(\n black_df,\n black_df.company_name == basic_df.company_name,\n 'left_outer'\n ).join(\n namefrag_df,\n namefrag_df.company_name == basic_df.company_name,\n 'left_outer'\n ).join(\n lawsuit_df,\n lawsuit_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n tid_ljr_sample_df,\n tid_ljr_sample_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n tid_all_company_info,\n tid_all_company_info.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n bgxx_df,\n bgxx_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n zgcpwsw_df,\n zgcpwsw_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n rmfygg_df,\n rmfygg_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n ktgg_df,\n ktgg_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n jyyc_df,\n jyyc_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n xzcf_df,\n xzcf_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n zhixing_df,\n zhixing_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).join(\n dishonesty_df,\n dishonesty_df.bbd_qyxx_id == basic_df.bbd_qyxx_id,\n 'left_outer'\n ).select(\n basic_df.bbd_qyxx_id,\n basic_df.company_name,\n basic_df.enterprise_status,\n basic_df.address,\n 'namefrag',\n 'dishonesty_num'\n ,'zhixing_num'\n ,'xzcf_num'\n ,'jyyc_num'\n ,'ktgg_num'\n ,'rmfygg_num'\n ,'zgcpwsw_num'\n ,'bgxx_dict'\n ,'is_black_company'\n ,'is_high_company'\n ,'is_leijinrong'\n ,'lawsuit_num'\n ,'company_province'\n ).dropDuplicates(\n ['bbd_qyxx_id']\n ).cache(\n )\n\n #中间数据落地\n os.system(\n (\"hadoop fs -rmr \" \n \"{path}/tid_info_merge/{version} \").format(path=TMP_PATH,\n version=RELATION_VERSION)\n )\n tid_info_merge.coalesce(\n 100\n ).write.parquet(\n \"{path}/\"\n \"tid_info_merge/{version}\".format(version=RELATION_VERSION,\n path=TMP_PATH))\n tid_info_merge = spark.read.parquet(\n \"{path}/\"\n \"tid_info_merge/{version}\".format(version=RELATION_VERSION,\n path=TMP_PATH))\n\n #构建属性图\n tid_relation_2_df = relation_df.join(\n tid_info_merge,\n tid_info_merge.bbd_qyxx_id == relation_df.a,\n 'right_outer'\n ).select(\n tid_info_merge.bbd_qyxx_id.alias('a'),\n 'b',\n 'c',\n 'b_degree',\n 'c_degree',\n 'bc_relation',\n 'b_isperson',\n 'c_isperson',\n tid_info_merge.company_name.alias('a_name'),\n 'b_name',\n 'c_name',\n tid_info_merge.namefrag.alias('a_namefrag'),\n tid_info_merge.dishonesty_num.alias('a_dishonesty'),\n tid_info_merge.zhixing_num.alias('a_zhixing'),\n tid_info_merge.xzcf_num.alias('a_xzcf'),\n tid_info_merge.jyyc_num.alias('a_jyyc'),\n tid_info_merge.ktgg_num.alias('a_ktgg'),\n tid_info_merge.rmfygg_num.alias('a_rmfygg'),\n tid_info_merge.zgcpwsw_num.alias('a_zgcpwsw'),\n tid_info_merge.bgxx_dict.alias('a_bgxx'),\n tid_info_merge.lawsuit_num.alias('a_lending')\n )\n \n tid_relation_3_df = tid_relation_2_df.join(\n tid_info_merge,\n tid_info_merge.bbd_qyxx_id == tid_relation_2_df.b,\n 'left_outer'\n ).select(\n 'a',\n 'b',\n 'c',\n 'b_degree',\n 'c_degree',\n 'bc_relation',\n 'b_isperson',\n 'c_isperson',\n 'a_name',\n 'b_name',\n 'c_name',\n 'a_namefrag',\n 'a_dishonesty', 'a_zhixing', 'a_xzcf',\n 'a_jyyc', 'a_ktgg', 'a_rmfygg',\n 'a_zgcpwsw', 'a_bgxx', 'a_lending',\n 
tid_info_merge.dishonesty_num.alias('b_dishonesty'),\n tid_info_merge.zhixing_num.alias('b_zhixing'),\n tid_info_merge.xzcf_num.alias('b_xzcf'),\n tid_info_merge.jyyc_num.alias('b_jyyc'),\n tid_info_merge.ktgg_num.alias('b_ktgg'),\n tid_info_merge.rmfygg_num.alias('b_rmfygg'),\n tid_info_merge.zgcpwsw_num.alias('b_zgcpwsw'),\n tid_info_merge.bgxx_dict.alias('b_bgxx'),\n tid_info_merge.address.alias('b_address'),\n tid_info_merge.is_black_company.alias('b_is_black_company'),\n tid_info_merge.is_high_company.alias('b_is_high_company'),\n tid_info_merge.is_leijinrong.alias('b_is_leijinrong'),\n tid_info_merge.enterprise_status.alias('b_estatus'),\n tid_info_merge.lawsuit_num.alias('b_lending'),\n tid_info_merge.company_province.alias('b_company_province')\n )\n \n tid_relation_4_df = tid_relation_3_df.join(\n tid_info_merge,\n tid_info_merge.bbd_qyxx_id == tid_relation_2_df.c,\n 'left_outer' \n ).select(\n 'a', 'b', 'c',\n 'b_degree', 'c_degree', 'bc_relation',\n 'b_isperson', 'c_isperson',\n 'a_name', 'b_name', 'c_name',\n 'a_namefrag', \n 'a_dishonesty', 'a_zhixing', 'a_xzcf',\n 'a_jyyc', 'a_ktgg', 'a_rmfygg',\n 'a_zgcpwsw', 'a_bgxx', 'a_lending', \n 'b_dishonesty', 'b_zhixing', 'b_xzcf',\n 'b_jyyc', 'b_ktgg', 'b_rmfygg',\n 'b_zgcpwsw', 'b_bgxx', 'b_address',\n 'b_is_black_company', 'b_is_high_company',\n 'b_is_leijinrong', 'b_estatus', 'b_lending',\n 'b_company_province',\n tid_info_merge.dishonesty_num.alias('c_dishonesty'),\n tid_info_merge.zhixing_num.alias('c_zhixing'),\n tid_info_merge.xzcf_num.alias('c_xzcf'),\n tid_info_merge.jyyc_num.alias('c_jyyc'),\n tid_info_merge.ktgg_num.alias('c_ktgg'),\n tid_info_merge.rmfygg_num.alias('c_rmfygg'),\n tid_info_merge.zgcpwsw_num.alias('c_zgcpwsw'),\n tid_info_merge.bgxx_dict.alias('c_bgxx'),\n tid_info_merge.address.alias('c_address'),\n tid_info_merge.is_black_company.alias('c_is_black_company'),\n tid_info_merge.is_high_company.alias('c_is_high_company'),\n tid_info_merge.is_leijinrong.alias('c_is_leijinrong'),\n tid_info_merge.enterprise_status.alias('c_estatus'),\n tid_info_merge.lawsuit_num.alias('c_lending'),\n tid_info_merge.company_province.alias('c_company_province')\n )\n\n return tid_relation_4_df\n\ndef run():\n tid_df = spark_data_flow(RELATION_VERSION)\n\n os.system(\n (\"hadoop fs -rmr \" \n \"{path}/\"\n \"all_info_merge\"\n \"/{version}\").format(path=OUT_PATH, \n version=RELATION_VERSION)) \n tid_df.coalesce(\n 300\n ).write.parquet(\n \"{path}/\"\n \"all_info_merge/\"\n \"{version}\".format(path=OUT_PATH, \n version=RELATION_VERSION)\n )\n\n\nif __name__ == '__main__': \n import configparser\n conf = configparser.ConfigParser() \n conf.read(\"/data5/antifraud/Hongjing2/conf/hongjing2.py\")\n \n #中间结果版本\n RELATION_VERSION = sys.argv[1]\n \n #输入参数\n IN_PATH_ONE = \"/user/antifraud/hongjing2/dataflow/step_one/raw/\"\n IN_PATH_TWO = \"/user/antifraud/hongjing2/dataflow/step_three/prd/\"\n IN_PATH_THREE = \"/user/antifraud/hongjing2/dataflow/step_four/tid/raw/\"\n OUT_PATH = \"/user/antifraud/hongjing2/dataflow/step_four/tid/tid/\"\n TMP_PATH = \"/user/antifraud/hongjing2/dataflow/step_four/tid/tmp/\"\n \n spark = get_spark_session()\n\n run()","sub_path":"src/step_four/tid/tid/all_info_merge.py","file_name":"all_info_merge.py","file_ext":"py","file_size_in_byte":11630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"58396409","text":"#coding:utf-8\r\n'''\r\nCreated on 2016-8-4\r\n\r\n@author: u410\r\n'''\r\nimport urllib2\r\n\r\nclass HtmlDownloader(object):\r\n \r\n \r\n 
def download(self,url):\r\n if url is None:\r\n return None\r\n headers = {'Accept':'text/html, application/xhtml+xml, */*',\r\n 'Accept-Language':'zh-CN',\r\n 'User-Agent':'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0) QQBrowser/8.2.4257.400'\r\n }\r\n requ = urllib2.Request(url,headers=headers)\r\n response = urllib2.urlopen(requ,timeout=10)\r\n \r\n if response.getcode()!= 200:\r\n return None\r\n \r\n return response.read()\r\n \r\n \r\n \r\n\r\n\r\n\r\n","sub_path":"html_downloader.py","file_name":"html_downloader.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"11174749","text":"#General Imports\nimport sys\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport copy\nimport random\nimport json\nimport pickle\n\n#Tensorflow\nfrom absl import flags\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\n#Sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nfrom mia.estimators import ShadowModelBundle, AttackModelBundle, prepare_attack_data\n\nfrom .bnlearn_data import train_input_fn, eval_input_fn \n\ndef to_onehot(inp):\n s = pd.Series(inp)\n out = pd.get_dummies(s)\n return out\n \n# Trains a simple DNN on the output probabilities of both the correlational and the causal model.\ndef my_attack_model(features, labels, mode, params):\n \"\"\"DNN with one hidden layers and learning_rate=0.1.\"\"\"\n # Create three fully connected layers.\n net = tf.feature_column.input_layer(features, params['feature_columns'])\n\n for units in params['hidden_units']:\n net = tf.layers.dense(net, units=units, activation=tf.nn.relu)\n net = tf.layers.batch_normalization(net, training=True)\n net = tf.nn.relu(net)\n \n # Compute logits (1 per class).\n logits = tf.layers.dense(net, params['n_classes'], activation=None)\n\n #logits = logits + 0.9\n\n # Compute predictions.\n predicted_classes = tf.argmax(logits, 1)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'class_ids': predicted_classes[:, tf.newaxis],\n 'probabilities': tf.nn.softmax(logits),\n 'logits': logits,\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Compute loss.\n loss = tf.losses.sparse_softmax_cross_entropy(labels=tf.argmax(labels,1), logits=logits)\n\n # Compute evaluation metrics.\n accuracy = tf.metrics.accuracy(labels=tf.argmax(labels,1),\n predictions=predicted_classes,\n name='acc_op')\n metrics = {'accuracy': accuracy}\n tf.summary.scalar('accuracy', accuracy[1])\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)\n\n # Create training op.\n assert mode == tf.estimator.ModeKeys.TRAIN\n # optimizer = tf.train.ProximalAdagradOptimizer(\n # learning_rate=params['learning_rate'],\n # l2_regularization_strength=0.001\n # )\n optimizer = tf.train.AdamOptimizer(\n learning_rate=tf.train.exponential_decay(\n learning_rate=params['learning_rate'],\n global_step=tf.train.get_global_step(),\n decay_steps=1000,\n decay_rate=0.96)) \n # optimizer = tf.train.RMSPropOptimizer(learning_rate=params['learning_rate'])\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n\ndef mia(X_att_train, y_att_train, X_att_test, y_att_test, my_feature_columns, batch_size, train_steps, mdir):\n\n 
attacker = tf.estimator.Estimator(\n model_fn = my_attack_model,\n params={\n 'feature_columns': my_feature_columns,\n 'hidden_units': [8, 4],\n 'n_classes': 2,\n 'n_train_examples': len(X_att_train),\n 'learning_rate': 0.001\n },\n )\n # Train the attacker classifier\n #print(X_att_train)\n #print(y_att_train)\n attacker.train(\n input_fn=lambda:train_input_fn(X_att_train, y_att_train, batch_size), \n steps=train_steps)\n\n # Evaluate the attacker model.\n eval_result_train = attacker.evaluate(input_fn=lambda:eval_input_fn(X_att_train, y_att_train, batch_size))\n\n # Evaluate the attacker model.\n eval_result_test = attacker.evaluate(input_fn=lambda:eval_input_fn(X_att_test, y_att_test, batch_size))\n\n # Get the prediction confidences from the attacker model\n predict_result = attacker.predict(input_fn=lambda:eval_input_fn(X_att_test, y_att_test, batch_size))\n\n attack_guess = []\n for i in predict_result:\n attack_guess.extend(i['class_ids'])\n\n return {'tr_attack': eval_result_train, 'te_attack': eval_result_test} \n\n","sub_path":"utils/privacy_attack.py","file_name":"privacy_attack.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"356815884","text":"import random\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\nimport math\nimport difflib\nimport chardet\n#计算过程中差值矩阵的变化\ndiffsequencechange=list()\n#记录嵌入水印的位置\nLocationx=[]\nLocationy=[]\n#被嵌入水印\nembed=''\n#被提取水印\nextracted=''\n#获取图像一维nampy数组\ndef img_arry_flatten(image_path):\n '''\n :param imagepath: 图像\n :return:图像像素一维数组\n '''\n # 处理异常\n if os.path.splitext(image_path)[1]=='.bmp':\n im=Image.open(image_path).save('temp.bmp')\n im = Image.open('temp.bmp').convert('L')\n\n #获得图像的高和宽\n img_heigth=im.size[0]\n img_width=im.size[1]\n\n\n # 调用pylab中的属于numpy的array方法将灰度图像转化为二维灰度值的矩阵\n # 矩阵中每一个值都是0~255之间的数\n im_arry = np.array(im)\n\n # 调用array对象的flatten()方法将二维灰度的矩阵转化为一维列表(类似数组)\n im_arry_flatten = im_arry.flatten()\n\n return im_arry_flatten,img_heigth,img_width\n#整数转化为特定长度的二进制字符串\ndef bin_value(a,length):\n '''\n 将整数转化为指定长度的二进制字符串\n :param a: 待转换的整数\n :param length: 指定转化的二进制的长度\n :return: 转换的指定长度的二进制字符串\n '''\n result='{0:b}'.format(a)\n stresult=str(result)\n if len(stresult)>length:\n print('The length of the watermark is larger than %d'%length)\n while len(stresult)tempmin and difsequence[i]tempmin:\n im_arry_flatten[i+1]=im_arry_flatten[i+1]+1\n\n\n return im_arry_flatten,(dmin,dmax),difsequence1,originrate,(img_heigth,img_width),maxembedrate\n#差值矩阵直方图\ndef histdiff():\n # 生成图像差值序列\n i=0\n for difsequence in diffsequencechange:\n plt.figure(\"hist of imagedifsequence\")\n temp=difsequence[:]\n temp=sorted(temp)\n temp=list(set(temp))\n n, bins, patches = plt.hist(difsequence, bins=len(temp), normed=1, facecolor='g', alpha=0.75)\n arrymean=np.mean(difsequence)\n arrystd=np.std(difsequence)\n y = mlab.normpdf(bins, arrymean, arrystd)\n plt.plot(bins, y, 'b--')\n plt.legend(loc=\"lower right\")\n plt.xlabel('Difference')\n plt.ylabel('Percentage')\n plt.title(r'Histogram of diffsequence: $\\mu='+str(arrymean)+r'$, $\\sigma='+str(arrystd)+r'$')\n # plt.show()\n savepath = '/Users/cclin/PycharmProjects/project/histdiff/' + 'histdif'+str(i)+'.jpeg'\n plt.savefig(savepath)\n i+=1\n# 图像特征提取,即获得图像最大水印嵌入容量\ndef extractmaxcap(imagepath):\n '''\n 图像特征提取,即获得图像最大水印嵌入容量\n :param imagepath: 图像路径\n 
:return:\n '''\n im_arry_flatten, (dmin, dmax), difsequence1, originrate, (img_heigth, img_width),maxembedrate=dealimage(imagepath)\n return float(maxembedrate/2)\n#嵌入水印\ndef embedwatermark(image_path,watermark,cap):\n embed=watermark\n print(\"The embed watermark is\",watermark)\n im_arry_flatten,(dmin,dmax),difsubsequence,rate,(img_heigth,img_width),maxembedrate=dealimage(image_path)\n im_arry_flatten1,(dmin,dmax),difsubsequence,rate,(img_heigth,img_width),maxembedrate=dealimage(image_path)\n for i in range(0,len(im_arry_flatten1)):\n im_arry_flatten1[i]=255\n count=0\n for c in difsubsequence:\n if c==dmax:\n count+=1\n ratedmax=count/len(difsubsequence)\n # 把水印的每个字符转化为8位二进制字符串,防止溢出\n wmbinstr = \"\"\n i = 0\n for c in watermark:\n wmbinstr = wmbinstr + bin_value(ord(c), 8)\n i += 1\n index=0\n difsubsequence2=[0]*int(len(im_arry_flatten)/2)\n i=0\n j=0\n #记录需要嵌入水印但是像素值为255的像素值的位置\n Locationaviod = []\n #处理差值序列,嵌入水印\n if len(watermark)*8 > len(difsubsequence)*ratedmax:\n print(\"The length of watrermark is larger than the capacity\")\n\n else:\n # 处理差值序列,嵌入水印\n for c in difsubsequence:\n difsubsequence2[i]=c\n if c==dmax and indexlen(wmbinstr)-1:\n break\n dif = int(im_arry_flatten[i + 1]) - int(im_arry_flatten[i])\n if dif == dmax and wmbinstr[j]=='1' and im_arry_flatten[i + 1]!=255:\n im_arry_flatten[i + 1] = im_arry_flatten[i + 1] + 1\n Locationaviod.append(i+1)\n #记录嵌入水印的位置\n Locationy.append((i+1)/512)\n Locationx.append((i+1)%512)\n im_arry_flatten1[i+1]=0\n j+=1\n elif dif==dmax and wmbinstr[j]=='0':\n im_arry_flatten[i + 1] = im_arry_flatten[i + 1]\n j += 1\n diffsequencechange.append(difsubsequence2)\n # 嵌入完所有信息,使用reshpe函数将包含水印的一维列表转化为numpy的二维数组\n im_array_embed = np.reshape(im_arry_flatten, (img_heigth, img_width))\n im_array_locate = np.reshape(im_arry_flatten1, (img_heigth, img_width))\n # 调用Image模块的fromarry方法将二维数组转化为图像保存\n im_embed = Image.fromarray(im_array_embed)\n im_locate=Image.fromarray(im_array_locate)\n path2 = image_path.split('/')[-1]\n savepath = '/Users/cclin/PycharmProjects/project/embed/' +str(cap)+ 'embed_' + path2\n savepath1='/Users/cclin/PycharmProjects/project/locate/' +str(cap)+ 'locate_' + path2\n im_embed.save(savepath)\n im_locate.save(savepath1)\n #嵌入位置\n Location()\n return (dmin,dmax),len(wmbinstr),savepath\n#提取水印,恢复图像\ndef extractwatermark(image_path,dmax,dmin,wmlen):\n # 生成嵌入水印后图像的差值序列\n im_arry_flatten, img_heigth, img_width = img_arry_flatten(image_path)\n difsequence = [0] * img_heigth * int(img_width / 2)\n j = 0\n for i in range(0, img_width * img_heigth - 1, 2):\n difsequence[j] = int(im_arry_flatten[i + 1]) - int(im_arry_flatten[i])\n j+=1\n watermarkbin=\"\"\n watermark=\"\"\n totalwatermark=\"\"\n #提取水印\n index=0\n for d in difsequence:\n if index>wmlen-1:\n break\n if index>0 and (index%8==0) and watermarkbin!=\"\":\n watermark = watermark + chr(int(watermarkbin, 2))\n watermarkbin = \"\"\n if d==dmax+1:\n watermarkbin=watermarkbin+'1'\n totalwatermark = totalwatermark+'1'\n index+=1\n elif d==dmax:\n watermarkbin = watermarkbin + '0'\n totalwatermark = totalwatermark + '0'\n index+=1\n watermark = watermark + chr(int(watermarkbin, 2))\n print(\"The extracted watermark is:\", watermark)\n extracted=watermark\n subsequence=[0]*int(len(im_arry_flatten)/2)\n #恢复图像\n #对嵌入过程进行恢复\n j=0\n for i in range(0, len(im_arry_flatten) - 1, 2):\n # print(\"wmbinstr[j]\",wmbinstr[j])\n if j > len(totalwatermark)-1 :\n break\n dif = int(im_arry_flatten[i + 1]) - int(im_arry_flatten[i])\n\n if dif == dmax+1 :\n im_arry_flatten[i + 1] = im_arry_flatten[i + 
1] - 1\n\n j += 1\n elif dif == dmax :\n im_arry_flatten[i + 1] = im_arry_flatten[i + 1]\n j += 1\n j = 0\n for i in range(0, img_width * img_heigth - 1, 2):\n subsequence[j] = int(im_arry_flatten[i + 1]) - int(im_arry_flatten[i])\n j += 1\n diffsequencechange.append(subsequence)\n #对预处理进行恢复\n subsequence1 = [0] * int(len(im_arry_flatten) / 2)\n tempmax = dmax\n tempmin = dmin\n if dmax < dmin:\n tempmax = dmin\n tempmin = dmax\n # I'(i,2j)=I(i,2j)-1(差值位于dmax,dmin),\n for i in range(0, img_width * img_heigth - 1, 2):\n dif = int(im_arry_flatten[i + 1]) - int(im_arry_flatten[i])\n if dif <= tempmax and dif > tempmin:\n im_arry_flatten[i + 1] = im_arry_flatten[i + 1] - 1\n j = 0\n for i in range(0, img_width * img_heigth - 1, 2):\n subsequence1[j] = int(im_arry_flatten[i + 1]) - int(im_arry_flatten[i])\n j += 1\n diffsequencechange.append(subsequence1)\n # 提取完所有信息,使用reshpe函数将包含水印的一维列表转化为numpy的二维数组\n im_array_extracted = np.reshape(im_arry_flatten, (img_heigth, img_width))\n # 调用Image模块的fromarry方法将二维数组转化为图像保存\n im_extracted = Image.fromarray(im_array_extracted)\n path2 = image_path.split('/')[-1]\n savepath = '/Users/cclin/PycharmProjects/project/image/' + 'recover_' + path2\n #恢复图像的保存\n im_extracted.save(savepath)\n return savepath,watermark\n#对比寻找原图像与恢复图像的差异\ndef comparison(img_path,recoverimg_path):\n orim_arry_flatten, img_heigth, img_width = img_arry_flatten(img_path)\n reim_arry_flatten, img_heigth, img_width = img_arry_flatten(recoverimg_path)\n\n pixeldifdict={}\n difflag=0\n for i in range(0,img_width*img_heigth):\n if orim_arry_flatten[i]==reim_arry_flatten[i]:\n continue\n else:\n pixeldifdict['index:'+str(i)]=str(int(orim_arry_flatten[i]))+' '+str(int(reim_arry_flatten[i]))\n difflag=1\n continue\n\n if difflag==0:\n print(\"There is no difference between original image and recovered image!\")\n else:\n print(\"The difference is below:\")\n for key,item in pixeldifdict.items():\n print(key,\":\",item,end=\",\")\n spot={}\n difflag1 = 0\n for i in range(0,len(embed)):\n if i>len(extracted):\n print(\"The embed-water is different with the extracted-watermark\")\n if embed[i]==extracted[i]:\n continue\n else:\n difflag1=1\n spot['index:'+str(i)]=str(embed[i])+':'+str(extracted[i])\n continue\n if difflag1==0:\n print(\"There is no difference between embed watermark and extracted watermark!\")\n\n else:\n print(\"Difference\")\n for key, item in spot.items():\n print(key, \":\", item, end=\",\")\n#计算容量\ndef calculateER(pixelcount,embedcount):\n '''\n 计算容量 bit/pixel\n :param pixelcount:\n :param embedcount:\n :return: 容量 单位 bit/pixel\n '''\n return embedcount*1/pixelcount\n#计算PSNR\ndef calculatePSNR(originimg_path,dealimg_path):\n '''\n :param originimg_path: 算法处理前图片\n :param dealimg_path: 算法处理后图片\n :return:\n '''\n ori_image_arry_flatten,imgheigth,imawidth=img_arry_flatten(originimg_path)\n deal_image_arry_flatten,imgheigth1,imawidth1=img_arry_flatten(dealimg_path)\n\n temp1=0.0\n for index in range(0,len(ori_image_arry_flatten)):\n temp1 = temp1+ (deal_image_arry_flatten[index]-ori_image_arry_flatten[index])**2\n #MSE是原图像与处理图像间的均方误差\n #temp1=len(ori_image_arry_flatten)/2\n MSE=float(temp1/len(ori_image_arry_flatten))\n\n MAX=255\n if(MSE==0):\n PSNR=10*math.log(MAX*MAX,10)\n else:\n temp=(MAX*MAX)/MSE\n PSNR=10*math.log(temp,10)\n\n return PSNR\n#计算SSIM相关性\ndef calculateSSIM(image_path1,image_path2):\n im1=cv2.imread(image_path1,0)\n im2=cv2.imread(image_path2,0)\n assert len(im1.shape) == 2 and len(im2.shape) == 2\n assert im1.shape == im2.shape\n mu1 = im1.mean()\n mu2 = 
im2.mean()\n sigma1 = np.sqrt(((im1 - mu1) ** 2).mean())\n sigma2 = np.sqrt(((im2 - mu2) ** 2).mean())\n sigma12 = ((im1 - mu1) * (im2 - mu2)).mean()\n k1, k2, L = 0.01, 0.03, 255\n C1 = (k1*L) ** 2\n C2 = (k2*L) ** 2\n C3 = C2/2\n l12 = (2*mu1*mu2 + C1)/(mu1 ** 2 + mu2 ** 2 + C1)\n c12 = (2*sigma1*sigma2 + C2)/(sigma1 ** 2 + sigma2 ** 2 + C2)\n s12 = (sigma12 + C3)/(sigma1*sigma2 + C3)\n ssim = l12 * c12 * s12\n return ssim\n#绘制PSNR_ER_SSIM\ndef drawPSNR_ER_SSIM(originimg_path,maxcapacity):\n '''\n 绘制PSNR~容量曲线\n :param originimg_path:\n :param dealimg_path:\n :return:\n '''\n\n im_arry_flatten, im_heigth, im_width = img_arry_flatten(originimg_path)\n #嵌入不同长度的水印,使图像具有不同水印容量。\n CAPACITY=[0.0]*19\n for i in range(0,19):\n CAPACITY[i]=float(maxcapacity*i/18)\n dealimg_path=[0]*19\n j=0\n for c in CAPACITY:\n watermark=producewatermaerk(im_heigth*im_width*c)\n (dmin,dmax),wmlen,dealimg_path[j]=embedwatermark(originimg_path,watermark,c)\n j+=1\n #绘制具有不同嵌入水印容量的直方图\n i=0\n for path in dealimg_path:\n drawhist(path,CAPACITY[i])\n i+=1\n #绘制PSNR_ER曲线\n PSNR=[0]*19\n SSIM=[0]*19\n for i in range(0,j):\n PSNR[i]=calculatePSNR(originimg_path,dealimg_path[i])\n SSIM[i]=calculateSSIM(originimg_path,dealimg_path[i])\n fig = plt.figure(\"PSNR~容量曲线\")\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(CAPACITY, PSNR)\n plt.xlabel('CAPACITY')\n plt.ylabel('PSNR')\n\n fig2 = plt.figure(\"SSIM~容量曲线\")\n ax = fig2.add_subplot(1, 2, 2)\n ax.plot(CAPACITY, SSIM)\n plt.xlabel('CAPACITY')\n plt.ylabel('SSIM')\n\n savepath='/Users/cclin/PycharmProjects/project/PSNR_ER/psnr_er.jpeg'\n plt.savefig(savepath)\n\n fig3 = plt.figure()\n ax = fig3.add_subplot(111, projection='3d')\n for c, m in [('r', 'o'), ('b', '^')]:\n ax.scatter(CAPACITY, SSIM,PSNR, c=c, marker=m)\n ax.set_xlabel('CAPACITY')\n ax.set_ylabel('SSIM')\n ax.set_zlabel('PSNR')\n savepath = '/Users/cclin/PycharmProjects/project/PSNR_ER/psnr_er_ssim.jpeg'\n plt.savefig(savepath)\n\n mpl.rcParams['legend.fontsize'] = 10\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)\n ax.plot(CAPACITY, SSIM, PSNR, label='parametric curve')\n ax.legend()\n savepath = '/Users/cclin/PycharmProjects/project/PSNR_ER/psnr_er_ssim1.jpeg'\n plt.savefig(savepath)\n#对比本算法与传统的基于直方图修改的水印嵌入算法\ndef campareER(image_path):\n im_arry_flatten,image_height,image_width,=img_arry_flatten(image_path)\n #计算获得图像中频率最大的像素值\n pixel=[0]*256\n rate=[0]*256\n # 获得零值点的个数\n countzero = 0\n for i in range(0,len(im_arry_flatten)):\n pixel[im_arry_flatten[i]]+=1\n if im_arry_flatten[i]==0:\n countzero+=1\n for i in range(0,256):\n rate[i]=pixel[i]/len(im_arry_flatten)\n maxpixel=rate.index(max(rate))\n maxpixelcount=pixel[maxpixel]\n #获得本实验算法Dmax(具有最大概率的差值的个数)/dmaxI(差值的值)\n #dmin\n im_arry_flatten, (dmin, dmax), difsequence1, originrate, (img_heigth, img_width), maxembedrate = dealimage(image_path)\n name=image_path.split('/')[-1].split('.')[0]\n print(\"测试图像:\",name,\"原始图像直方图的最大值个数:\",maxpixelcount,\"原始图像直方图零值点个数:\",countzero,\n \"差值矩阵的Dmax:\",len(difsequence1)*maxembedrate,\"dmax:\",dmax,\"dmin:\",dmin)\n#嵌入水印位置\ndef Location():\n fig3 = plt.figure()\n ax = fig3.add_subplot(111)\n #for c, m in [('r', 'o'), ('b', '^')]:\n ax.set_title('Embedded position distribution')\n ax.scatter(Locationx, Locationy,marker='x',c='b',cmap='b' )\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n #ax.set_zlabel('bit plane')\n savepath = '/Users/cclin/PycharmProjects/project/PSNR_ER/scatter.jpeg'\n plt.savefig(savepath)\n#椒盐噪声\n\ndef PepperandSalt(img_path,percetage):\n '''\n 椒盐噪声\n 
:param img_path: 图像路径\n :param percetage: 百分比\n :return: 椒盐噪声处理后的图片的存储路径\n '''\n noise_img=cv2.imread(img_path,0)\n img_height=noise_img.shape[0]\n img_width=noise_img.shape[1]\n NoiseNum=int(percetage*img_width*img_height)\n for i in range(NoiseNum):\n randX=random.randint(0,img_height-1)\n randY=random.randint(0,img_width-1)\n if random.randint(0,1)<=0.5:\n noise_img[randX,randY]=0\n else:\n noise_img[randX,randY]=255\n path2=img_path.split('/')[-1]\n pepperandsaltpath='/Users/cclin/PycharmProjects/project/pnoise/p_'+path2\n cv2.imwrite(pepperandsaltpath,noise_img)\n # cv2.imshow('pepperandsalt',noise_img)\n cv2.waitKey(0)\n return pepperandsaltpath\n#高斯噪声\ndef GaussianNoise(img_path,means,sigma,percetage):\n '''\n 高斯噪声\n :param img_path: 原图像路径\n :param means: 均值\n :param sigma: 标准差\n :param percetage: 百分比\n :return:gaussiansavepath 高斯噪声处理图像的存储路径\n '''\n noise_img=cv2.imread(img_path,0)\n img_height=noise_img.shape[0]\n img_width=noise_img.shape[1]\n NoiseNum = int(percetage * img_height * img_width)\n for i in range(NoiseNum):\n randX = random.randint(0, img_height - 1)\n randY = random.randint(0, img_width - 1)\n noise_img[randX, randY] = noise_img[randX, randY] + random.gauss(means, sigma)\n if noise_img[randX, randY] < 0:\n noise_img[randX, randY] = 0\n elif noise_img[randX, randY] > 255:\n noise_img[randX, randY] = 255\n path2 = img_path.split('/')[-1]\n gaussiansavepath='/Users/cclin/PycharmProjects/project/gnosie/g_'+path2\n cv2.imwrite( gaussiansavepath,noise_img)\n # cv2.imshow('gaussiannoise',noise_img)\n cv2.waitKey(0)\n return gaussiansavepath\n\ndef meanfilter(imagepath):\n img=cv2.imread(imagepath,0)\n #中值滤波\n result = cv2.blur(img, (7, 7))\n # result=cv2.GaussianBlur(img, (3, 3), 0)\n path2 = imagepath.split('/')[-1]\n savepath = '/Users/cclin/PycharmProjects/project/meanfilte/m_' + path2\n cv2.imwrite(savepath, result)\n return savepath\n#计算误码率\ndef calculemistackerate(fi1,fi2):\n f1=open(fi1,'r')\n f2=open(fi2,'r')\n w1=f1.read()\n w2=f2.read()\n count=0\n for i in range(0,len(w1)):\n if i>=len(w2):\n break\n if w1[i]!=w2[i]:\n count+=1\n return count/len(w1)\n\n\ndef batchdeal():\n pathlist=[\n #'airplane512.bmp',\n # 'baboon512.bmp',\n # 'beach512.bmp',\n # 'boat1_512.bmp',\n # 'Boats512.bmp',\n # 'colar512.bmp',\n # 'desk512.bmp',\n # 'flowers512.bmp',\n # 'goldhill512.bmp',\n 'jg512.bmp',\n 'lena512.bmp',\n 'peppers2_512.bmp',\n 'peppers512.bmp',\n 'Sailboat512.bmp',\n 'scence512.bmp',\n 'timg1_512.bmp',\n 'timg2_512.bmp',\n 'wheats512.bmp',\n 'Zelda512.bmp']\n\n for path in pathlist:\n originpath='/Users/cclin/PycharmProjects/project/imagelib/'+path\n maxcaprate = extractmaxcap(originpath)\n watermark = producewatermaerk(512 * 512 * maxcaprate)\n (dmin, dmax), wmlen, savepath = embedwatermark(originpath,watermark,maxcaprate)\n # 图像差值矩阵直方图\n histdiff()\n # 提取水印,恢复图像\n resavepath,watermark1 = extractwatermark(savepath, dmax, dmin, wmlen)\n # 对比原图像与恢复后的图像,被嵌入水印与提取水印的差异\n comparison(originpath, resavepath)\n\n\nif __name__=='__main__':\n originpath=\"/Users/cclin/PycharmProjects/project/imagelib/baboon512.bmp\"\n maxcaprate=extractmaxcap(originpath)\n # #获得图像水印最大嵌入量比率,并根据最大嵌入量生成水印\n watermark=producewatermaerk(512*512*maxcaprate)\n # emwpath='/Users/cclin/PycharmProjects/project/ew/ew'+originpath.split('/')[-1].split('.')[0]+'.txt'\n # dmaxpath='/Users/cclin/PycharmProjects/project/dmax/'+originpath.split('/')[-1].split('.')[0]+'.txt'\n # dminpath='/Users/cclin/PycharmProjects/project/dmin/'+originpath.split('/')[-1].split('.')[0]+'.txt'\n # 
wmpath='/Users/cclin/PycharmProjects/project/wmlen/'+originpath.split('/')[-1].split('.')[0]+'.txt'\n    # f = open(emwpath,'w')\n    # f.write(watermark)\n    # f.close()\n    # Preprocess the image and embed the watermark\n    (dmin, dmax), wmlen, savepath= embedwatermark(originpath,watermark,maxcaprate)\n    # f2=open(dmaxpath,'w')\n    # f2.write(str(dmax))\n    # f2.close()\n    # f3=open(dminpath,'w')\n    # f3.write(str(dmin))\n    # f3.close()\n    # f5=open(wmpath,'w')\n    # f5.write(str(wmlen))\n    # f5.close()\n    # Histogram of the image difference matrix\n    histdiff()\n    # # Gaussian noise\n    # # savepath = GaussianNoise(savepath, 2, 4, 0.2)\n    # #savepath=PepperandSalt(savepath,0.2)\n    # # savepath=meanfilter(savepath)\n    # emwpath='/Users/cclin/PycharmProjects/project/ew/ew'+originpath.split('/')[-1].split('.')[0]+'.txt'\n    # # savepath=\n    # # Extract the watermark and restore the image\n    # f2 = open(dmaxpath, 'r')\n    # dmax=int(f2.read())\n    # f2.close()\n    # f3 = open(dminpath, 'r')\n    # dmin=int(f3.read())\n    # f3.close()\n    # f5 = open(wmpath,'r')\n    # wmlen=int(f5.read())\n    # f5.close()\n    # #savepath='/Users/cclin/PycharmProjects/project/copy/boat1_512.bmp'\n    resavepath,watermark1=extractwatermark(savepath,dmax,dmin,wmlen)\n    # etwpath = '/Users/cclin/PycharmProjects/project/et/et' + originpath.split('/')[-1].split('.')[0] + '.txt'\n    # f = open(etwpath, 'w')\n    # f.write(watermark1)\n    # f.close()\n    # Compare the original and restored images, and the embedded vs. extracted watermarks\n    comparison(originpath,resavepath)\n    # f=open('/Users/cclin/PycharmProjects/project/subsequece.txt','w')\n    # f.write(diffsequencechange)\n    # for dif in diffsequencechange:\n    #     print(diffsequencechange,end=\"\\n\\n\")\n    histdiff()\n    PSNR=calculatePSNR(originpath,savepath)\n    # Compute the embedding capacity\n    cap=extractmaxcap(originpath)\n    print(\"embedding capacity\",cap)\n    print(\"PSNR\",PSNR)\n    campareER(originpath)\n    # drawPSNR_ER_SSIM(originpath,cap)\n    ssim=calculateSSIM(originpath,savepath)\n    print(\"SSIM\",ssim)\n    # drawhist('/Users/cclin/PycharmProjects/project/image/lena512.bmp',1)\n    # drawhist('/Users/cclin/PycharmProjects/project/embed/embed_lena512.bmp',2)\n    # drawhist('/Users/cclin/PycharmProjects/project/image/recover_embed_lena512.bmp',3)\n    # batchdeal()\n    # path=\"/Users/cclin/PycharmProjects/project/embed/0.0embed_peppers512.bmp\"\n    # path2=GaussianNoise(path,2,4,0.2)\n    # comparison(path2)\n","sub_path":"Reversible Information Hiding Algorithm Based on Host Image Difference Matrix/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":25641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"485904238","text":"# uniform content loss + adaptive threshold + per_class_input + recursive G\n# improvement upon cqf37\nfrom __future__ import division\nimport os, time, scipy.io, imageio\nimport rawpy\n# from rawkit.raw import Raw\nimport PIL\nimport cv2\nimport tensorflow as tf\nfrom skimage.measure import compare_psnr, compare_ssim\n\ntf.set_random_seed(819)\nimport tensorflow.contrib.slim as slim\nimport numpy as np\n\nnp.random.seed(819)\n# import rawpy\nimport glob\n\nfrom loss import *\nfrom octconv_unet import oct_unet\n\ninput_dir = '../../datasets/raw/eosr/train/'\ngt_dir = '../../datasets/raw/eosr/train/'\ncheckpoint_dir = './checkpoint/eosr_oct_fuse/'\nresult_dir = './result_eosr_oct_fuse/'\n\nif not os.path.exists(checkpoint_dir):\n    os.mkdir(checkpoint_dir)\nif not os.path.exists(result_dir):\n    os.mkdir(result_dir)\n\n# get train IDs\ntrain_fns = glob.glob(gt_dir + '*.JPG')\ntrain_ids = [os.path.basename(train_fn)[0:-4] for train_fn in train_fns]\nnp.random.shuffle(train_ids)\ntrain_ids = train_ids[:int(len(train_ids) / 2)]\n\nalpha = 0.25  # octave conv 'alpha' 
param\nps = 512 # patch size for training\nlmd = 0.5 # l1 and perceptual loss weight\nsave_freq = 500\n\nDEBUG = 0\nif DEBUG == 1:\n save_freq = 2\n train_ids = train_ids[0:5]\n\n\ndef pack_raw(raw):\n # pack Bayer image to 4 channels\n im = raw.raw_image.astype(np.float32)\n\n ### Crop the border\n # Sensor Width : 6888\n # Sensor Height : 4546\n # Sensor Left Border : 156\n # Sensor Top Border : 58\n # Sensor Right Border : 6875\n # Sensor Bottom Border : 4537\n im = im[57:4537, 155:6875]\n # im = np.maximum(im - 512, 0) / (16383 - 512) # subtract the black level\n black_level = raw.black_level_per_channel[0]\n im = np.maximum(im - black_level, 0) / (np.max(raw.raw_image) - black_level)\n\n im = np.expand_dims(im, axis=2)\n img_shape = im.shape\n H = img_shape[0]\n W = img_shape[1]\n\n cfa = raw.raw_pattern\n cfa_dict = {'RGGB': [[0, 1], [3, 2]], 'BGGR': [[2, 3], [1, 0]], 'GBRG': [[3, 2], [0, 1]]}\n if (cfa == cfa_dict['RGGB']).all():\n out = np.concatenate((im[0:H:2, 0:W:2, :],\n im[0:H:2, 1:W:2, :],\n im[1:H:2, 0:W:2, :],\n im[1:H:2, 1:W:2, :]), axis=2)\n elif (cfa == cfa_dict['BGGR']).all():\n out = np.concatenate((im[1:H:2, 1:W:2, :],\n im[0:H:2, 1:W:2, :],\n im[1:H:2, 0:W:2, :],\n im[0:H:2, 0:W:2, :]), axis=2)\n elif (cfa == cfa_dict['GBRG']).all():\n out = np.concatenate((im[1:H:2, 0:W:2, :],\n im[0:H:2, 0:W:2, :],\n im[1:H:2, 1:W:2, :],\n im[0:H:2, 1:W:2, :]), axis=2)\n else:\n raise ValueError('Unsupported CFA configuration: {}'.format(cfa))\n return out\n\n\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)\nconfig = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\nin_image = tf.placeholder(tf.float32, [None, None, None, 4])\ngt_image = tf.placeholder(tf.float32, [None, None, None, 3])\nout_image = oct_unet(in_image, alpha)\n\nG_l1loss = tf.reduce_mean(tf.abs(out_image - gt_image))\n#G_msssimloss = tf.reduce_mean(1 - tf.image.ssim_multiscale(out_image, gt_image, 1.0))\n# G_l1loss = tf.reduce_mean(compute_l1_loss(out_image, gt_image))\n# features = [\"conv1_2\", \"conv2_2\", \"conv3_2\"]\n# G_perceploss = tf.reduce_mean(compute_percep_loss(gt_image, out_image, features, withl1=False))\n#G_loss = lmd * G_l1loss + (1 - lmd) * G_msssimloss\nG_loss = G_l1loss\ntf.summary.scalar('l1loss', G_loss)\n#tf.summary.scalar('msssimloss', G_msssimloss)\n#tf.summary.scalar('sum_loss', G_loss)\n\nt_vars = tf.trainable_variables()\nlr = tf.placeholder(tf.float32)\ntf.summary.scalar('lr', lr)\n\nwith tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n G_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)\n\nmerged = tf.summary.merge_all()\nlog_dir = result_dir + 'logs'\nif not os.path.exists(log_dir):\n os.mkdir(log_dir)\ntrain_writer = tf.summary.FileWriter(log_dir, sess.graph)\n\nsaver = tf.train.Saver()\nsess.run(tf.global_variables_initializer())\nckpt = tf.train.get_checkpoint_state(checkpoint_dir)\nif ckpt:\n print('loaded ' + ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n# Raw data takes long time to load. 
Keep them in memory after loaded.\ngt_images = [None] * len(train_ids)\ninput_images = [None] * len(train_ids)\n# input_images = {}\n# input_images['300'] = [None] * len(train_ids)\n# input_images['250'] = [None] * len(train_ids)\n# input_images['100'] = [None] * len(train_ids)\n# input_images['300'] = None\n# input_images['250'] = None\n# input_images['100'] = None\n\ng_loss = np.zeros((5000, 1))\n\nallfolders = glob.glob(result_dir + '*0')\nlastepoch = 0\nfor folder in allfolders:\n lastepoch = np.maximum(lastepoch, int(folder[-4:]))\n\nlearning_rate = 1e-4\ncnt = 0\nfor epoch in range(lastepoch, 4001):\n if os.path.isdir(result_dir + '%04d' % epoch):\n continue\n # cnt = 0\n #if epoch > 1500:\n # learning_rate = 1e-4\n if epoch > 2000:\n learning_rate = 1e-5\n\n for ind in np.random.permutation(len(train_ids)):\n # get the path from image id\n train_id = train_ids[ind]\n print(train_id)\n in_files = glob.glob(input_dir + '%s.dng' % train_id)\n # in_path = in_files[np.random.random_integers(0, len(in_files) - 1)]\n in_path = in_files[0]\n # in_fn = os.path.basename(in_path)\n # TODO: batch批量读取raw图\n gt_files = glob.glob(gt_dir + '%s.JPG' % train_id)\n gt_path = gt_files[0]\n # gt_fn = os.path.basename(gt_path)\n # in_exposure = float(in_fn[9:-5])\n # gt_exposure = float(gt_fn[9:-5])\n # ratio = min(gt_exposure / in_exposure, 300)\n\n st = time.time()\n cnt += 1\n\n if input_images[ind] is None:\n raw = rawpy.imread(in_path)\n input_images[ind] = np.expand_dims(pack_raw(raw), axis=0)\n print(\"time raw: \", time.time() - st)\n # if input_images[str(ratio)[0:3]][ind] is None:\n # raw = rawpy.imread(in_path)\n # input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw), axis=0) * ratio\n\n # gt_raw = rawpy.imread(gt_path)\n # im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)\n # gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)\n # raw = rawpy.imread(in_path)\n # input_images[str(ratio)[0:3]] = np.expand_dims(pack_raw(raw), axis=0) * ratio\n #\n # gt_image_rgb = np.expand_dims(\n # np.float32(np.array(PIL.Image.open(gt_path)) / 65535.), axis=0)\n H = input_images[ind].shape[1]\n W = input_images[ind].shape[2]\n\n if gt_images[ind] is None:\n # im = raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)\n # gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)\n gt_images[ind] = np.expand_dims(\n np.float32(cv2.resize(\n cv2.imread(gt_path, cv2.IMREAD_UNCHANGED)[..., ::-1], (W * 2, H * 2)) / 255.), axis=0)\n print(\"time gt: \", time.time() - st)\n # crop\n # print('**input,gt image shape: ', (H, W), gt_images[ind].shape)\n xx = np.random.randint(0, W - ps)\n yy = np.random.randint(0, H - ps)\n input_patch = input_images[ind][:, yy:yy + ps, xx:xx + ps, :]\n gt_patch = gt_images[ind][:, yy * 2:yy * 2 + ps * 2, xx * 2:xx * 2 + ps * 2, :]\n\n # print('**in,gt shape: ',\n # sess.run(tf.shape(input_patch)),\n # sess.run(tf.shape(gt_patch)))\n if np.random.randint(2, size=1)[0] == 1: # random flip\n input_patch = np.flip(input_patch, axis=1)\n gt_patch = np.flip(gt_patch, axis=1)\n if np.random.randint(2, size=1)[0] == 1:\n input_patch = np.flip(input_patch, axis=2)\n gt_patch = np.flip(gt_patch, axis=2)\n if np.random.randint(2, size=1)[0] == 1: # random transpose\n input_patch = np.transpose(input_patch, (0, 2, 1, 3))\n gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))\n # gt_patch = np.transpose(gt_patch, (0, 2, 1))\n\n input_patch = np.minimum(input_patch, 1.0)\n\n summary, _, 
G_current, output = sess.run([merged, G_opt, G_loss, out_image],\n                                     feed_dict={in_image: input_patch, gt_image: gt_patch,\n                                                lr: learning_rate})\n        output = np.minimum(np.maximum(output, 0), 1)\n\n        # print('**output shape', sess.run(tf.shape(output)))\n        g_loss[ind] = G_current\n        # if cnt % 20 == 0:\n        train_writer.add_summary(summary, cnt)\n        print(\"%d %d Loss=%.3f Time=%.3f\" % (epoch, cnt, np.mean(g_loss[np.where(g_loss)]), time.time() - st))\n\n        if epoch % save_freq == 0:\n            if not os.path.isdir(result_dir + '%04d' % epoch):\n                os.makedirs(result_dir + '%04d' % epoch)\n\n            psnr = compare_psnr(output[0, :, :, :], gt_patch[0, :, :, :], data_range=1.0)\n            ssim = compare_ssim(output[0, :, :, :], gt_patch[0, :, :, :], multichannel=True)\n\n            with open(os.path.join(log_dir, 'val.txt'), 'a+') as f:\n                f.write('epoch: ' + str(epoch) + ' id: ' + str(train_id)\n                        + ' psnr: ' + str(psnr) + ' ssim: ' + str(ssim) + '\\n')\n            temp = np.concatenate((gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)\n            # PIL.Image.fromarray((temp * 255).astype('uint8')).convert('RGB').save(\n            #     result_dir + '%04d/%05d_00_train_%d_pil.jpg' % (epoch, train_id, ratio))\n            scipy.misc.toimage(temp * 255, high=255, low=0, cmin=0, cmax=255).save(\n                result_dir + '%04d/%s_00_train.jpg' % (epoch, train_id))\n\n    saver.save(sess, checkpoint_dir + 'model.ckpt')\n","sub_path":"train_octunet_eosr.py","file_name":"train_octunet_eosr.py","file_ext":"py","file_size_in_byte":10180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"167458280","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Date: 2018/6/29\n\nimport time\nimport requests\n\n\nwith open(\"articles.json\", \"r\") as f:\n    content = f.read()\n\n\nDUMP_DATA = [\n    {\n        \"catid\": 18,\n        \"title\": \"与名师交流\",\n        \"description\": \"与智者同行,与名师交流。\",\n        \"ext[content]\": content,\n        \"_ajax\": 1\n    },\n]\n\n# login API\nLOGIN_URL = \"https://www.pxto.com.cn/user/login\"\n\n# article publishing API\nUPLOAD_ARTICLE_URL = \"https://www.pxto.com.cn/user/jigou/sysnewsadd\"\n\nLOGIN_DATA = {\n    \"account\": \"13512137664\",\n    \"password\": \"apple406\"\n}\n\nhttp = requests.session()\n\n\ndef login():\n    \"\"\"\n    Log in to the site.\n    \"\"\"\n    response = http.post(\n        url=LOGIN_URL,\n        headers={\n            \"X-Requested-With\": \"XMLHttpRequest\",\n        },\n        data=LOGIN_DATA\n    )\n\n    if response.json().get(\"status\") == 1:\n        return True\n    else:\n        return False\n\n\ndef upload_article():\n\n    success = 0\n    error = 0\n\n    for item in DUMP_DATA:\n        response = http.post(\n            url=UPLOAD_ARTICLE_URL,\n            headers={\n                \"X-Requested-With\": \"XMLHttpRequest\",\n            },\n            data=item\n        ).json()\n\n        print(\n            \"Article title: \\033[32;1m{}\\033[0m publish status: {}\".format(\n                item[\"title\"],\n                response.get(\"info\")\n            ),\n        )\n\n        if response.get(\"status\") == 1:\n            success += 1\n        else:\n            error += 1\n\n        # wait between posts to space out publishing\n        time.sleep(61)\n\n    return success, error\n\n\nif login():\n    s, e = upload_article()\n    print(\n        \"\\r\\nPublished {} articles in total, \\033[32;1m{}\\033[0m succeeded, \\033[31;1m{}\\033[0m failed\".format(\n            s + e, s, e\n        )\n    )\nelse:\n    print(\"Wrong account or password; update the configuration\")\n","sub_path":"20180701/copyfile1/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"563619165","text":"from tqdm import tqdm\nimport os\nimport requests\nfrom random import shuffle\nimport cfscrape\n\nfolder = \"platesmania\"\nif not os.path.exists(folder):\n    os.makedirs(folder)\nwith open(\"platesmania-images.txt\", 'r') as f:\n    annotations = 
f.readlines()\nshuffle(annotations)\nfor annotation in tqdm(annotations):\n    try:\n        name = annotation.strip().split(\"/\")\n        name = name[len(name) - 3] + name[len(name) - 1]\n        path = os.path.join(folder, name)\n        if not os.path.exists(path):\n            scraper = cfscrape.create_scraper()\n\n            url = annotation.strip()\n            print(url)\n            cfurl = scraper.get(url).content\n            with open(path, 'wb') as f:\n                f.write(cfurl)\n    except Exception as exep:\n        print(exep)\n\n\n\n\n\n","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"538041445","text":"from flask import Blueprint, render_template, request, session, redirect, url_for, g\n\nfrom application import db\nfrom libs.decoration import logon_required\nfrom libs.models import Question, User, Comment\n\nqa = Blueprint('qa', __name__)\n\n\n@qa.route('/list')\ndef lists():\n    \"\"\"\n    Question list view.\n    :return:\n    \"\"\"\n    return render_template('home/qa/qa_list.html')\n\n\n@qa.route('/release/', methods=['GET', 'POST'])\n@logon_required\ndef release_qa():\n    \"\"\"\n    Question posting view.\n    :return:\n    \"\"\"\n    if request.method == 'GET':\n        return render_template('home/qa/release_qa.html')\n    if request.method == 'POST':\n        title = request.form.get('title')\n        content = request.form.get('content')\n        q = Question(title=title, content=content)\n        q.author = User.query.filter_by(id=session.get('user_id')).first()\n        db.session.add(q)\n        db.session.commit()\n        return redirect(url_for('home.index'))\n\n\n@qa.route('/detail/')\ndef detail(question_id):\n    question = Question.query.filter_by(id=question_id).first()\n    comments = question.comments\n    context = {\n        'question': question,\n        'total_comments': len(comments)\n    }\n    return render_template('home/qa/detail.html', **context)\n\n\n@qa.route('/comment', methods=['POST'])\n@logon_required\ndef comment():\n    \"\"\"\n    Post a comment.\n    :return:\n    \"\"\"\n    content = request.form.get('content')  # 'content' is the comment body; 'comment' is the comment record\n    question_id = request.form.get('question_id')\n    user_id = session.get('user_id')\n\n    if content is None:\n        return redirect(url_for('qa.detail', question_id=question_id))\n\n    question = Question.query.filter_by(id=question_id).first()\n    user = User.query.filter_by(id=user_id).first()\n\n    com = Comment(content=content)\n    com.question = question\n    com.author = user\n    db.session.add(com)\n    db.session.commit()\n    # within the same blueprint the blueprint prefix could be omitted in url_for\n    return redirect(url_for('qa.detail', question_id=question_id))\n","sub_path":"views/home/question_answer.py","file_name":"question_answer.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"171596438","text":"#coding=utf-8\nimport numpy as np\nimport tensorflow as tf\nimport os\n\ntf.reset_default_graph()\n\n# Generate training and test data\ndef get_data(number):\n    list_x = []\n    list_label = []\n    for i in range(number):\n        x = np.random.randn(1)\n        # the generated data follows y = 2 * x + 10 (plus a little noise)\n        label = 2 * x + np.random.randn(1) * 0.01 + 10\n        list_x.append(x)\n        list_label.append(label)\n    return list_x, list_label\n\ndef inference(x):\n    weight = tf.get_variable(\"weight\",[1])\n    bias = tf.get_variable(\"bias\",[1])\n    y = x * weight + bias\n    return y\n\ntrain_x = tf.placeholder(tf.float32)\ntrain_label = tf.placeholder(tf.float32)\ntest_x = tf.placeholder(tf.float32)\ntest_label = tf.placeholder(tf.float32)\n\nwith tf.variable_scope(\"inference\"):\n    \"\"\"\n    # Check whether the reuse attribute is False:\n    print(tf.get_variable_scope().reuse==False)\n    \n    A scope is not reusable by default; it can be opened as reusable with\n    with tf.variable_scope(\"inference\",reuse=True):\n    \n    The three lines below open the scope as non-reusable and then mark it\n    reusable, so train and test share the same variables.\n    \"\"\" \n    train_y = inference(train_x) \n    tf.get_variable_scope().reuse_variables()\n    test_y = inference(test_x) \n\ntrain_loss = tf.square(train_y - train_label)\ntest_loss = tf.square(test_y - test_label)\nopt = tf.train.GradientDescentOptimizer(0.002)\ntrain_op = opt.minimize(train_loss)\n\ntrain_data_x, train_data_label = get_data(5000) # generate the training data\ntest_data_x, test_data_label = get_data(1)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n    sess.run(init)\n    if os.path.exists(\"./model/checkpoint\"):\n        saver.restore(sess,\"./model/my.ckpt\")\n    for i in range(5000):\n        sess.run(train_op, feed_dict={train_x: train_data_x[i],\n                                      train_label:train_data_label[i]})\n        if i % 1000 == 0:       \n            test_loss_value = sess.run(test_loss, \n                              feed_dict={test_x:test_data_x[0],\n                                         test_label: test_data_label[0]})     \n            print(\"step %d eval loss is %.3f\" %(i,test_loss_value))\n    save_path = saver.save(sess,\"./model/my.ckpt\")\n","sub_path":"Chapter3/3.2/3.2.3/Saver_save_restore.py","file_name":"Saver_save_restore.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"334944938","text":"# Solution to exercise FrogRiverOne\n# http://www.codility.com/train/\n\ndef solution(X, A):\n    \n    count_array = [0 for i in range(X+1)]\n    empty_positions = X\n    \n    for i in range(len(A)):\n        a = A[i]\n        if count_array[a] == 0:\n            empty_positions -= 1\n            count_array[a] += 1\n        if empty_positions == 0:\n            return i\n\t\n    return -1","sub_path":"FrogRiverOne.py","file_name":"FrogRiverOne.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"552041799","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport pygame\n\ndisplay_width = 460\ndisplay_height = 600\n\n\nclass Button:\n    def __init__(self, font, size, text, text_color, button_color, left, top, width, height):\n        # set_mode expects a single (width, height) tuple, not two positional arguments\n        self.gameDisplay = pygame.display.set_mode((display_width, display_height))\n        self.gameDisplay.fill(button_color)\n        self.loc = pygame.draw.rect(self.gameDisplay, text_color, (left, top, width, height))\n        # build the Font object first, then use it to render the label\n        self.font = pygame.font.Font(font, size)\n        self.text = self.font.render(text, True, text_color)\n\n    def resize_display(self, x, y):\n        # renamed from gameDisplay so the method no longer collides with the attribute of the same name\n        self.gameDisplay = pygame.display.set_mode((x, y))\n\n    \n","sub_path":"class-button.py","file_name":"class-button.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"119316646","text":"from pymongo import MongoClient\nimport pymongo\nimport json\nimport csv\nfrom mongo_db_connect import db\n\nitems = db.discuss.find({})\n\nfor item in items:\n\t\n\t# User Variables\n\tuserId = str(item['userIdEncrypt'])\n\tuserRole = str(item['userRole']) # D2L User Code\n\trole = item['role'] # Human Readable Role\n\t\n\t# Post Variables\n\tpId = item['_id'] # Post ID, also unique key in mongo\n\tsubject = item['subject']\n\tmessage = item['messageFinal']\n\tdatePosted = item['datePosted']\n\treplyPostIds = item['replyPostIds']\n\tparentPostId = item['parentPostId']\n\twordCount = item['wordCount']\n\t# Course Variables\n\touId = str(item['d2lid']) # D2L Course Id\n\tcourseName = item['courseName']\n\tsemester = item['semester']\n\t\n\t# Discussion Variables\n\tforumId = item['forumId']\n\tforumName = 
item['forumName']\n\ttopicId = item['topicId']\n\ttopicName = item['topicName']\n\ttopicDescription = item['topicDescriptionClean']\n\tthreadId = item['threadId']\n\n\n\ttopicLen = len(topicDescription)\n\n\tif topicLen != 0 and parentPostId == None:\n\t\t\n\t\tfor r in replyPostIds:\n\n\t\t\tinstructorReply = db.discuss.find({ '_id' : r , 'role' : 'Instructor' })\n\n\t\t\tfor i in instructorReply:\n\t\t\t\tprint(\"Topic\")\n\t\t\t\tprint(topicDescription)\n\t\t\t\tprint(\"Initial Student Post:\")\n\t\t\t\tprint(message)\n\t\t\t\tprint(\"Instructor Reply:\")\n\t\t\t\tprint(i['messageFinal'])\n\t\t\t\tprint(\"////////////////////////////////////\")","sub_path":"Data Analysis Scripts/discussions_topic_student_teacher.py","file_name":"discussions_topic_student_teacher.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"247774925","text":"\"\"\"\nWASL is an experimental custom textual shading language.\nIt's currently in a broken state. It's probably better\nto support WSL than roll our own shading language. This was fun though :)\n\"\"\"\n\nfrom textx import metamodel_from_str\n\nfrom ._module import ShaderModule\nfrom . import _generator_bc as bc\n\n\ngrammar = \"\"\"\nProgram: Procedure;\nComment: /#.*$/;\nProcedure: 'fn' name=ID '(' params*=IOParameter[','] ','? ')' '{' body=Body '}';\nIOParameter: name=ID ':' mode=ID type=ID location=Location;\nLocation: INT | ID;\nParameter: name=ID ':' type=ID;\nBody: expressions+=Statement;\nStatement: Assignment | Expression;\nExpression: CallExpr | Sum;\nCallExpr: name=ID '(' args+=Expression[','] ','? ')';\nAssignment: lhs=ID '=' rhs=Expression;\nSum: lhs=Term rhs*=SumRHS;\nSumRHS: op=AddOp value=Term;\nTerm: lhs=Factor rhs*=TermRHS;\nTermRHS: op=MulOp value=Factor;\nFactor: IdentifierIndexed | Identifier | Number;\nMulOp: '*' | '/';\nAddOp: '+' | '-';\nNumber: value=FLOAT;\nIdentifier: name=ID;\nIdentifierIndexed: name=ID '[' index=Expression ']';\n\"\"\".lstrip()\n\n\nmeta_model = metamodel_from_str(grammar, classes=[])\n\n\ndef wasl2shader(code, shader_type=None):\n \"\"\"Compile WASL code to a ShaderModule object.\n\n WASL is our own defined domain specific language (DSL) to write shaders.\n It is highly experimental. 
The code is parsed using textx, the resulting\n    AST is converted to bytecode, from which binary SpirV can be generated.\n    \"\"\"\n    if not isinstance(code, str):\n        raise TypeError(\"wasl2shader expects a string.\")\n\n    ast = meta_model.model_from_str(code)\n\n    converter = Wasl2Bytecode()\n    converter.convert(ast)\n    bytecode = converter.dump()\n\n    return ShaderModule(code, bytecode, \"shader from WASL\")\n\n\nclass Wasl2Bytecode:\n    \"\"\"Compile WASL AST to bytecode.\"\"\"\n\n    def convert(self, ast):\n        self._opcodes = []\n        self.visit(ast)\n\n    def dump(self):\n        return self._opcodes\n\n    def emit(self, opcode, arg):\n        self._opcodes.append((opcode, arg))\n\n    def visit(self, node):\n\n        method_name = \"visit_\" + node.__class__.__name__.lower()\n        getattr(self, method_name)(node)\n\n    def visit_procedure(self, node):\n        for param in node.params:\n            if param.mode == \"input\":\n                self.emit(bc.CO_INPUT, (param.name, param.location, param.type))\n            elif param.mode == \"output\":\n                self.emit(bc.CO_OUTPUT, (param.name, param.location, param.type))\n            elif param.mode == \"uniform\":\n                raise NotImplementedError()\n            else:\n                raise TypeError(\n                    f\"Function argument {param.name} must be input, output or uniform, not {param.mode}.\"\n                )\n\n        for node in node.body.expressions:\n            self.visit(node)\n\n    def visit_assignment(self, node):\n        self.visit(node.rhs)\n        self.emit(bc.CO_STORE, node.lhs)\n\n    def visit_sum(self, node):\n        self.visit(node.lhs)\n        for sum_rhs in node.rhs:\n            self.visit(sum_rhs.value)\n            # emit the +/- operator for each right-hand term, mirroring visit_term\n            self.emit(bc.CO_BINARY_OP, sum_rhs.op)\n\n    def visit_term(self, node):\n        self.visit(node.lhs)\n        for term_rhs in node.rhs:\n            self.visit(term_rhs.value)\n            self.emit(bc.CO_BINARY_OP, term_rhs.op)\n\n    def visit_identifier(self, node):\n        self.emit(bc.CO_LOAD, node.name)\n\n    def visit_identifierindexed(self, node):\n        self.emit(bc.CO_LOAD, node.name)\n        self.visit(node.index)\n        self.emit(bc.CO_INDEX, None)\n\n    def visit_number(self, node):\n        self.emit(bc.CO_LOAD_CONSTANT, node.value)\n\n    def visit_callexpr(self, node):\n        self.emit(bc.CO_LOAD, node.name)\n        for arg in node.args:\n            self.visit(arg)\n        self.emit(bc.CO_CALL, len(node.args))\n","sub_path":"venv/Lib/site-packages/pyshader/wasl.py","file_name":"wasl.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"453792774","text":"#l1 = [1, 5, 12, 18, 19, 20]\n#l2 = [2, 3, 4, 17]\n\ndef merge(l1, l2):\n    res = []\n    compare = lambda x, y: x > y\n    while len(l1) > 0 and len(l2) > 0:\n        if compare(l1[0], l2[0]):\n            res.append(l2[0])\n            l2.pop(0)\n        else:\n            res.append(l1[0])\n            l1.pop(0)\n    \n    while len(l1) > 0:\n        res.append(l1[0])\n        l1.pop(0)\n    \n    while len(l2) > 0:\n        res.append(l2[0])\n        l2.pop(0)\n    \n    return res\n\ndef mergeSort(L):\n    # base case: a list of fewer than two elements is already sorted\n    if len(L) < 2:\n        return L\n\n    middle = len(L) // 2\n    L1 = mergeSort(L[middle:])\n    L2 = mergeSort(L[:middle])\n\n    return merge(L1, L2)\n\nL = [2, 3, 1]\nprint(mergeSort(L))\n","sub_path":"src/python/introduction/chap10/10-2/10_4.py","file_name":"10_4.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} {"seq_id":"173482836","text":"import FWCore.ParameterSet.Config as cms\n\n##--------- good primary vertices ---------------\nfrom PhysicsTools.SelectorUtils.pvSelector_cfi import pvSelector\ngoodOfflinePrimaryVertices = 
cms.EDFilter(\"PrimaryVertexObjectFilter\",\n src = cms.InputTag('offlinePrimaryVertices'),\n filterParams = pvSelector.clone( minNdof = cms.double(4.0), maxZ = cms.double(24.0) )\n)\n\nfrom CommonTools.ParticleFlow.pfNoPileUp_cff import * \nfrom CommonTools.ParticleFlow.pfParticleSelection_cff import *\n\npfPileUp.checkClosestZVertex = False\npfPileUp.Vertices = 'goodOfflinePrimaryVertices'\npfPileUp.PFCandidates = 'particleFlow'\npfNoPileUp.bottomCollection = 'particleFlow'\n\nfrom CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi import goodOfflinePrimaryVertices\npfNoPileUpSequence.insert(0, goodOfflinePrimaryVertices)\n\npileUpSubtractionSequence = cms.Sequence(\n pfNoPileUpSequence +\n pfParticleSelectionSequence\n )\n\n# JETS CA8 ----------------------------\n\nfrom RecoJets.JetProducers.ak5PFJets_cfi import ak5PFJets\nca8PFJetsCHS = ak5PFJets.clone(\n src = 'pfNoPileUp',\n jetPtMin = cms.double(30.0),\n doAreaFastjet = cms.bool(True),\n rParam = cms.double(0.8),\n jetAlgorithm = cms.string(\"CambridgeAachen\"),\n)\n\njetSource = 'ca8PFJetsCHS'\n\n# corrections \nfrom PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi import *\npatJetCorrFactorsCA8CHS = patJetCorrFactors.clone()\npatJetCorrFactorsCA8CHS.src = jetSource\n# will need to add L2L3 corrections in the cfg\npatJetCorrFactorsCA8CHS.levels = ['L1FastJet', 'L2Relative', 'L3Absolute']\npatJetCorrFactorsCA8CHS.payload = 'AK7PFchs'\npatJetCorrFactorsCA8CHS.useRho = True\n\nfrom PhysicsTools.PatAlgos.producersLayer1.jetProducer_cfi import *\npatJetsCA8CHS = patJets.clone()\npatJetsCA8CHS.jetSource = jetSource\npatJetsCA8CHS.addJetCharge = False\npatJetsCA8CHS.embedCaloTowers = False\npatJetsCA8CHS.embedPFCandidates = False\npatJetsCA8CHS.addAssociatedTracks = False\npatJetsCA8CHS.addBTagInfo = False\npatJetsCA8CHS.addDiscriminators = False\npatJetsCA8CHS.addJetID = False\npatJetsCA8CHS.addGenPartonMatch = False\npatJetsCA8CHS.embedGenPartonMatch = False\npatJetsCA8CHS.addGenJetMatch = False\npatJetsCA8CHS.getJetMCFlavour = False\npatJetsCA8CHS.jetCorrFactorsSource = cms.VInputTag(cms.InputTag('patJetCorrFactorsCA8CHS'))\n\n#### Adding Nsubjetiness\n\npatJetsCA8CHSwithNsub = cms.EDProducer(\"NjettinessAdder\",\n src=cms.InputTag(\"patJetsCA8CHS\"),\n cone=cms.double(0.8)\n)\n\n# JETS PRUNED CA8 ----------------------------\n\nfrom RecoJets.JetProducers.ak5PFJetsPruned_cfi import ak5PFJetsPruned\nca8PFJetsCHSpruned = ak5PFJetsPruned.clone(\n src = 'pfNoPileUp',\n jetPtMin = cms.double(30.0),\n doAreaFastjet = cms.bool(True),\n rParam = cms.double(0.8),\n jetAlgorithm = cms.string(\"CambridgeAachen\"),\n)\n\njetSource = 'ca8PFJetsCHSpruned'\n\n# corrections \nfrom PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi import *\npatJetCorrFactorsCA8CHSpruned = patJetCorrFactors.clone()\npatJetCorrFactorsCA8CHSpruned.src = jetSource\n# will need to add L2L3 corrections in the cfg\npatJetCorrFactorsCA8CHSpruned.levels = ['L1FastJet', 'L2Relative', 'L3Absolute']\npatJetCorrFactorsCA8CHSpruned.payload = 'AK7PFchs'\npatJetCorrFactorsCA8CHSpruned.useRho = True\n\n\npatJetsCA8CHSpruned = patJets.clone()\npatJetsCA8CHSpruned.jetSource = jetSource\npatJetsCA8CHSpruned.addJetCharge = False\npatJetsCA8CHSpruned.embedCaloTowers = False\npatJetsCA8CHSpruned.embedPFCandidates = False\npatJetsCA8CHSpruned.addAssociatedTracks = False\npatJetsCA8CHSpruned.addBTagInfo = False\npatJetsCA8CHSpruned.addDiscriminators = False\npatJetsCA8CHSpruned.addJetID = False\npatJetsCA8CHSpruned.addGenPartonMatch = False\npatJetsCA8CHSpruned.embedGenPartonMatch = 
False\npatJetsCA8CHSpruned.addGenJetMatch = False\npatJetsCA8CHSpruned.getJetMCFlavour = False\npatJetsCA8CHSpruned.jetCorrFactorsSource = cms.VInputTag(cms.InputTag('patJetCorrFactorsCA8CHSpruned'))\n\n\nca8Jets = cms.Sequence(\n goodOfflinePrimaryVertices +\n pfNoPileUpSequence +\n ca8PFJetsCHS + \n patJetCorrFactorsCA8CHS +\n patJetsCA8CHS + \n patJetsCA8CHSwithNsub +\n ca8PFJetsCHSpruned +\n patJetCorrFactorsCA8CHSpruned +\n patJetsCA8CHSpruned\n)\n","sub_path":"python/PAT_ca8jets_simple_cff.py","file_name":"PAT_ca8jets_simple_cff.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"499842754","text":"\"\"\"This script contains classes to help import animations.\"\"\"\n\n# ***** BEGIN LICENSE BLOCK *****\n#\n# Copyright © 2005-2015, NIF File Format Library and Tools contributors.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the NIF File Format Library and Tools\n# project nor the names of its contributors may be used to endorse\n# or promote products derived from this software without specific\n# prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# ***** END LICENSE BLOCK *****\n\nfrom pyffi.formats.nif import NifFormat\nfrom io_scene_mnif.modules.property.material.material_export import Material\nfrom io_scene_mnif.utility.nif_global import NifOp\n\n\nclass PropertyHelper:\n \n def __init__(self, parent):\n self.object_property = ObjectProperty(parent)\n self.material_property = Material(parent)\n \n \nclass ObjectProperty:\n \n def __init__(self, parent):\n self.nif_export = parent\n \n def export_vertex_color_property(self, block_parent, flags=1,\n vertex_mode=0, lighting_mode=1):\n \"\"\"Create a vertex color property, and attach it to an existing block\n (typically, the root of the nif tree).\n \n @param block_parent: The block to which to attach the new property.\n @param flags: The C{flags} of the new property.\n @param vertex_mode: The C{vertex_mode} of the new property.\n @param lighting_mode: The C{lighting_mode} of the new property.\n @return: The new property block.\n \"\"\"\n # create new vertex color property block\n vcol_prop = self.nif_export.objecthelper.create_block(\"NiVertexColorProperty\")\n \n # make it a property of the parent\n block_parent.add_property(vcol_prop)\n \n # and now export the parameters\n vcol_prop.flags = flags\n vcol_prop.vertex_mode = vertex_mode\n vcol_prop.lighting_mode = lighting_mode\n \n return vcol_prop\n \n def export_z_buffer_property(self, block_parent, flags=15, func=3):\n \"\"\"Create a z-buffer property, and attach it to an existing block\n (typically, the root of the nif tree).\n\n @param block_parent: The block to which to attach the new property.\n @param flags: The C{flags} of the new property.\n @param func: The C{function} of the new property.\n @return: The new property block.\n \"\"\"\n # create new z-buffer property block\n zbuf = self.nif_export.objecthelper.create_block(\"NiZBufferProperty\")\n\n # make it a property of the parent\n block_parent.add_property(zbuf)\n\n # and now export the parameters\n zbuf.flags = flags\n zbuf.function = func\n\n return zbuf\n \n def export_alpha_property(self, flags=0x00ED, threshold=0):\n \"\"\"Return existing alpha property with given flags, or create new one\n if an alpha property with required flags is not found.\"\"\"\n # search for duplicate\n for block in self.nif_export.dict_blocks:\n if isinstance(block, NifFormat.NiAlphaProperty) and block.flags == flags and block.threshold == threshold:\n return block\n\n # no alpha property with given flag found, so create new one\n alpha_prop = self.nif_export.objecthelper.create_block(\"NiAlphaProperty\")\n alpha_prop.flags = flags\n alpha_prop.threshold = threshold\n return alpha_prop\n\n def export_specular_property(self, flags=0x0001):\n \"\"\"Return existing specular property with given flags, or create new one\n if a specular property with required flags is not found.\"\"\"\n # search for duplicate\n for block in self.nif_export.dict_blocks:\n if isinstance(block, NifFormat.NiSpecularProperty) and block.flags == flags:\n return block\n\n # no specular 
property with given flag found, so create new one\n spec_prop = self.nif_export.objecthelper.create_block(\"NiSpecularProperty\")\n spec_prop.flags = flags\n return spec_prop\n\n def export_wireframe_property(self, flags=0x0001):\n \"\"\"Return existing wire property with given flags, or create new one\n if an wire property with required flags is not found.\"\"\"\n # search for duplicate\n for block in self.nif_export.dict_blocks:\n if isinstance(block, NifFormat.NiWireframeProperty) and block.flags == flags:\n return block\n\n # no wire property with given flag found, so create new one\n wire_prop = self.nif_export.objecthelper.create_block(\"NiWireframeProperty\")\n wire_prop.flags = flags\n return wire_prop\n\n def export_stencil_property(self):\n \"\"\"Return existing stencil property with given flags, or create new one\n if an identical stencil property.\"\"\"\n # search for duplicate\n for block in self.nif_export.dict_blocks:\n if isinstance(block, NifFormat.NiStencilProperty):\n # all these blocks have the same setting, no further check\n # is needed\n return block\n\n # no stencil property found, so create new one\n stencil_prop = self.nif_export.objecthelper.create_block(\"NiStencilProperty\")\n if NifOp.props.game == 'FALLOUT_3':\n stencil_prop.flags = 19840\n return stencil_prop\n","sub_path":"src/io_scene_mnif/modules/property/property_export.py","file_name":"property_export.py","file_ext":"py","file_size_in_byte":6632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"300634603","text":"import numpy as np\nimport os\nimport pandas as pd\nimport math\nimport cv2 \nprint('2dcent and perspective')\n\ndef load_txt(file_path): # load result file\n txt_file = []\n filenames = os.listdir(file_path) # 返回指定的文件夹包含的文件\n for item in filenames:\n # print(\"item:\",item)\n txt_file.append(item)\n return txt_file\n\n\ndef parse_txt(txt_name):\n data = []\n with open(txt_name, 'r') as file_read:\n while True:\n line = file_read.readline()\n\n if not line:\n break\n line = line.strip('\\n')\n tem = line.split(\" \")\n data.append(tem)\n\n file_read.close()\n\n return data\n\n\ndef data_to_dict(path, file_list):\n dict_data = dict()\n for index, file_name in enumerate(file_list):\n file_id = file_name.split(\".\")\n file_id = file_id[0]\n file_path = os.path.join(path, file_name)\n data = parse_txt(file_path)\n dict_data[\"{}\".format(file_id)] = data\n\n return dict_data\n\ndef statstic_label_to_csv(label,path):\n\n list_result ={}\n list_w = []\n list_h = []\n list_l = []\n list_x = []\n list_y = []\n list_z = []\n list_theta= []\n list_name=[]\n list_index = [ ]\n for key, value in label.items():\n # if(len(value) == 0):\n if(value == []):\n continue\n count = len(value)\n for index in range(count): # loop for result\n # if(value[index][0] != 'Car' and value[index][0] != 'Van' and value[index][0] != 'Truck'):\n # continue\n list_w.append(value[index][8])\n list_h.append(value[index][9])\n list_l.append(value[index][10])\n list_x.append(value[index][11])\n list_y.append(value[index][12])\n list_z.append(value[index][13])\n list_theta.append(value[index][14])\n list_name.append(value[index][0])\n list_index.append(str(key)+str(index))\n newtable = pd.DataFrame(columns=['index','name','w','h','l','x','y','z','theta'])\n newtable['index'] = list_index\n newtable['name'] = list_name\n newtable['w'] = list_w\n newtable['h'] = list_h\n newtable['l'] = list_l\n newtable['x'] = list_x\n newtable['y'] = list_y\n newtable['z'] = list_z\n 
newtable['theta'] = list_theta\n\n newtable.to_csv(path,\n index=False, sep=',')\n return\n\n\n\n \n\n\ndef get_iou(result, label):\n result_left_x = float(result[0])\n result_left_y = float(result[1])\n result_right_x = float(result[2])\n result_right_y = float(result[3])\n\n label_left_x = float(label[0])\n label_left_y = float(label[1])\n label_right_x = float(label[2])\n label_right_y = float(label[3])\n\n # compute each area of retangle\n result_area = (result_right_x - result_left_x) * \\\n (result_right_y - result_left_y)\n label_area = (label_right_x - label_left_x)*(label_right_y - label_left_y)\n\n # compute the sum of area\n sum_area = result_area + label_area\n\n # find the each edge of intersect rectangle\n left_line = max(result_left_x, label_left_x)\n right_line = min(result_right_x, label_right_x)\n top_line = max(result_left_y, label_left_y)\n bottom_line = min(result_right_y, label_right_y)\n\n # judge there is an intersect\n if(left_line >= right_line or top_line >= bottom_line):\n return 0\n else:\n intersect = (right_line - left_line)*(bottom_line - top_line)\n intersect = (intersect/(sum_area-intersect))*1.0\n return intersect\n\n\ndef replace_data(label_data, result_data, replace_index):\n bev_center_x = 3\n bev_center_y = 4\n # bbox_right_x = 5\n # bbox_right_y = 6\n # ground_height = 7\n\n result_data_final = result_data\n result_list = []\n\n world_location_x_index = 11\n world_location_y_index = 12\n world_location_z_index = 13\n\n for key, value in result_data.items():\n # if(len(value) == 0):\n if(value == []):\n continue\n\n count = len(value)\n for index in range(count): # loop for result\n\n # if(value[index][0] != 'Car' and value[index][0] != 'Van' and value[index][0] != 'Truck'):\n # continue\n if(value[index][0] != 'Car'):\n continue\n\n sum_one = []\n sum_abs_min = 1e10\n sum_iou_max = -10\n\n result_bev_center_x = value[index][bev_center_x]\n result_bev_center_y = value[index][bev_center_y]\n # result_right_x = value[index][bbox_right_x]\n # result_right_y = value[index][bbox_right_y]\n result_coordinate = [result_bev_center_x, result_bev_center_y]\n\n # result_ground_height = value[index][ground_height]\n\n # label\n label_list = label_data[key]\n list_size = len(label_list)\n if list_size == 0:\n continue\n\n for index_label in range(list_size):\n label_one = label_list[index_label]\n\n # if(label_one[0] != 'Car' and label_one[0] != 'Van' and label_one[0] != 'Truck'):\n # continue\n if(label_one[0] != 'Car'):\n continue\n # world to uv label\n label_left_x = label_one[bbox_left_x+1]\n label_left_y = label_one[bbox_left_y+1]\n label_right_x = label_one[bbox_right_x+1]\n label_right_y = label_one[bbox_right_y+1]\n label_coordinate = [label_left_x,\n label_left_y, label_right_x, label_right_y]\n\n # sum_abs = abs(float(result_left_x)-float(label_left_x)) \\\n # + abs(float(result_left_y)-float(label_left_y)) \\\n # + abs(float(result_right_x)-float(label_right_x)) \\\n # + abs(float(result_right_y)-float(label_right_y))\n\n iou_data = get_iou(result_coordinate, label_coordinate)\n\n # if(sum_abs < sum_abs_min):\n # sum_abs_min = sum_abs\n # ground_height_index = 12\n # label_h_final = label_one[ground_height_index]\n\n if(iou_data >= sum_iou_max):\n sum_iou_max = iou_data\n ground_height_index = 12\n ground_distance_index = 13\n label_h_final = label_one[ground_height_index]\n\n label_distance_final = label_one[ground_distance_index]\n\n # calculate ground height error\n error = abs(float(label_h_final) - float(result_ground_height))\n # label_h_final 
result_ground_height index\n result_one = [key, label_h_final,\n result_ground_height, error, label_distance_final]\n result_list.append(result_one)\n\n return result_list\n\ndef write_csv(result, path):\n #result_one = [key, label_x,label_y,result_bev_center_x,result_bev_center_y,distance]\n # result_one = [key, label_x,label_y,result_bev_center_x,result_bev_center_y,distance_center,distance_perpective]\n ID = []\n label_x = []\n label_y = []\n result_bev_center_x = []\n result_bev_center_y = []\n distance = []\n label_height = []\n result_height = []\n error = []\n \n for index, value in enumerate(result):\n ID.append(int(value[0]))\n label_x.append(value[1])\n label_y.append(value[2])\n result_bev_center_x.append(value[3])\n result_bev_center_y.append(value[4])\n distance.append(value[5])\n label_height.append(value[6])\n result_height.append(value[7])\n error.append(value[8])\n \n \n # data_frame = pd.DataFrame({'ID':int(value[0]),'label':float(value[1]),'pred':float(value[2]),'error':float(value[3])})\n data_frame = pd.DataFrame(\n {'ID': ID, 'label_x': label_x, 'label_y': label_y, 'result_bev_center_x': result_bev_center_x, 'result_bev_center_y': result_bev_center_y,'distance' : distance,'label_height' : label_height,'result_height' : result_height,'error' : error})\n # data_frame.to_csv('data4_log_2block_val_distance.csv',\n # index=False, sep=',')\n data_frame.to_csv(path,\n index=False, sep=',')\n\ndef write_csv_2d(result, path):\n # result_one = [key, label_x,label_y,result_bev_center_x,result_bev_center_y,result_per_center_x,result_per_center_y,resdistance_center,distance_perpective]\n ID = []\n \n label_bev_x = []\n label_bev_y = []\n\n result_bev_center_x = []\n result_bev_center_y = []\n z = []\n distance_perpective = []\n x = []\n gtz = []\n for index, value in enumerate(result):\n ID.append(int(value[0]))\n label_bev_x.append(value[1])\n label_bev_y.append(value[2])\n result_bev_center_x.append(value[3])\n result_bev_center_y.append(value[4])\n distance_perpective.append(value[5])\n z.append(value[6])\n x.append(value[7])\n gtz.append(value[8]) \n # data_frame = pd.DataFrame({'ID':int(value[0]),'label':float(value[1]),'pred':float(value[2]),'error':float(value[3])})\n data_frame = pd.DataFrame(\n {'ID': ID, 'label_bev_x':label_bev_x,'label_bev_y':label_bev_y, 'result_bev_center_x': result_bev_center_x, 'result_bev_center_y': result_bev_center_y,'distance_perpective' : distance_perpective,'z':z,'gt':gtz,'x':x})\n # data_frame.to_csv('data4_log_2block_val_distance.csv',\n # index=False, sep=',')\n data_frame.to_csv(path,\n index=False, sep=',')\n\n\ndef write_data_txt(data, path):\n\n for key, value in data.items():\n\n txt_path = path + key + \".txt\"\n file_write = open(txt_path, \"w+\")\n count = len(value)\n if(count == 0):\n file_write.close()\n continue\n\n for index in range(count):\n data_one = value[index]\n\n for data in data_one: # loop for result\n file_write.write(data)\n file_write.write(\" \")\n\n file_write.write(\"\\n\")\n file_write.close()\n\n\ndef mkdir(path):\n folder = os.path.exists(path)\n if not folder:\n os.mkdir(path)\n else:\n print(\"the folder exist!\")\n\n\ndef world_to_uv(label_data, calib_data):\n\n world_location_x_index = 11\n world_location_y_index = 12\n world_location_z_index = 13\n label_one_location = []\n uv_data_dict = {}\n for key, value in label_data.items():\n if(value == []):\n continue\n uv_point_list = []\n count = len(value)\n for index in range(count): # loop for result\n if(value[index][0] != 'Car' and value[index][0] != 
'Van' and value[index][0] != 'Truck'):\n continue\n\n world_location_x = float(value[index][world_location_x_index])\n #world_location_y = float(value[index][world_location_y_index])\n world_location_y = float(1.65)\n world_location_z = float(value[index][world_location_z_index])\n world_location = np.array([world_location_x,world_location_y,world_location_z]).T\n\n xmin = float(value[index][4])\n ymin = float(value[index][5])\n xmax = float(value[index][6])\n ymax = float(value[index][7])\n\n # calib world to uv\n camera_index = 2# p2 camera\n calib_list = calib_data[key]\n calib_size = len(calib_list)\n if calib_size == 0:\n continue\n p2_camera = calib_list[2]\n p2_camera = p2_camera[1:]\n p2_camera =np.array(p2_camera,dtype=float)\n calib = np.reshape(p2_camera,[3,4])\n fx, cx, fy, cy = calib[0,0], calib[0,2], calib[1,1], calib[1,2]\n K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1])\n K = K.reshape(3,3)\n camera_point = np.dot(K, world_location)\n uv_point = camera_point/(world_location_z)\n # check boundary, condition is wrong\n image_h = 384\n image_w = 1280\n if(uv_point[0]>= image_w or uv_point[0]<= 0 or uv_point[1]>= image_h or uv_point[1]<= 0):\n uv_point = [-9999,-9999,-9999,-9999,-9999, -9999,-9999]\n uv_point_list.append(uv_point)\n continue\n \n uv_point = [uv_point[0],uv_point[1],fy,cy,fx,cx,world_location_z]\n uv_point_list.append(uv_point) \n \n uv_data_dict[\"{}\".format(key)] = uv_point_list\n \n return uv_data_dict\n\n\ndef world_to_uv_center(label_data, calib_data):\n\n label_one_location = []\n uv_data_dict = {}\n for key, value in label_data.items():\n if(value == []):\n continue\n uv_point_list = []\n count = len(value)\n for index in range(count): # loop for result\n if(value[index][0] != 'Car' and value[index][0] != 'Van' and value[index][0] != 'Truck'):\n continue\n \n xmin = float(value[index][4])\n ymin = float(value[index][5])\n xmax = float(value[index][6])\n ymax = float(value[index][7])\n u = float((xmin+xmax)/2)\n v = float((ymin+ymax)/2)\n uv_point = [u,v,1,1,1,1,1] \n uv_point_list.append(uv_point) \n uv_data_dict[\"{}\".format(key)] = uv_point_list\n \n return uv_data_dict\n\ndef cal_distance(point1,point2):\n pass\n \ndef compare_data(label_bev_uv,result_data):\n \n be_center_x_index = 3\n bev_center_y_index = 4\n height_index = 5\n result_list = []\n \n for key, value in result_data.items(): # loop for dict\n #import pdb;pdb.set_trace()\n if(value == []):\n continue\n count = len(value)\n for index in range(count): # loop for list\n \n if(value[index][0] != 'Car'):\n continue\n \n min_distance = 1e10\n result_one = []\n\n result_bev_center_x = float(value[index][bev_center_x_index])\n result_bev_center_y = float(value[index][bev_center_y_index])\n result_height = float(value[index][height_index])\n result_bev_center = np.array([result_bev_center_x,result_bev_center_y,result_height],dtype=np.float32)\n \n # label\n label_list = label_bev_uv[key]\n list_size = len(label_list)\n if list_size == 0:\n continue\n \n for index_label in range(list_size):\n label_one = label_list[index_label]\n label_x = label_one[0] \n label_y = label_one[1] \n label_height = label_one[2]\n label_bev_uv_one = np.array([label_x,label_y],dtype=np.float32)\n distance = np.sqrt(np.sum(np.square(label_bev_uv_one - result_bev_center[:2])))\n #distance = math.sqrt((label_x - result_bev_center_x )^2 + (label_y - result_bev_center_y )^2)\n #distance = (label_x - result_bev_center_x )^2 + (label_y - result_bev_center_y )^2\n error = np.abs(label_height - result_height)\n if(distance < 
min_distance):\n min_distance = distance\n result_one = [key, label_x,label_y,result_bev_center_x,result_bev_center_y,distance,label_height,result_height,error]\n # delete wrong match \n wrong_match_threshold = 100\n distance_index = 5\n if(result_one[distance_index]>wrong_match_threshold):\n continue\n result_list.append(result_one)\n #import pdb;pdb.set_trace() \n return result_list\n\n\ndef compare_data_2d(label_bev_uv,result_data):\n \n bev_center_x_index = 3\n bev_center_y_index = 4\n\n result_list=[]\n for key, value in result_data.items(): # loop for dict\n #import pdb;pdb.set_trace()\n if(value == []):\n continue\n count = len(value)\n \n for index in range(count): # loop for list\n \n if(value[index][0] != 'Car'):\n continue\n \n min_distance = 1e10\n result_one = []\n ##for orignal##\n r_xmin = float(value[index][4])\n r_xmax = float(value[index][6])\n r_ymin = float(value[index][5])\n r_ymax = float(value[index][7])\n result_bev_center_x = float((r_xmin+r_xmax)/2)\n result_bev_center_y = float((r_ymin+r_ymax)/2)\n \n #result_bev_center_x = float(value[index][bev_center_x_index])\n #result_bev_center_y = float(value[index][bev_center_y_index])\n result_bev_center = np.array([result_bev_center_x,result_bev_center_y],dtype=np.float32)\n \n # label\n label_list = label_bev_uv[key]\n list_size = len(label_list)\n if list_size == 0:\n continue\n \n for index_label in range(list_size):\n label_one = label_list[index_label]\n \n label_bev_x = label_one[0]\n label_bev_y = label_one[1]\n fy = label_one[2]\n cy = label_one[3]\n fx = label_one[4]\n cx = label_one[5]\n gt = label_one[6]\n\n result_z = float((fy*1.65)/(result_bev_center_y-cy))\n result_x = (result_bev_center_x*result_z-cx*result_z)/fx\n \n label_bev = np.array([label_bev_x,label_bev_y])\n distance_center = np.sqrt(np.sum(np.square(label_bev - result_bev_center[:2])))\n #distance = math.sqrt((label_x - result_bev_center_x )^2 + (label_y - result_bev_center_y )^2)\n #distance = (label_x - result_bev_center_x )^2 + (label_y - result_bev_center_y )^2\n distance_perpective = np.linalg.norm(label_bev - result_bev_center)\n if(distance_center < min_distance):\n min_distance = distance_center\n result_one = [key, label_bev_x,label_bev_y,result_bev_center_x,result_bev_center_y,distance_perpective,result_z,result_x,gt]\n # delete wrong match \n wrong_match_threshold = 20\n distance_index = 5\n if(result_one[distance_index]>wrong_match_threshold):\n continue\n result_list.append(result_one)\n #import pdb;pdb.set_trace() \n return result_list\n \ndef str_key(key):\n if(len(key)==1):\n key = '00000'+str(key)\n if(len(key)==2):\n key = '0000'+str(key)\n if(len(key)==3):\n key = '000'+str(key)\n if(len(key)==4):\n key = '00'+str(key)\n if(len(key)==5):\n key = '0'+str(key)\n return key\n\ndef list_To_dict(list):\n ret_dict={}\n for ele in list:\n key = str_key(ele[0])\n if(key not in ret_dict.keys()):\n ret_dict[key] = []\n ret_dict[key].append([ele[1],ele[2],ele[3],ele[4]])\n else:\n ret_dict[key].append([ele[1],ele[2],ele[3],ele[4]])\n return ret_dict\n \n \ndef compare2dcenternet(datadict):\n cent_x = []\n cent_y = []\n perspective_x=[]\n perspective_y=[]\n index_name = []\n distance = []\n\n for key, value in result_data.items():\n if(value == []):\n continue\n count = len(value)\n for index in range(count):\n # loop for list # loop for dict\n index_name.append(key+'_{}'.format(index))\n print(value[index])\n cent_x.append(value[index][3])\n cent_y.append(value[index][4])\n perspective_x.append(value[index][5])\n 
perspective_y.append(value[index][6])\n\n ce = np.array([float(value[index][3]),float(value[index][4])])\n pe = np.array([float(value[index][5]),float(value[index][6])])\n dis = np.linalg.norm(ce-pe)\n distance.append(dis)\n \n\n newtable = pd.DataFrame(columns=['index','centx','centy','per_x','per_y'])\n\n\n newtable['index'] = index_name\n newtable['centx'] = cent_x\n newtable['centy'] = cent_y\n newtable['per_x'] = perspective_x\n newtable['per_y'] = perspective_y\n newtable['dis'] = distance\n\n\n return newtable\n \nif __name__ == \"__main__\":\n\n \n # label_path = \"C:\\\\Users\\\\rockywin.wang\\\\Desktop\\\\01_prj\\\\10_3d\\\\1pic\\\\label_2\\\\\"\n # calib_path = \"C:\\\\Users\\\\rockywin.wang\\\\Desktop\\\\01_prj\\\\10_3d\\\\1pic\\\\calib\\\\\"\n # result_path =\"C:\\\\Users\\\\rockywin.wang\\\\Desktop\\\\01_prj\\\\10_3d\\\\1pic\\\\result\\\\\"\n # new_path=\"C:\\\\Users\\\\rockywin.wang\\\\Desktop\\\\01_prj\\\\10_3d\\\\bev_06.csv\"\n \n label_path = \"/mnt/nfs/zzwu/04_centerNet/CenterNet-master_lyc/data/kitti/training/label_2/\"\n image_root_path = \"/mnt/nfs/zzwu/04_centerNet/CenterNet-master_lyc/data/kitti/training/image_2/\"\n calib_path = \"/mnt/nfs/zzwu/04_centerNet/CenterNet-master_lyc/data/kitti/training/calib/\"\n #result_path =\"/mnt/nfs/zzwu/04_centerNet/CenterNet-master/CenterNet-master/exp/ddd/3dop/results/\"\n #/mnt/nfs/zzwu/04_centerNet/CenterNet-master_lyc/exp/ddd/results\n #result_path =\"/mnt/nfs/zzwu/04_centerNet/xjy/master-thesis/CenterNet-master/CenterNet-master/exp/ddd/3dop/results/\"\n result_path=\"/mnt/nfs/zzwu/04_centerNet/xjy/master_thesis/CenterNet-master/CenterNet-master/exp/ddd/3dop_resnet_18/results/\"\n new_path =\"/mnt/nfs/zzwu/04_centerNet/xjy/master_thesis/CenterNet-master/CenterNet-master/exp/ddd/3dop_resnet_18/result.csv\"\n save_root_path =\"/mnt/nfs/zzwu/04_centerNet/wxq_3d/CenterNet-master_vector_crop/exp/vis/372/\"\n npa = \"/mnt/nfs/zzwu/04_centerNet/xjy/master_thesis/CenterNet-master/CenterNet-master/exp/label.csv\"\n label_file_list = load_txt(label_path)\n\n calib_file_list = load_txt(calib_path)\n result_file_list = load_txt(result_path)\n \n label_data = data_to_dict(label_path,label_file_list)\n statstic_label_to_csv(label_data,npa)\n assert 0\n print('label suc')\n calib_data = data_to_dict(calib_path,calib_file_list)\n print('cal suc')\n result_data = data_to_dict(result_path,result_file_list)\n print('reusl suc')\n #label_bev_uv = world_to_uv(label_data,calib_data)\n #label_center = world_to_uv_center(label_data,calib_data)\n #label_center = world_to_uv(label_data, calib_data)\n label_center = world_to_uv_center(label_data, calib_data)\n \n result_list = compare_data_2d(label_center,result_data)\n\n write_csv_2d(result_list,new_path)\n\n\n # dic = list_To_dict(result_list)\n\n \n\n\n # for key, value in dic.items():\n # if(value == []):\n # continue\n # uv_point_list = []\n # count = len(value)\n # # control one picture\n # # if(key != '001512'):\n # # continue\n # image_name = str(key) + \".png\"\n # #image_name = '000001.png'\n # image_path = image_root_path + image_name\n # save_path = save_root_path + str(image_name)\n # #image_path = './image_2_01/' + str(image_name)\n # image = cv2.imread(str(image_path))\n # print(save_path)\n # for index in range(count): # loop for result\n # label_x = int(float(value[index][0])) # bev\n # label_y = int(float(value[index][1]))\n # point_bev = (label_x,label_y)\n # predict_u = int(float(value[index][2]))\n # predict_v = int(float(value[index][3]))\n # point_predit = (predict_u,predict_v)\n 
# point_size = 2\n # line_color_bev = (0,255,255) # bgr\n # line_color_165 = (255,0,0)\n # line_color_center = (0,255,0)\n # thickness = 2 #cv2.line(image, point_16, point_15,point_color,thickness)\n # # draw bev to 2d center\n # cv2.line(image, point_bev, point_predit,line_color_165,thickness)\n # print('hhhhh')\n # #cv2.line(image, point_center, point_15,point_color_1,thickness)\n # radius = 3\n # point_color_2d_center = (0,0,255)\n # point_color_165 = (0,255,255)\n # cv2.circle(image, point_bev, radius,point_color_165,thickness)\n # cv2.circle(image, point_predit,radius,point_color_2d_center,thickness)\n # cv2.imwrite(save_path,image)\n\n\n\n\n\n\n\n","sub_path":"exp/eval_tools/outlabel.py","file_name":"outlabel.py","file_ext":"py","file_size_in_byte":24349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"403432554","text":"import garble\nfrom types import SimpleNamespace\nfrom pathlib import Path\n\n\ndef do_garble(source_file, secret_file, output_dir, schema_dir, output_zip):\n print(\"source_file: \" + source_file)\n print(\"secret_file: \" + secret_file)\n print(\"output_dir: \" + output_dir)\n print(\"schema_dir: \" + schema_dir)\n print(\"output_zip: \" + output_zip)\n args = {\n \"sourcefile\": source_file,\n \"secretfile\": secret_file,\n \"schemadir\": schema_dir,\n \"outputdir\": output_dir,\n \"outputzip\": output_zip\n }\n args = SimpleNamespace(**args)\n clks = garble.garble_pii(args)\n garble.create_clk_zip(clks, args)\n\n\n# def do_garble_household(source_file, secret_file, output_dir, schemafile, output_zip, mappingfile):\n# args = {\n# \"sourcefile\": source_file,\n# \"secretfile\": secret_file,\n# \"schemafile\": schemafile,\n# \"outputdir\": output_dir,\n# \"outputzip\": output_zip,\n# \"mappingfile\": mappingfile,\n# \"testrun\": \"t\"\n# }\n# print(\"\")\n# print(\"-------------------\")\n# print(\"ARGS:\")\n# print(str(args))\n# print(\"-------------------\")\n# print(\"\")\n# args = SimpleNamespace(**args)\n# clks = gh.garble_households(args)\n# gh.create_clk_zip(clks, args)\n\n\n","sub_path":"util/garble/garble_util.py","file_name":"garble_util.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"297819835","text":"# from google.colab import drive\r\n# drive.mount('/content/drive')\r\n\r\n# !pip install sklearn_crfsuite\r\n\r\nfrom nltk.corpus import conll2002\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nfrom sklearn.linear_model import Perceptron, SGDClassifier, RidgeClassifier\r\nfrom sklearn.metrics import precision_recall_fscore_support\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nimport string\r\nimport re\r\nimport nltk\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.corpus import stopwords\r\nimport sklearn_crfsuite\r\nimport pickle\r\nimport numpy as np\r\nfrom sklearn.metrics import f1_score\r\nfrom nltk.corpus.reader import ConllCorpusReader\r\nimport nltk\r\nimport matplotlib.pyplot as plt\r\n\r\nnltk.download('conll2002')\r\n\r\nregex = re.compile(\r\n r'^(?:http|ftp)s?://' # http:// or https://\r\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain...\r\n r'localhost|' # localhost...\r\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\r\n r'(?::\\d+)?' 
# optional port\r\n r'(?:/?|[/?]\S+)$', re.IGNORECASE)\r\n\r\ndef wordshape(text):\r\n # map capitals to X, lowercase to x, digits to d; e.g. wordshape(\"Abc-123\") -> \"Xxx-ddd\"\r\n t1 = re.sub('[A-Z]', 'X',text)\r\n t2 = re.sub('[a-z]', 'x', t1)\r\n return re.sub('[0-9]', 'd', t2)\r\n\r\ndef getfeats(word, o):\r\n \"\"\" This takes the word in question and\r\n the offset with respect to the instance\r\n word \"\"\"\r\n features = [\r\n (str(o) + 'word', word)\r\n # TODO: add more features here.\r\n ]\r\n return features\r\n\r\ndef gettag(tag, o):\r\n features = [ (str(o) +\"tag\", tag) ]\r\n return features\r\n\r\ndef gethyphen(word, o):\r\n if('-' in word):\r\n features = [(str(o) +\"hyphen\", 1)]\r\n else:\r\n features = [(str(o) +\"hyphen\", 0)]\r\n return features\r\n \r\ndef capletter(word, o):\r\n if(word[0].isupper()):\r\n features = [(str(o) +\"first_upper\", 1)]\r\n else:\r\n features = [(str(o) +\"first_upper\", 0)]\r\n return features\r\n\r\ndef noun_suffix(word, o):\r\n if(word.endswith('o') or word.endswith('or') or word.endswith('a') or word.endswith('ora')):\r\n features = [(str(o) +\"common_suffix\", 1)]\r\n else:\r\n features = [(str(o) +\"common_suffix\", 0)]\r\n return features\r\n\r\ndef get_wordshape(word, o):\r\n feature = [(str(o) +\"word_shape\", wordshape(word))]\r\n return feature\r\n\r\ndef all_upper(word, o):\r\n if(word.isupper()):\r\n return [(str(o) +\"all_upper\", 1)]\r\n else:\r\n return [(str(o) +\"all_upper\", 0)]\r\n\r\ndef all_lower(word, o):\r\n if(word.islower()):\r\n return [(str(o) +\"all_lower\", 1)]\r\n else:\r\n return [(str(o) +\"all_lower\", 0)]\r\n\r\ndef has_apostrophe(word, o):\r\n if(\"'\" in word):\r\n return [(str(o) +\"apostrophe\", 1)]\r\n else:\r\n return [(str(o) +\"apostrophe\", 0)]\r\n\r\ndef isEnglish(s):\r\n try:\r\n s.encode(encoding='utf-8').decode('ascii')\r\n except UnicodeDecodeError:\r\n return False\r\n else:\r\n return True\r\n\r\ndef special_characters(word, o):\r\n if(isEnglish(word)):\r\n return [(str(o) +\"special_characters\", 0)]\r\n else:\r\n return [(str(o) +\"special_characters\", 1)]\r\n\r\ndef onlynum(word, o):\r\n if(word.isdigit()):\r\n return [(str(o) +\"onlynum\", 1)]\r\n else:\r\n return [(str(o) +\"onlynum\", 0)]\r\n\r\ndef contains_num(word, o):\r\n if(any(char.isdigit() for char in word)):\r\n return [(str(o) +\"contains_num\", 1)]\r\n else:\r\n return [(str(o) +\"contains_num\", 0)]\r\n\r\ndef ending_fullstop(word, o):\r\n if(word[-1] == '.'):\r\n return [(str(o) +\"fullstop\", 1)]\r\n else:\r\n return [(str(o) +\"fullstop\", 0)]\r\n\r\ndef minlen(word, o):\r\n if(len(word)>=2):\r\n return [(str(o) +\"minlen\", 1)]\r\n else:\r\n return [(str(o) +\"minlen\", 0)]\r\n\r\ndef punctuation(word, o):\r\n for i in word: \r\n if i in string.punctuation: \r\n return [(str(o) +\"punctuation\", 1)]\r\n return [(str(o) +\"punctuation\", 0)]\r\n\r\ndef all_punctuation(word, o):\r\n count = 0\r\n for i in word: \r\n if i in string.punctuation: \r\n count = count +1\r\n if(count == len(word)):\r\n return [(str(o) +\"all_punctuation\", 1)]\r\n else:\r\n return [(str(o) +\"all_punctuation\", 0)]\r\n\r\ndef is_stopword(word, o):\r\n stop_words = set(stopwords.words('spanish'))\r\n if(word in stop_words):\r\n return([(str(o) +\"is_stop\", 1)])\r\n else:\r\n return([(str(o) +\"is_stop\", 0)])\r\n\r\n\r\ndef isRomanNumeral(word, o):\r\n numeral = word.upper()\r\n validRomanNumerals = [\"M\", \"D\", \"C\", \"L\", \"X\", \"V\", \"I\"]\r\n for letters in numeral:\r\n if letters not in validRomanNumerals:\r\n return ([(str(o) +\"is_roman\", 0)])\r\n\r\n return ([(str(o) + \"is_roman\", 1)])\r\n\r\n\r\ndef contains_dots(word, 
o):\r\n if word.find('.')==-1:\r\n return ([(str(o) + \"has_dot\", 0)])\r\n\r\n return ([(str(o) + \"has_dot\", 1)])\r\n\r\n\r\ndef single_char(word, o):\r\n if len(word)==1:\r\n return ([(str(o) + \"is_char\", 1)])\r\n\r\n return ([(str(o) + \"is_char\", 0)])\r\n\r\ndef is_url(word, o):\r\n if re.match(regex, word) is not None:\r\n return ([(str(o) + \"is_url\", 1)])\r\n return ([(str(o) + \"is_url\", 0)])\r\n\r\n\r\ndef word2features(sent, i):\r\n \"\"\" The function generates all features\r\n for the word at position i in the\r\n sentence.\"\"\"\r\n features = []\r\n # the window around the token\r\n for o in [-4,-3, -2,-1,0,1,2, 3, 4]:\r\n if i+o >= 0 and i+o < len(sent):\r\n word = sent[i+o][0]\r\n tag = sent[i+o][1]\r\n featlist = getfeats(word, o)\r\n features.extend(featlist)\r\n featlist = gettag(tag, o)\r\n features.extend(featlist)\r\n\r\n featlist = gethyphen(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = capletter(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = noun_suffix(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = get_wordshape(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = all_upper(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = all_lower(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = has_apostrophe(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = special_characters(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = onlynum(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = contains_num(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = ending_fullstop(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = isRomanNumeral(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = contains_dots(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = single_char(word, o)\r\n features.extend(featlist)\r\n\r\n featlist = is_url(word, o)\r\n features.extend(featlist)\r\n\r\n \r\n word = sent[i][0]\r\n tag = sent[i][1]\r\n\r\n features.extend([(\"word_lower\", word.lower())])\r\n\r\n features.extend([(\"word_len\", len(word))])\r\n\r\n if (i == 0):\r\n features.extend([(\"firstword\", 1)])\r\n else:\r\n features.extend([(\"firstword\", 0)])\r\n\r\n features.extend([(\"bias\", 1)])\r\n \r\n return dict(features)\r\n\r\nif __name__ == \"__main__\":\r\n train = ConllCorpusReader('', 'training_data', ['words', 'pos', 'ignore', 'chunk'])\r\n dev = ConllCorpusReader('', 'validation_data', ['words', 'pos', 'ignore', 'chunk'])\r\n test = ConllCorpusReader('', 'testing_data', ['words', 'pos', 'ignore', 'chunk'])\r\n\r\n train_sents = list(train.iob_sents())\r\n dev_sents = list(dev.iob_sents())\r\n test_sents = list(test.iob_sents())\r\n\r\n # train_sents = list(conll2002.iob_sents('esp.train'))\r\n # dev_sents = list(conll2002.iob_sents('esp.testa'))\r\n # test_sents = list(conll2002.iob_sents('esp.testb'))\r\n\r\n\r\n X_train = []\r\n y_train = []\r\n\r\n train_feats = []\r\n train_labels = []\r\n\r\n for sent in train_sents:\r\n train_feats = []\r\n train_labels = []\r\n for i in range(len(sent)):\r\n feats = word2features(sent, i)\r\n train_feats.append(feats)\r\n train_labels.append(sent[i][-1])\r\n X_train.append(train_feats)\r\n y_train.append(train_labels)\r\n\r\n X_test = []\r\n y_test = []\r\n\r\n test_feats = []\r\n test_labels = []\r\n\r\n for sent in test_sents:\r\n test_feats = []\r\n test_labels = []\r\n for i in range(len(sent)):\r\n feats = word2features(sent, i)\r\n test_feats.append(feats)\r\n test_labels.append(sent[i][-1])\r\n X_test.append(test_feats)\r\n 
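# labels go into a parallel list so each sentence's label sequence stays aligned with its feature sequence\r\n 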
y_test.append(test_labels)\r\n\r\n\r\n # sentences vary in length, so keep these as object arrays rather than rectangular ones\r\n X_train = np.array(X_train, dtype=object)\r\n X_test = np.array(X_test, dtype=object)\r\n y_train = np.array(y_train, dtype=object)\r\n y_test = np.array(y_test, dtype=object)\r\n\r\n true_y_test = []\r\n for li in y_test:\r\n for label in li:\r\n true_y_test.append(label)\r\n true_y_test=np.array(true_y_test)\r\n\r\n samples = []\r\n item = 0\r\n for i in range(0, 120):\r\n item += 20\r\n samples.append(item)\r\n for i in range(0, 62):\r\n item += 200\r\n samples.append(item)\r\n samples.append(len(train_sents))\r\n\r\n\r\n all_f1_scores = []\r\n\r\n for no_of_samples in samples:\r\n model = sklearn_crfsuite.CRF(\r\n algorithm='lbfgs',\r\n c1=0.5,\r\n c2=0.1,\r\n max_iterations=100,\r\n all_possible_transitions=True\r\n )\r\n\r\n #random_sample_indices = np.random.randint(0, len(y_train), size=no_of_samples)\r\n\r\n lengths = np.array([len(x) for x in train_sents])\r\n ind = np.argpartition(lengths, -no_of_samples)[-no_of_samples:]\r\n for index in ind:\r\n for i in range(0, len(train_sents[index])):\r\n print(train_sents[index][i][0], end = \" \")\r\n print()\r\n model.fit(X_train[ind], y_train[ind])\r\n y_pred = model.predict(X_test)\r\n\r\n true_y_pred = []\r\n for li in y_pred:\r\n for label in li:\r\n true_y_pred.append(label)\r\n true_y_pred=np.array(true_y_pred) \r\n \r\n\r\n \r\n f1_score_of_test = f1_score(true_y_test, true_y_pred, average=\"macro\")\r\n print(\"No of samples = \", no_of_samples, \" f1 score = \", f1_score_of_test)\r\n all_f1_scores.append(f1_score_of_test)\r\n\r\n plt.plot(samples, all_f1_scores)\r\n plt.show()\r\n\r\n with open(\"length_baseline_english_results.txt\", \"w\") as f:\r\n for i in range(len(samples)):\r\n f.write(str(samples[i])+\" \"+str(all_f1_scores[i])+\"\\n\")","sub_path":"length_sampling_english.py","file_name":"length_sampling_english.py","file_ext":"py","file_size_in_byte":10653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"388655290","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef read_matrix(N):\n matrix = []\n for _ in range(N):\n matrix.append([int(num) for num in sys.stdin.readline().split()])\n return matrix\n\ndef rotate_matrix(matrix, N):\n final = ''\n for i in range(N):\n for row in reversed(matrix):\n final += str(row[i]) + ' '\n final = final.strip() + '\\n'\n print(final)\n\ndef main():\n for line in sys.stdin:\n N = int(line)\n if N == 0:\n break\n matrix = read_matrix(N)\n rotate_matrix(matrix, N)\n\nif __name__ == '__main__':\n main()\n","sub_path":"prog-challenges/challenge00.py","file_name":"challenge00.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"638046233","text":"import pygame\nfrom pygame.locals import *\n\nfrom camera import GameCamera\n\nimport random\n\ncollidables = []\n\nclass Collidable(object):\n\tdef __init__(self, posx=0, posy=0, size=10):\n\t\tself.posx = posx\n\t\tself.posy = posy\n\t\tself.size = size\n\t\tself.velocityx = 0\n\t\tself.velocityy = 0\n\t\tself.register()\n\n\t# Adds collidable to collidable register\n\tdef register(self):\n\t\tself.rect = pygame.Rect(0, 0, 0, 0)\n\t\tself.posx = self.posx or 0\n\t\tself.posy = self.posy or 0\n\t\tself.velocityx = self.velocityx or 0\n\t\tself.velocityy = self.velocityy or 0\n\t\tself.size = self.size or 0\n\t\tcollidables.append(self)\n\n\t# Removes collidable from collidable register\n\tdef deregister(self):\n\t\tif self in collidables:\n\t\t\tcollidables.remove(self)\n\n\t# Updates position based on delta and 
checks for collisions\n\tdef update(self, dt):\n\t\tself.posx += self.velocityx * dt\n\t\tself.posy += self.velocityy * dt\n\n\t\tself.rect = pygame.Rect(GameCamera.adjust_pos(self.posx, self.posy), (self.size, self.size))\n\n\t\tfor b in collidables:\n\t\t\tif (b != self) and self.rect.colliderect(b.rect):\n\t\t\t\tif hasattr(self, \"on_collide\") or hasattr(b, \"on_collide\"):\n\t\t\t\t\tif hasattr(self, \"on_collide\"):\n\t\t\t\t\t\tself.on_collide(b)\n\t\t\t\t\tif hasattr(b, \"on_collide\"):\n\t\t\t\t\t\tb.on_collide(self)\n\t\t\t\telse:\n\t\t\t\t\tself.default_collide(b)\n\n\t# Called on collision with another object\n\tdef default_collide(self, b):\n\t\tprint(\"colliding: \" + str(self) + str(b))\n","sub_path":"notss-cl/collidable.py","file_name":"collidable.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"404835063","text":"from unittest import TestCase\n\nfrom ....lib import Grep, GrepLambda, Str\n\nclass T(TestCase):\n def test0identical_map(self):\n class G0(Grep):\n def __call__(self, y):\n return 2 <= y\n G1 = lambda: GrepLambda(lambda y: 2 <= y)\n iterable = range(10)\n expected = 2\n self._test(G0, G1, iterable, expected)\n def test1str_decoration(self):\n class G0(Grep):\n def __call__(self, y):\n return y == '2' # is '2'?\n G1 = lambda: GrepLambda(lambda y: y == '2') # is '2'?\n iterable = range(10)\n iterable = map(Str, iterable)\n expected = '2'\n self._test(G0, G1, iterable, expected)\n def test2enumerate1str_decoration(self):\n class G0(Grep):\n def __call__(self, i, y):\n return y == '2' # is '2'\n G1 = lambda: GrepLambda(lambda i, y: y == '2') # is '2'?\n iterable = range(10)\n iterable = map(Str, iterable)\n iterable = enumerate(iterable)\n expected = 2, '2'\n self._test(G0, G1, iterable, expected)\n def _test(self, g0, g1, iterable, expected):\n iterable = tuple(iterable)\n expected = expected, expected\n g0, g1 = (g().find_quit(iterable) for g in (g0, g1))\n actual = g0, g1\n self.assertEqual(expected, actual)\n","sub_path":"apymake/zztest/lib/grep/t1find_quit.py","file_name":"t1find_quit.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"385914413","text":"#!/usr/bin/python3\n\n# This can either be run from a command line with python3 alphabeta.py or imported with\n# from alphabeta import alphabeta\n\n# USAGE:\n# alphabeta(input, start, lower, upper)\n#\n# Where:\n# input is a list form input tree. See example in this file.\n# start is the root node number. So, either 0 or 1 (0 if root is MAX, 1 if root is MIN)\n# upper is the upper limit for beta. Set this to something higher than any value in your tree\n# lower is the lower limit for alpha. 
Set this to something less than any value in your tree\n#\n# The function returns the root alpha and beta values, as well as the result, and the number of\n# 'prunings' that took place.\n\n# This is the tree we are working with\ntree = [[[5, 1, 2], [8, -8, -9]], [[9, 4, 5], [-3, 4, 3]]]\nroot = 0\npruned = 0\n\ndef children(branch, depth, alpha, beta):\n global tree\n global root\n global pruned\n i = 0\n for child in branch:\n if type(child) is list:\n (nalpha, nbeta) = children(child, depth + 1, alpha, beta)\n if depth % 2 == 1:\n beta = nalpha if nalpha < beta else beta\n else:\n alpha = nbeta if nbeta > alpha else alpha\n branch[i] = alpha if depth % 2 == 0 else beta\n i += 1\n else:\n if depth % 2 == 0 and alpha < child:\n alpha = child\n if depth % 2 == 1 and beta > child:\n beta = child\n if alpha >= beta:\n pruned += 1\n break\n if depth == root:\n tree = alpha if root == 0 else beta\n return (alpha, beta)\n\ndef alphabeta(in_tree=tree, start=root, lower=-15, upper=15):\n global tree\n global pruned\n global root\n\n # honour the supplied arguments instead of silently using the module-level globals\n tree = in_tree\n root = start\n\n (alpha, beta) = children(tree, start, lower, upper)\n \n if __name__ == \"__main__\":\n print (\"(alpha, beta): \", alpha, beta)\n print (\"Result: \", tree)\n print (\"Times pruned: \", pruned)\n\n return (alpha, beta, tree, pruned)\n\nif __name__ == \"__main__\":\n alphabeta()\n","sub_path":"all-gists/1446104/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"427011618","text":"import tkinter as tk\n\n\nclass Editable:\n    def set_data_type(self, dataType):\n        if dataType == \"string\":\n            self.var = tk.StringVar()\n        elif dataType == \"int\":\n            self.var = tk.IntVar()\n        elif dataType == \"double\":\n            self.var = tk.DoubleVar()\n\n    def get_value(self):\n        # print(self.var.get())\n        return self.var.get()\n\n    def set_value(self, data):\n        self.var.set(data)\n\n\nEditable.STRING = \"string\"\nEditable.INT = \"int\"\nEditable.DOUBLE = \"double\"\n","sub_path":"framework/GUI/tkinter/components/interfaces/Editable.py","file_name":"Editable.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"638575899","text":"import logging\nimport os\nimport sys\nimport numpy as np\nfrom keras import Sequential\nfrom keras.layers import LSTM, Dropout, Dense\nfrom keras.utils import np_utils\nfrom random import randint\n\n\ndef load_text(load_dir):\n \"\"\"\n :param load_dir: the directory where the text to be loaded is stored\n :return: loaded text in lowercase format\n \"\"\"\n if os.path.exists(load_dir):\n text_to_load = (open(load_dir).read())\n text_to_load = text_to_load.lower()\n return text_to_load\n else:\n logging.critical(f\"{load_dir} could not be found in directory.\")\n return None\n\n\ndef create_characters(text_to_convert):\n \"\"\"\n :param text_to_convert: text to convert to a set of characters\n :return: the set of characters present in the set\n \"\"\"\n return sorted(list(set(text_to_convert)))\n\n\ndef create_maps(chars):\n \"\"\"\n :param chars: characters to map\n :return: two maps, number:character and character:number\n \"\"\"\n n_map = {n: char for n, char in enumerate(chars)}\n char_map = {char: n for n, char in enumerate(chars)}\n return n_map, char_map\n\n\ndef process_data(characters, text_length, sequence_length, process_text, char_to_n, X, Y):\n \"\"\"\n :param text_length: the length of the text to be processed\n :param sequence_length: the length of 
the sequence to be produced\n :param process_text: the text we are processing\n :param characters: the sorted character inventory (used to normalise X)\n :param char_to_n: map from character to integer index\n :param X: list that accumulates the input sequences\n :param Y: list that accumulates the target indices\n :return: X - training matrix, Y - target matrix\n \"\"\"\n for i in range(text_length - sequence_length):\n sequence = process_text[i:i + sequence_length]\n label = process_text[i + sequence_length]\n X.append([char_to_n[char] for char in sequence])\n Y.append(char_to_n[label])\n\n x_modified = np.reshape(X, (len(X), sequence_length, 1))\n x_modified = x_modified / float(len(characters))\n y_modified = np_utils.to_categorical(Y)\n\n return x_modified, y_modified\n\n\ndef create_model(X_mod, Y_mod):\n \"\"\"\n :return: the model used to train (or the loaded model if weights already exist)\n \"\"\"\n model = Sequential()\n model.add(LSTM(700, input_shape=(X_mod.shape[1], X_mod.shape[2]), return_sequences=True))\n model.add(Dropout(0.2))\n model.add(LSTM(700))\n model.add(Dropout(0.2))\n\n model.add(Dense(Y_mod.shape[1], activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n if os.path.exists(\"models/generation_model.h5\"):\n model.load_weights(\"models/generation_model.h5\")\n logging.info(\"Model Loaded.\")\n else:\n model.fit(X_mod, Y_mod, epochs=30, batch_size=200)\n model.save_weights(\"models/generation_model.h5\")\n\n return model\n\n\n# generating characters\ndef generate_text(string_mapping, characters, model, n_to_char, full_string):\n \"\"\"\n :param string_mapping: the mapping of characters in the string\n :return: generated text that will be used as a tweet\n \"\"\"\n for i in range(200):\n x = np.reshape(string_mapping, (1, len(string_mapping), 1))\n x = x / float(len(characters))\n\n pred_index = np.argmax(model.predict(x, verbose=0))\n full_string.append(n_to_char[pred_index])\n\n string_mapping.append(pred_index)\n string_mapping = string_mapping[1:len(string_mapping)]\n\n txt = \"\"\n for char in full_string:\n txt = txt + char\n\n with open(\"tweet_text.txt\", \"w\") as f:\n f.write(txt)\n logging.info(txt)\n\n return txt\n\n\ndef create_tweet(text_dir):\n X = []\n Y = []\n text = load_text(text_dir)\n characters = create_characters(text)\n length = len(text)\n seq_length = 50\n n_to_char, char_to_n = create_maps(create_characters(text))\n logging.info(\"Mappings created.\")\n X_mod, Y_mod = process_data(characters, length, seq_length, text, char_to_n, X, Y)\n logging.info(\"Data processed.\")\n model = create_model(X_mod, Y_mod)\n logging.info(\"Model created/loaded successfully.\")\n\n try:\n string_mapped = X[randint(0, 2000)]\n full_string = [n_to_char[value] for value in string_mapped]\n text_returned = generate_text(string_mapped, characters, model, n_to_char, full_string)\n logging.info(f\"Text created for tweet: {text_returned}.\")\n except IndexError as e:\n logging.info(str(e))\n pass\n\n\nif __name__ == '__main__':\n # create_tweet requires a corpus path; take it from the command line\n create_tweet(sys.argv[1])\n","sub_path":"src/nlp/create_text.py","file_name":"create_text.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"219687872","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.views.generic import FormView \nfrom django.contrib.auth.forms import UserCreationForm \nfrom django.contrib.auth import login, authenticate \nfrom common.forms import ProfileCreationForm \nfrom django.http.response import HttpResponseRedirect \nfrom django.urls import reverse_lazy \nfrom common.models import UserProfile\nfrom allauth.socialaccount.models import SocialAccount\n\ndef index(request):\n context = {}\n if 
request.user.is_authenticated:\n context['username'] = request.user.username\n try:\n context['age'] = UserProfile.objects.get(user=request.user).age\n context['books'] = UserProfile.objects.get(user=request.user).books\n context['devlang'] = UserProfile.objects.get(user=request.user).devlang\n except UserProfile.DoesNotExist:\n context['age'] = None\n context['books'] = None\n context['devlang'] = None\n try:\n context['github_url'] = SocialAccount.objects.get(provider='github', user=request.user).extra_data['html_url']\n except SocialAccount.DoesNotExist:\n context['github_url'] = None\n\n return render(request, 'index.html', context)\n\n\nclass RegisterView(FormView):\n\n form_class = UserCreationForm\n\n def form_valid(self, form):\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n login(self.request, authenticate(username=username, password=raw_password))\n return super(RegisterView, self).form_valid(form)\n\n\nclass CreateUserProfile(FormView):\n\n form_class = ProfileCreationForm\n template_name = 'profile-create.html'\n success_url = reverse_lazy('common:index')\n\n def dispatch(self, request, *args, **kwargs):\n if self.request.user.is_anonymous:\n return HttpResponseRedirect(reverse_lazy('common:login'))\n return super(CreateUserProfile, self).dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n instance = form.save(commit=False)\n instance.user = self.request.user\n instance.save()\n return super(CreateUserProfile, self).form_valid(form)\n \nclass EditUserProfile(FormView):\n \n form_class = ProfileCreationForm\n template_name = 'profile-update.html'\n success_url = reverse_lazy('common:index')\n\n \n def dispatch(self, request, *args, **kwargs):\n if self.request.user.is_anonymous:\n return HttpResponseRedirect(reverse_lazy('common:login'))\n return super(EditUserProfile, self).dispatch(request, *args, **kwargs)\n \n \n def form_valid(self, form):\n try:\n instance = self.request.user\n user = UserProfile.objects.get(user=instance)\n user.age = form.cleaned_data.get('age')\n user.books = form.cleaned_data.get('books')\n user.devlang = form.cleaned_data.get('devlang')\n user.save()\n except UserProfile.DoesNotExist:\n instance = form.save(commit=False)\n instance.user = self.request.user\n instance.save()\n return super(EditUserProfile, self).form_valid(form)","sub_path":"common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"241528473","text":"import sys\nfrom mainwindow import *\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n# Import the threading module for working with threads\nimport threading\nfrom subprocess import Popen, PIPE, STDOUT\n\nlisten=''\noutputtext=''\n\n# Functions for signalling between threads\ndef signal_handler(signal, frame):\n global interrupted\n interrupted = True \ndef interrupt_callback():\n global interrupted\n return interrupted\n\n# Helper decorator template that runs the\n# wrapped function in a separate thread\ndef thread(my_func):\n def wrapper(*args, **kwargs):\n my_thread = threading.Thread(target=my_func, args=args, kwargs=kwargs)\n my_thread.start()\n return wrapper\n\ndef undotIPv4 (dotted):\n return sum (int (octet) << ( (3 - i) << 3) for i, octet in enumerate (dotted.split ('.') ) )\n\ndef dotIPv4 (addr):\n return '.'.join (str (addr >> off & 0xff) for off in (24, 16, 8, 0) )\n\ndef rangeIPv4 (start, stop):\n for addr in range (undotIPv4 (start), undotIPv4 (stop) ):\n yield dotIPv4 (addr)\n\n \n\nclass 
MyWin(QtWidgets.QMainWindow):\n my_listen = QtCore.pyqtSignal(list, name='my_listen')\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n qss_file = open('style_file.qss').read()\r\n self.setStyleSheet(qss_file)\n self.ui.lineEdit.setText('37.151.22.0 - 37.151.22.255')\n # Wire up the click event for the Scan button\n self.ui.pushButton.clicked.connect(self.scanips)\n global listen\n listen=self.my_listen\n self.my_listen.connect(self.mylisten, QtCore.Qt.QueuedConnection)\n\n @thread\n def gocommand(self):\n global listen\n p = Popen('masscan -p80,21 -iL masin.txt --rate=300 -oL masout.txt', stdout = PIPE, stderr = STDOUT, shell = True)\n while p.poll() is None:\n line = p.stdout.readline()\n listen.emit([line])\n p = Popen('awk \\'{ print $4 }\\' masout.txt > nmapin.txt', stdout = PIPE, stderr = STDOUT, shell = True)\n while p.poll() is None:\n line = p.stdout.readline()\n listen.emit([line])\n p = Popen('nmap -p 80,21 --script \"http-title\",\"ftp-anon\" -iL nmapin.txt -oN output.txt', stdout = PIPE, stderr = STDOUT, shell = True)\n while p.poll() is None:\n line = p.stdout.readline()\n listen.emit([line])\n p = Popen('python3 nparse.py', stdout = PIPE, stderr = STDOUT, shell = True)\n while p.poll() is None:\n line = p.stdout.readline()\n listen.emit([line])\n\n # Function that runs when the Scan button is pressed\n def scanips(self):\n # Get the value from the IP-range input field\n ips = self.ui.lineEdit.text()\n # Strip spaces from the range string\n ips=ips.replace(' ', '')\n # Split the string on the dash into start and end IP addresses\n ip = ips.split('-')\n # Declare a list that will hold every IP in the given range\n ipmas=[]\n # Loop over the range and collect every IP into the list\n for x in rangeIPv4 (ip[0], ip[1]):\n ipmas.append(x)\n # Append the last IP of the range to the list\n ipmas.append(ip[1])\n # Save the list of IPs to a file\n f=open(u'masin.txt', 'w')\n for s in ipmas:\n f.write(s+'\\n')\n f.close()\n self.gocommand()\n \n def mylisten(self, data):\n global outputtext\n t=str(data[0])\n t=t.replace('b\\'\\'','')\n t=t.replace('b\\'','')\n t=t.replace('\\\\n\\'','')\n t=t.replace('\\\\r','
')\n outputtext+='
'+t\n self.ui.textEdit.setText(outputtext)\n self.ui.textEdit.update()\n self.ui.textEdit.moveCursor(QtGui.QTextCursor.End)\n self.ui.textEdit.ensureCursorVisible()\n \n \n\nif __name__==\"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n myapp = MyWin()\n myapp.show()\n sys.exit(app.exec_())\n","sub_path":"solscan.py","file_name":"solscan.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"434334227","text":"import torch\nimport torch.nn as nn\n\nfrom graph import *\nfrom utils import *\n\nclass GraphConvNet3D(nn.Module):\n def __init__(self, in_channels, out_channels, s_kernel_size=1, t_kernel_size=1, t_stride=1, t_padding=0, t_dilation=1, bias=True):\n super(GraphConvNet3D, self).__init__()\n\n self.s_kernel_size = s_kernel_size\n self.conv = nn.Conv3d(in_channels, out_channels*s_kernel_size,\n kernel_size=(t_kernel_size, 1, 1),\n padding=(t_padding, 0, 0),\n stride=(t_stride, 1, 1),\n dilation=(t_dilation, 1, 1),\n bias=bias)\n\n \n def forward(self, x, A):\n assert A.size(1) == self.s_kernel_size\n\n x = self.conv(x)\n\n n, kc, t, u, v = x.size()\n x = x.view(n, self.s_kernel_size, kc // self.s_kernel_size, t, u, v)\n\n # only consider one s kernel:\n # x = x.sum(dim=1, keepdim=True)\n\n x = torch.einsum('nkctuv, nkvw->nctuw', (x, A))\n\n return x.contiguous(), A\n\n\nclass ST_GCN3D(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, dropout=0, residual=True):\n super(ST_GCN3D, self).__init__()\n\n assert len(kernel_size) == 2\n assert kernel_size[0] % 2 == 1\n padding = ((kernel_size[0]-1) // 2, 0, 0)\n\n self.gcn = GraphConvNet3D(in_channels, out_channels, kernel_size[1])\n\n self.tcn = nn.Sequential(\n nn.BatchNorm3d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv3d(\n out_channels,\n out_channels,\n (kernel_size[0], 1, 1),\n (stride, 1, 1),\n padding\n ),\n nn.BatchNorm3d(out_channels),\n nn.Dropout(dropout, inplace=True)\n )\n\n if not residual:\n self.residual = lambda x: 0\n elif (in_channels == out_channels) and (stride == 1):\n self.residual = lambda x: x\n else:\n self.residual = nn.Sequential(\n nn.Conv3d(\n in_channels,\n out_channels,\n kernel_size=1,\n stride=(stride, 1, 1)\n ),\n nn.BatchNorm3d(out_channels)\n )\n \n self.relu = nn.ReLU()\n\n \n def forward(self, x, A):\n res = self.residual(x)\n x, A = self.gcn(x, A)\n x = self.relu(self.tcn(x)+res)\n\n return x, A\n\n\nclass STGCN3DModule(nn.Module):\n def __init__(self, in_channels, cell_input_dim, spatial_kernel_size, temporal_kernel_size, dropout=0, residual=True):\n super(STGCN3DModule, self).__init__()\n\n kernel_size = (temporal_kernel_size, spatial_kernel_size)\n\n self.st_gcn3d_modules = nn.ModuleList((\n ST_GCN3D(in_channels, 64, kernel_size, stride=1, dropout=0, residual=False),\n ST_GCN3D(64, 64, kernel_size, stride=1, dropout=dropout, residual=residual),\n #ST_GCN3D(64, 64, kernel_size, stride=1, dropout=dropout, residual=residual),\n #ST_GCN3D(64, 64, kernel_size, stride=1, dropout=dropout, residual=residual),\n ST_GCN3D(64, 128, kernel_size, stride=2, dropout=dropout, residual=residual),\n ST_GCN3D(128, 128, kernel_size, stride=1, dropout=dropout, residual=residual),\n #ST_GCN3D(128, 128, kernel_size, stride=1, dropout=dropout, residual=residual),\n #ST_GCN3D(128, cell_input_dim, kernel_size, stride=2, dropout=dropout, residual=residual),\n #ST_GCN3D(cell_input_dim, cell_input_dim, kernel_size, stride=1, dropout=dropout, residual=residual),\n #ST_GCN3D(cell_input_dim, 
cell_input_dim, kernel_size, stride=1, dropout=dropout, residual=residual)\n ))\n\n \n def forward(self, x, A):\n N, C, T, U, V = x.size()\n # x = x.permute(0, 3, 4, 1, 2).contiguous()\n # x = x.view(N, U*V*C, T)\n # data_bn = nn.BatchNorm1d(U*V*C, affine=False).to(x)\n # x = data_bn(x) \n # x = x.view(N, U, V, C, T)\n # x = x.permute(0, 3, 4, 1, 2).contiguous()\n\n for gcn in self.st_gcn3d_modules:\n x, _ = gcn(x, A)\n \n _, C, T, _, _ = x.size()\n x = x.permute(0, 4, 1, 2, 3).contiguous()\n x = x.view(N*V, C, T, U)\n\n data_pool = nn.AvgPool2d((T, U)).to(x)\n x = data_pool(x)\n x = x.view(-1, V, C)\n\n return x\n","sub_path":"Models/stgcn3d_gep_alt1/st_gcn3d.py","file_name":"st_gcn3d.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"48569089","text":"# Draw Rose\nimport turtle\n\ndef spiral(size, turn, color, width, speed):\n t = turtle.Turtle()\n t.color(color)\n t.width(width)\n t.speed(speed)\n for n in range(size):\n t.forward(n)\n t.right(turn)\n\n turtle.done()\n\nspiral(200, 70, \"green\", 1, 0)\nturtle.done()","sub_path":"2d_lesson.py","file_name":"2d_lesson.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"646054558","text":"from flask import Flask, render_template, jsonify\nfrom common import External\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n return render_template('layout.html')\n\n\n@app.route(\"/graph//\", defaults={'gender': 'ALL'})\n@app.route(\"/graph//\")\ndef graph(length, gender):\n if length == '10km':\n e = External()\n e.request(External.KEY_10KM, gender)\n e.calculate(5, 16, 0, 30)\n elif length == '21km':\n e = External()\n e.request(External.KEY_21KM, gender)\n e.calculate(5, 24, 1, 5)\n else:\n e = External()\n e.request(External.KEY_42KM, gender)\n e.calculate(10, 20, 2, 40)\n\n labels = []\n series = []\n for c in e.ranges:\n labels.append(c.label())\n series.append(c.count)\n\n return jsonify(labels=labels, series=[series])\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"80896528","text":"import matplotlib.pyplot as plt\nfrom matplotlib import *\nfrom numpy import *\nfrom matplotlib.animation import *\n\nname = \"Log Spiral\"\ndef shape(fig, edge_c, edge_w, grid, radius):\n\tdef r_(u):\n\t\tr = exp(a *u)\n\t\treturn r\n\n\ta = radius\n\tu = linspace(-5 *pi, pi,1000)\n\tr = r_(u)\n\n\tplt.subplot(111, projection='polar')\n\tplt.plot(u, r)\n","sub_path":"src/GUI/compile_space/log_spiral.py","file_name":"log_spiral.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"493151718","text":"import os, sys\n\ndef prn(s):\n sys.stdout.write(s)\nif __name__ == '__main__':\n if (len(sys.argv) != 2):\n print(\"usage: process file.html\")\n sys.exit(-1)\n f = open(sys.argv[1], 'r')\n for line in f:\n t = line.strip()\n if (t[0:8] != '#include'):\n prn(line)\n continue\n textf = t[8:].strip()\n incf = open(textf, 'r')\n for line in incf:\n prn(line)\n f.close()\n \n","sub_path":"page-gen/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"601563517","text":"#!/bin/python3\n\n# https://www.hackerrank.com/challenges/py-if-else/problem\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef py_if_else(n):\n if N % 2 > 0:\n print('Weird')\n elif 2 <= N <= 5:\n print('Not Weird')\n elif 6 <= N <= 20:\n print('Weird')\n elif N >= 20:\n print('Not Weird')\n\n\nif __name__ == '__main__':\n N = int(input())\n py_if_else(N)\n","sub_path":"hackerrank/py_if_else.py","file_name":"py_if_else.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"128632124","text":"import os\nimport filecmp\nfrom dateutil.relativedelta import *\nfrom datetime import date\n\n\ndef getData(file):\n# get a list of dictionary objects from the file\n#Input: file name\n#Ouput: return a list of dictionary objects where\n#the keys are from the first row in the data. and the values are each of the other rows\n\topenfile = open(file, \"r\")\n\tfirstline = openfile.readline()\n\tcleanfline = firstline.strip(\"\\n\")\n\tflinelist = cleanfline.split(\",\")\n\n\tmylist = []\n\n\tlines = openfile.readlines()\n\topenfile.close()\n\n\tmylist = []\n\n\tfor currentline in lines:\n\t\tdic = {}\n\t\tcleanline = currentline.strip(\"\\n\")\n\t\tvalues = cleanline.split(\",\")\n\n\t\tfirst = values[0]\n\t\tlast = values[1]\n\t\temail = values[2]\n\t\twhatclass = values[3]\n\t\tdob = values [4]\n\n\t\tdic[flinelist[0]] = first\n\t\tdic[flinelist[1]] = last\n\t\tdic[flinelist[2]] = email\n\t\tdic[flinelist[3]] = whatclass\n\t\tdic[flinelist[4]] = dob\n\n\t\tmylist.append(dic)\n\n\treturn(mylist)\n\ndef mySort(data,col):\n# Sort based on key/column\n#Input: list of dictionaries and col (key) to sort on\n#Output: Return the first item in the sorted list as a string of just: firstName lastName\n\tsorteddata = sorted(data, key=lambda x:x[col])\n\titem = sorteddata[0]\n\n\treturn (item[\"First\"] + \" \" + item[\"Last\"])\n\n\ndef classSizes(data):\n# Create a histogram\n# Input: list of dictionaries\n# Output: Return a list of tuples sorted by the number of students in that class in\n# descending order\n# [('Senior', 26), ('Junior', 25), ('Freshman', 21), ('Sophomore', 18)]\n\t#sorteddata = sorted(data, key=lambda x:x[\"Class\"] )\n\tclassdic = {}\n\tclassdic[\"Freshman\"] = 0\n\tclassdic[\"Sophomore\"] = 0\n\tclassdic[\"Junior\"] = 0\n\tclassdic[\"Senior\"] = 0\n\n\tfor x in data:\n\t\tif x[\"Class\"] == \"Freshman\":\n\t\t\tclassdic[\"Freshman\"] += 1\n\t\telif x[\"Class\"] == \"Sophomore\":\n\t\t\tclassdic[\"Sophomore\"] += 1\n\t\telif x[\"Class\"] == \"Junior\":\n\t\t\tclassdic[\"Junior\"] += 1\n\t\telif x[\"Class\"]\t== \"Senior\":\n\t\t\tclassdic[\"Senior\"] += 1\n\n\tsorted_classlist = sorted(classdic.items(), key=lambda x:x[1], reverse=True)\n\treturn (sorted_classlist)\n\n\ndef findMonth(a):\n# Find the most common birth month form this data\n# Input: list of dictionaries\n# Output: Return the month (1-12) that had the most births in the data\n\tDOB_dic = {}\n\n\tfor x in a:\n\t\tsplit_DOB = x[\"DOB\"].split(\"/\")\n\t\tmonth = int(split_DOB[0])\n\t\tif month not in DOB_dic:\n\t\t\tDOB_dic[month] = 1\n\t\telse:\n\t\t\tDOB_dic[month] += 1\n\n\tsorted_DOB = sorted(DOB_dic.items(), key=lambda x:x[1], reverse=True)\n\n\treturn (sorted_DOB[0][0])\n\ndef mySortPrint(a,col,fileName):\n#Similar to mySort, but instead of returning single\n#Student, the sorted data is saved to a csv file.\n# as fist,last,email\n#Input: list of dictionaries, col (key) to sort by and 
output file name\n#Output: No return value, but the file is written\n\toutFile = open(fileName, \"w\")\n\tsorteddata = sorted(a, key=lambda x:x[col])\n\tfor x in sorteddata:\n\t\tfirst = x[\"First\"]\n\t\tlast = x[\"Last\"]\n\t\temail = x[\"Email\"]\n\t\toutFile.write(first + \",\" + last + \",\" + email + \"\\n\")\n\n\toutFile.close()\n\ndef findAge(a):\n# Input: list of dictionaries\n# Output: Return the average age of the students and round that age to the nearest\n# integer. You will need to work with the DOB and the current date to find the current\n# age in years.\n\tpass\n\n\n\n################################################################\n## DO NOT MODIFY ANY CODE BELOW THIS\n################################################################\n\n## We have provided simple test() function used in main() to print what each function returns vs. what it's supposed to return.\ndef test(got, expected, pts):\n score = 0;\n if got == expected:\n score = pts\n print(\" OK \", end=\" \")\n else:\n print (\" XX \", end=\" \")\n print(\"Got: \",got, \"Expected: \",expected)\n return score\n\n\n# Provided main() calls the above functions with interesting inputs, using test() to check if each result is correct or not.\ndef main():\n\ttotal = 0\n\tprint(\"Read in Test data and store as a list of dictionaries\")\n\tdata = getData('P1DataA.csv')\n\tdata2 = getData('P1DataB.csv')\n\ttotal += test(type(data),type([]),50)\n\n\tprint()\n\tprint(\"First student sorted by First name:\")\n\ttotal += test(mySort(data,'First'),'Abbot Le',25)\n\ttotal += test(mySort(data2,'First'),'Adam Rocha',25)\n\n\tprint(\"First student sorted by Last name:\")\n\ttotal += test(mySort(data,'Last'),'Elijah Adams',25)\n\ttotal += test(mySort(data2,'Last'),'Elijah Adams',25)\n\n\tprint(\"First student sorted by Email:\")\n\ttotal += test(mySort(data,'Email'),'Hope Craft',25)\n\ttotal += test(mySort(data2,'Email'),'Orli Humphrey',25)\n\n\tprint(\"\\nEach grade ordered by size:\")\n\ttotal += test(classSizes(data),[('Junior', 28), ('Senior', 27), ('Freshman', 23), ('Sophomore', 22)],25)\n\ttotal += test(classSizes(data2),[('Senior', 26), ('Junior', 25), ('Freshman', 21), ('Sophomore', 18)],25)\n\n\tprint(\"\\nThe most common month of the year to be born is:\")\n\ttotal += test(findMonth(data),3,15)\n\ttotal += test(findMonth(data2),3,15)\n\n\tprint(\"\\nSuccessful sort and print to file:\")\n\tmySortPrint(data,'Last','results.csv')\n\tif os.path.exists('results.csv'):\n\t\ttotal += test(filecmp.cmp('outfile.csv', 'results.csv'),True,20)\n\n\tprint(\"\\nTest of extra credit: Calculate average age\")\n\ttotal += test(findAge(data), 40, 5)\n\ttotal += test(findAge(data2), 42, 5)\n\n\tprint(\"Your final score is \" + str(total))\n\n# Standard boilerplate to call the main() function that tests all your code\nif __name__ == '__main__':\n main()\n","sub_path":"project1-206.py","file_name":"project1-206.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"442480660","text":"\nfrom rest_framework import serializers\n\nfrom goods.models import Goods\n\n\nclass GoodsSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        # specify the model to be serialized\n        model = Goods\n        # specify which fields to serialize\n        fields = ['id', 'category', 'name', 'goods_sn',\n                  'click_nums', 'sold_nums', 'fav_nums',\n                  'goods_nums', 'market_price', 'shop_price',\n                  'goods_brief', 'goods_desc', 'ship_free',\n                  'goods_front_image', 'is_new', 'is_hot', 
'add_time']","sub_path":"5.django/fresh_shop_back/goods/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"137704599","text":"#! python3\n# File: autobackup.py\n\nimport dirsync\nimport os\n\nsrc = \"Path_To_Folder_Being_Backedup\"\ndest = \"Path_To_External_Drive\"\n\nif os.path.exists(dest):\n dirsync.sync(src, dest, 'sync', purge=True)\nelse:\n print(\"Backup storage not found. Exiting...\")\n","sub_path":"autobackup.py","file_name":"autobackup.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"161507644","text":"# Copyright 2019 The Pontem Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cloud Key Management Service utility Module.\"\"\"\n\nimport google.api_core.gapic_v1.client_info\nimport google.auth\nfrom google.cloud import exceptions\nfrom google.cloud import kms_v1\nfrom google.cloud.kms_v1 import enums\n\nfrom google.cloud.pontem.sql.replicator.util import gcp_api_util\n\n\nclass KeyNotEnabledError(ValueError):\n \"\"\"Raised if the key specified is not enabled.\"\"\"\n\n\ndef build_kms_client(credentials=None):\n \"\"\"Builds Authorized Storage.Client with custom user agent.\n\n Args:\n credentials (google.auth.Credentials): credentials to authorize client.\n\n Returns:\n KeyManagementServiceClient: returns authorized KMS client with CloudSQL\n Replicator user agent.\n \"\"\"\n default_credentials, _ = google.auth.default()\n\n client_info = google.api_core.gapic_v1.client_info.ClientInfo(\n client_library_version=gcp_api_util.get_user_agent())\n\n kms_client = kms_v1.KeyManagementServiceClient(\n credentials=credentials or default_credentials,\n client_info=client_info)\n\n return kms_client\n\n\ndef create_key_ring(key_ring_id, location='global',\n project=None, credentials=None):\n \"\"\"Create a key ring at the specified location\n\n Args:\n key_ring_id (str): Unique identifier for keyring.\n location (str): Location where the key will be created.\n project (str): Project ID where keyring will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n Returns:\n JSON: Keyring that was created.\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n parent = kms_client.location_path(project or default_project, location)\n keyring_name = kms_client.key_ring_path(project or default_project,\n location, key_ring_id)\n keyring = {'name': keyring_name}\n\n response = kms_client.create_key_ring(parent, key_ring_id, keyring)\n return response\n\n\ndef create_key(key_id, key_ring_id, location='global',\n project=None, credentials=None):\n \"\"\"Creates a crypto key at the specified location\n\n Args:\n key_id (str): Unique id for the key to be created.\n key_ring_id (str): Unique identifier for keyring.\n location (str): Location where the 
key will be created.\n project (str): Project ID where keyring will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n\n Returns:\n JSON: Key that was created.\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n parent = kms_client.key_ring_path(project or default_project,\n location, key_ring_id)\n\n purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT\n crypto_key = {'purpose': purpose}\n\n response = kms_client.create_crypto_key(parent, key_id, crypto_key)\n return response\n\n\ndef encrypt(plaintext, key_id, key_ring_id,\n location='global', project=None, credentials=None):\n \"\"\"Encrypts plain text.\n\n Args:\n plaintext (str): Plain text to encrypt.\n key_id (str): Unique id for the key to be created.\n key_ring_id (str): Unique identifier for keyring.\n location (str): Location where the key will be created.\n project (str): Project ID where keyring will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n Returns:\n str: Encrypted cipher text.\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n\n name = kms_client.crypto_key_path_path(project or default_project, location,\n key_ring_id, key_id)\n response = kms_client.encrypt(name, plaintext)\n return response.ciphertext\n\n\ndef decrypt(ciphertext, key_id, key_ring_id,\n location='global', project=None, credentials=None):\n \"\"\"Decrypts cipher text.\n\n Args:\n ciphertext (str): Cipher text to decrypt.\n key_id (str): Unique id for the key to be created.\n key_ring_id (str): Unique identifier for keyring.\n location (str): Location where the key will be created.\n project (str): Project ID where key will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n Returns:\n str: Decrypted plain text.\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n\n name = kms_client.crypto_key_path_path(project or default_project, location,\n key_ring_id, key_id)\n response = kms_client.decrypt(name, ciphertext)\n return response.plaintext\n\n\ndef add_member_to_crypto_key_policy(member, role, key_id, key_ring_id,\n location='global', project=None,\n credentials=None):\n \"\"\"Adds a member of a role to crypto key policy.\n\n Args:\n member (str): Email address of member to add.\n role (str): Role member is in.\n key_id (str): Unique identifier for key.\n key_ring_id (str): Unique identifier for keyring.\n location (str): Where key exists.\n project (str): Project ID where keyring will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n\n Returns:\n JSON: Modified policy object\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n resource = kms_client.crypto_key_path_path(project or default_project,\n location, key_ring_id, key_id)\n policy = kms_client.get_iam_policy(resource)\n policy.bindings.add(\n role=role,\n members=[member])\n\n kms_client.set_iam_policy(resource, policy)\n return policy\n\n\ndef key_ring_exists(key_ring_id, location='global',\n project=None, credentials=None):\n \"\"\"Checks if a key ring exists.\n\n Args:\n key_ring_id (str): Unique identifier for keyring.\n location (str): Where key exists.\n project (str): Project ID where keyring will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n\n Returns:\n bool: Returns True if key exists, False 
otherwise.\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n key_ring_path = kms_client.key_ring_path(project or default_project,\n location, key_ring_id)\n try:\n _ = kms_client.get_key_ring(key_ring_path)\n except exceptions.NotFound:\n return False\n return True\n\n\ndef key_exists(key_id, key_ring_id, version_id='1',\n location='global', project=None, credentials=None):\n \"\"\"Checks if a key exists.\n\n Args:\n key_id (str): Unique identifier for key.\n key_ring_id (str): Unique identifier for keyring.\n version_id (str): Version of key to check\n location (str): Where key exists.\n project (str): Project ID where keyring will be created.\n credentials (google.auth.Credentials): credentials to authorize client.\n\n Raises:\n KeyNotEnabledError: Raises error if key exists, but is not enabled.\n\n Returns:\n bool: Returns True if key exists, False otherwise.\n \"\"\"\n _, default_project = google.auth.default()\n kms_client = build_kms_client(credentials)\n key_version_path = (\n kms_client.crypto_key_version_path(project or default_project,\n location, key_ring_id,\n key_id, version_id)\n )\n try:\n key_version = kms_client.get_crypto_key_version(key_version_path)\n # the version's state field must be ENABLED; anything else is unusable\n if (key_version.state !=\n enums.CryptoKeyVersion.CryptoKeyVersionState.ENABLED):\n raise KeyNotEnabledError(\n 'Key at {} is not enabled.'.format(key_version_path)\n )\n except exceptions.NotFound:\n return False\n return True\n","sub_path":"CloudSQLReplicator/google/cloud/pontem/sql/replicator/util/kms.py","file_name":"kms.py","file_ext":"py","file_size_in_byte":8754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"389901258","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('archive', '0013_auto_20151124_0942'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='accession',\n name='affiliation',\n field=models.CharField(max_length=5, choices=[(b'STU', b'Student'), (b'FAC', b'Faculty'), (b'STA', b'Staff'), (b'ALU', b'Alumni'), (b'OTH', b'Other')]),\n ),\n ]\n","sub_path":"archive/migrations/0014_auto_20151210_1625.py","file_name":"0014_auto_20151210_1625.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"219365547","text":"from setuptools import setup, find_packages\n\nVERSION = '1.1.0'\nDESCRIPTION = 'The package allows to download, process and visualize climatological data from reliable sources'\nREADME = open('README.md', 'r', encoding='utf8').read()\n\nsetup(\n name='cloupy',\n version=VERSION,\n description=DESCRIPTION,\n long_description=README,\n long_description_content_type='text/markdown',\n author='Kamil Grala',\n author_email='kamil.grala32466@gmail.com',\n url='https://github.com/pdGruby/cloupy',\n license='MIT',\n packages=['cloupy'],\n install_requires=[\n 'pandas>=1.3.3,<=1.3.5',\n 'matplotlib>=3.4.3,<=3.5.1',\n 'requests>=2.26.0,<=2.27.1',\n 'beautifulsoup4>=4.9.3,<=4.10.0',\n 'numpy>=1.21.4,<=1.22.1',\n 'pyshp==2.1.3',\n 'pyproj>=3.2.1,<=3.3.0',\n 'scipy>=1.7.2,<=1.7.3',\n 'Pillow>=8.4.0,<=9.0.0',\n 'cycler==0.11.0'\n ],\n tests_require=[\n 'pytest>=6.2.5',\n 'mock>=4.0.3'\n ],\n package_data={\n 'cloupy': [\n 'data_processing/*',\n 'maps/*',\n 'maps/world/*',\n 'scraping/*',\n 'diagrams/*',\n 'test/test_integration/*',\n 'test/test_unit/*',\r\n 
],\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: POSIX :: Linux',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Framework :: Matplotlib',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Visualization'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"79206147","text":"import sys\nimport time\n\nfrom PyQt5.QtCore import *\n\nfrom PyQt5.QtWidgets import *\n\nfrom playsound import playsound\n\n#\napp =QApplication(sys.argv)\n\n\ndue = input (\"Enter time for alert : (hh:mm) : \")\n\n\nmessage = input(\"Enter the message for alert : \")\n\ntry:\n\t# split the time string to hours and seconds\n\thours, mins= due.split(\":\")\n\t# convert hours and mins to integers and pass it to the Qtime function\n\tdue=QTime(int(hours), int(mins))\n\n\tif not due.isValid():\n\t\traise ValueError\n\nexcept ValueError:\n\tmessage=\"The value entered is not valid\"\n\n# checks the time from due and checks it with the current time. if current time is less than the due time\n# it helps to share resourses for cpu , instead of just allocating it to this program\nwhile QTime.currentTime() \"+ message + \" None:\n super().__post_init__()\n self._client_method = self.api_identifier + \"s\"\n self._get_identifier = self.api_identifier\n self._list_identifier = self.api_identifier\n self._update_identifier = self.api_identifier\n self._patch_identifier = self.api_identifier\n self._delete_identifier = self.api_identifier\n self._set_label_identifier = self.api_identifier\n self._check_region_resource()\n\n def _check_region_resource(self):\n \"\"\"Checks if the resource is a regional or a zonal one.\n\n If the resource has no zone but a region assigned and is part of\n the list of `regional_resources` above we will update the\n client method name as regional resources have their own API\n methods.\n \"\"\"\n if (\n self.id != \"\"\n and self.zone().name == \"undefined\"\n and self.region().name != \"undefined\"\n and self.kind in regional_resources\n ):\n self._client_method = (\n \"region\" + self._client_method[0].upper() + self._client_method[1:]\n )\n\n def delete(self, graph) -> bool:\n return delete_resource(self)\n\n def update_tag(self, key, value) -> bool:\n return update_label(self, key, value)\n\n def delete_tag(self, key) -> bool:\n return update_label(self, key, None)\n\n\n@dataclass(eq=False)\nclass GCPProject(GCPResource, BaseAccount):\n kind: ClassVar[str] = \"gcp_project\"\n api_identifier: ClassVar[str] = \"project\"\n\n\n@dataclass(eq=False)\nclass GCPZone(GCPResource, BaseZone):\n kind: ClassVar[str] = \"gcp_zone\"\n api_identifier: ClassVar[str] = \"zone\"\n zone_status: Optional[str] = None\n\n\n@dataclass(eq=False)\nclass GCPRegion(GCPResource, BaseRegion):\n kind: ClassVar[str] = \"gcp_region\"\n api_identifier: ClassVar[str] = \"region\"\n region_status: Optional[str] = None\n quotas: InitVar[List[str]] = None\n\n def __post_init__(self, quotas: List[str]) -> None:\n super().__post_init__()\n if quotas is not None:\n 
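# keep the per-region quota list exactly as the API reported it\n 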
self._quotas = quotas\n else:\n self._quotas = []\n\n\n@dataclass(eq=False)\nclass GCPDiskType(GCPResource, BaseVolumeType):\n kind: ClassVar[str] = \"gcp_disk_type\"\n api_identifier: ClassVar[str] = \"diskType\"\n\n\n@dataclass(eq=False)\nclass GCPDisk(GCPResource, BaseVolume):\n kind: ClassVar[str] = \"gcp_disk\"\n api_identifier: ClassVar[str] = \"disk\"\n\n volume_status_map: ClassVar[Dict[str, VolumeStatus]] = {\n \"CREATING\": VolumeStatus.BUSY,\n \"RESTORING\": VolumeStatus.BUSY,\n \"FAILED\": VolumeStatus.ERROR,\n \"READY\": VolumeStatus.IN_USE,\n \"AVAILABLE\": VolumeStatus.AVAILABLE,\n \"DELETING\": VolumeStatus.BUSY,\n \"busy\": VolumeStatus.BUSY,\n \"in-use\": VolumeStatus.IN_USE,\n \"available\": VolumeStatus.AVAILABLE,\n \"error\": VolumeStatus.ERROR,\n \"deleted\": VolumeStatus.DELETED,\n }\n\n last_attach_timestamp: Optional[datetime] = None\n last_detach_timestamp: Optional[datetime] = None\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._set_label_identifier = \"resource\"\n self.last_attach_timestamp = make_valid_timestamp(self.last_attach_timestamp)\n self.last_detach_timestamp = make_valid_timestamp(self.last_detach_timestamp)\n\n # last_activity = (\n # self.last_detach_timestamp\n # if self.last_detach_timestamp > self.last_attach_timestamp\n # else self.last_attach_timestamp\n # )\n # if self.volume_status == \"available\":\n # self.atime = self.mtime = last_activity\n\n if isinstance(self.volume_type, BaseResource):\n self.volume_type = self.volume_type.name\n\n @property\n def last_attach(self) -> timedelta:\n now = datetime.utcnow().replace(tzinfo=timezone.utc)\n return now - self.last_attach_timestamp\n\n @property\n def last_detach(self) -> timedelta:\n now = datetime.utcnow().replace(tzinfo=timezone.utc)\n return now - self.last_detach_timestamp\n\n def _volume_status_setter(self, value: str) -> None:\n self._volume_status = self.volume_status_map.get(value, VolumeStatus.UNKNOWN)\n\n\nGCPDisk.volume_status = property(\n GCPDisk._volume_status_getter, GCPDisk._volume_status_setter\n)\n\n\n@dataclass(eq=False)\nclass GCPInstance(GCPResource, BaseInstance):\n kind: ClassVar[str] = \"gcp_instance\"\n api_identifier: ClassVar[str] = \"instance\"\n\n instance_status_map: ClassVar[Dict[str, InstanceStatus]] = {\n \"PROVISIONING\": InstanceStatus.BUSY,\n \"STAGING\": InstanceStatus.BUSY,\n \"RUNNING\": InstanceStatus.RUNNING,\n \"STOPPING\": InstanceStatus.BUSY,\n \"SUSPENDING\": InstanceStatus.BUSY,\n \"SUSPENDED\": InstanceStatus.STOPPED,\n \"REPAIRING\": InstanceStatus.BUSY,\n \"TERMINATED\": InstanceStatus.TERMINATED,\n \"busy\": InstanceStatus.BUSY,\n \"running\": InstanceStatus.RUNNING,\n \"stopped\": InstanceStatus.STOPPED,\n \"terminated\": InstanceStatus.TERMINATED,\n }\n\n network_interfaces: Optional[str] = None\n machine_type_link: InitVar[str] = None\n machine_type: InitVar[BaseInstanceType] = None\n\n def __post_init__(\n self, machine_type_link: str, machine_type: BaseInstanceType\n ) -> None:\n super().__post_init__()\n self._machine_type_link = machine_type_link\n self._machine_type = machine_type\n\n def _instance_status_setter(self, value: str) -> None:\n self._instance_status = self.instance_status_map.get(\n value, InstanceStatus.UNKNOWN\n )\n\n @property\n def _machine_type(self) -> Optional[BaseInstanceType]:\n # hasattr() does not apply name mangling to its string argument,\n # so the mangled attribute name must be spelled out explicitly;\n # checking for \"__machine_type\" would never match.\n if hasattr(self, \"_GCPInstance__machine_type\"):\n return self.__machine_type\n\n @_machine_type.setter\n def _machine_type(self, value: BaseInstanceType) -> None:\n if isinstance(value, BaseInstanceType):\n 
self.__machine_type = value\n self.instance_cores = value.instance_cores\n self.instance_memory = value.instance_memory\n self.instance_type = value.name\n\n\nGCPInstance.instance_status = property(\n GCPInstance._instance_status_getter, GCPInstance._instance_status_setter\n)\n\n\n@dataclass(eq=False)\nclass GCPNetwork(GCPResource, BaseNetwork):\n kind: ClassVar[str] = \"gcp_network\"\n api_identifier: ClassVar[str] = \"network\"\n\n\n@dataclass(eq=False)\nclass GCPSubnetwork(GCPResource, BaseSubnet):\n kind: ClassVar[str] = \"gcp_subnetwork\"\n api_identifier: ClassVar[str] = \"subnetwork\"\n\n\n@dataclass(eq=False)\nclass GCPVPNTunnel(GCPResource, BaseTunnel):\n kind: ClassVar[str] = \"gcp_vpn_tunnel\"\n api_identifier: ClassVar[str] = \"vpnTunnel\"\n\n\n@dataclass(eq=False)\nclass GCPVPNGateway(GCPResource, BaseGateway):\n kind: ClassVar[str] = \"gcp_vpn_gateway\"\n api_identifier: ClassVar[str] = \"vpnGateway\"\n\n\n@dataclass(eq=False)\nclass GCPTargetVPNGateway(GCPResource, BaseGateway):\n kind: ClassVar[str] = \"gcp_target_vpn_gateway\"\n api_identifier: ClassVar[str] = \"targetVpnGateway\"\n\n\n@dataclass(eq=False)\nclass GCPRouter(GCPResource, BaseGateway):\n kind: ClassVar[str] = \"gcp_router\"\n api_identifier: ClassVar[str] = \"router\"\n\n\n@dataclass(eq=False)\nclass GCPRoute(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_route\"\n api_identifier: ClassVar[str] = \"route\"\n\n\n@dataclass(eq=False)\nclass GCPInstanceTemplate(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_instance_template\"\n api_identifier: ClassVar[str] = \"instanceTemplate\"\n\n\n@dataclass(eq=False)\nclass GCPSecurityPolicy(GCPResource, BasePolicy):\n kind: ClassVar[str] = \"gcp_security_policy\"\n api_identifier: ClassVar[str] = \"securityPolicy\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._client_method = \"securityPolicies\"\n\n\n@dataclass(eq=False)\nclass GCPSnapshot(GCPResource, BaseSnapshot):\n kind: ClassVar[str] = \"gcp_snapshot\"\n api_identifier: ClassVar[str] = \"snapshot\"\n\n storage_bytes: int = 0\n\n def __post_init__(self) -> None:\n super().__post_init__()\n if isinstance(self.volume_id, BaseResource):\n self.volume_id = self.volume_id.name\n\n\n@dataclass(eq=False)\nclass GCPSSLCertificate(GCPResource, BaseCertificate):\n kind: ClassVar[str] = \"gcp_ssl_certificate\"\n api_identifier: ClassVar[str] = \"sslCertificate\"\n\n description: Optional[str] = None\n certificate: Optional[str] = None\n certificate_type: Optional[str] = None\n certificate_managed: Optional[Dict] = None\n subject_alternative_names: Optional[List[str]] = None\n\n\n@dataclass(eq=False)\nclass GCPMachineType(GCPResource, BaseInstanceType):\n kind: ClassVar[str] = \"gcp_machine_type\"\n api_identifier: ClassVar[str] = \"machineType\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self.instance_type = self.name\n\n\n@dataclass(eq=False)\nclass GCPNetworkEndpointGroup(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_network_endpoint_group\"\n api_identifier: ClassVar[str] = \"networkEndpointGroup\"\n\n default_port: int = -1\n neg_type: str = \"\"\n\n\n@dataclass(eq=False)\nclass GCPGlobalNetworkEndpointGroup(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_global_network_endpoint_group\"\n api_identifier: ClassVar[str] = \"globalNetworkEndpointGroup\"\n\n default_port: int = -1\n neg_type: str = \"\"\n\n\n@dataclass(eq=False)\nclass GCPInstanceGroup(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_instance_group\"\n 
api_identifier: ClassVar[str] = \"instanceGroup\"\n\n\n@dataclass(eq=False)\nclass GCPInstanceGroupManager(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_instance_group_manager\"\n api_identifier: ClassVar[str] = \"instanceGroupManager\"\n\n\n@dataclass(eq=False)\nclass GCPAutoscaler(GCPResource, BaseAutoScalingGroup):\n kind: ClassVar[str] = \"gcp_autoscaler\"\n api_identifier: ClassVar[str] = \"autoscaler\"\n\n\n@dataclass(eq=False)\nclass GCPHealthCheck(GCPResource, BaseHealthCheck):\n kind: ClassVar[str] = \"gcp_health_check\"\n api_identifier: ClassVar[str] = \"healthCheck\"\n\n\n@dataclass(eq=False)\nclass GCPHTTPHealthCheck(GCPResource, BaseHealthCheck):\n kind: ClassVar[str] = \"gcp_http_health_check\"\n api_identifier: ClassVar[str] = \"httpHealthCheck\"\n\n host: str = \"\"\n request_path: str = \"\"\n port: int = -1\n\n\n@dataclass(eq=False)\nclass GCPHTTPSHealthCheck(GCPHTTPHealthCheck):\n kind: ClassVar[str] = \"gcp_https_health_check\"\n api_identifier: ClassVar[str] = \"httpsHealthCheck\"\n\n\n@dataclass(eq=False)\nclass GCPUrlMap(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_url_map\"\n api_identifier: ClassVar[str] = \"urlMap\"\n\n\n@dataclass(eq=False)\nclass GCPTargetPool(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_pool\"\n api_identifier: ClassVar[str] = \"targetPool\"\n\n session_affinity: str = \"\"\n failover_ratio: float = -1.0\n\n\n@dataclass(eq=False)\nclass GCPTargetHttpProxy(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_http_proxy\"\n api_identifier: ClassVar[str] = \"targetHttpProxy\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._client_method = \"targetHttpProxies\"\n self._check_region_resource()\n\n\n@dataclass(eq=False)\nclass GCPTargetHttpsProxy(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_https_proxy\"\n api_identifier: ClassVar[str] = \"targetHttpsProxy\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._client_method = \"targetHttpsProxies\"\n self._check_region_resource()\n\n\n@dataclass(eq=False)\nclass GCPTargetSslProxy(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_ssl_proxy\"\n api_identifier: ClassVar[str] = \"targetSslProxy\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._client_method = \"targetSslProxies\"\n\n\n@dataclass(eq=False)\nclass GCPTargetTcpProxy(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_tcp_proxy\"\n api_identifier: ClassVar[str] = \"targetTcpProxy\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._client_method = \"targetTcpProxies\"\n\n\n@dataclass(eq=False)\nclass GCPTargetGrpcProxy(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_grpc_proxy\"\n api_identifier: ClassVar[str] = \"targetGrpcProxy\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self._client_method = \"targetGrpcProxies\"\n\n\n@dataclass(eq=False)\nclass GCPTargetInstance(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_target_instance\"\n api_identifier: ClassVar[str] = \"targetInstance\"\n\n\n@dataclass(eq=False)\nclass GCPQuota(GCPResource, BaseQuota):\n kind: ClassVar[str] = \"gcp_quota\"\n api_identifier: ClassVar[str] = \"dummy\"\n\n\n@dataclass(eq=False)\nclass GCPBackendService(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_backend_service\"\n api_identifier: ClassVar[str] = \"backendService\"\n\n\n@dataclass(eq=False)\nclass GCPForwardingRule(GCPResource, BaseLoadBalancer):\n kind: 
ClassVar[str] = \"gcp_forwarding_rule\"\n api_identifier: ClassVar[str] = \"forwardingRule\"\n\n ip_address: str = \"\"\n ip_protocol: str = \"\"\n load_balancing_scheme: str = \"\"\n network_tier: str = \"\"\n port_range: str = \"\"\n\n def __post_init__(self) -> None:\n super().__post_init__()\n self.lb_type = \"gcp\"\n\n\n@dataclass(eq=False)\nclass GCPGlobalForwardingRule(GCPForwardingRule):\n kind: ClassVar[str] = \"gcp_global_forwarding_rule\"\n api_identifier: ClassVar[str] = \"globalForwardingRule\"\n\n\n@dataclass(eq=False)\nclass GCPBucket(GCPResource, BaseBucket):\n kind: ClassVar[str] = \"gcp_bucket\"\n api_identifier: ClassVar[str] = \"bucket\"\n client = \"storage\"\n\n bucket_location: str = \"\"\n bucket_location_type: str = \"\"\n storage_class: str = \"\"\n zone_separation: bool = False\n\n def pre_delete(self, graph: Graph) -> bool:\n kwargs = {str(self._list_identifier): self.name}\n gs = gcp_service(self, graph=graph)\n for document in paginate(\n gcp_resource=gs.objects(),\n method_name=\"list\",\n items_name=\"items\",\n **kwargs,\n ):\n log.debug(\n f\"Removing {document['name']} in {self.rtdname} before resource cleanup\"\n )\n request = gs.objects().delete(object=document[\"name\"], **kwargs)\n request.execute()\n return True\n\n def delete(self, graph: Graph) -> bool:\n kwargs = {str(self._delete_identifier): self.name}\n gr = gcp_resource(self, graph=graph)\n request = gr.delete(**kwargs)\n request.execute()\n return True\n\n def update_tag(self, key, value) -> bool:\n kwargs = {str(self._patch_identifier): self.name}\n gr = gcp_resource(self)\n labels = dict(self.tags)\n labels[key] = value\n kwargs[\"body\"] = {\"labels\": labels}\n request = gr.patch(**kwargs)\n request.execute()\n return True\n\n def delete_tag(self, key) -> bool:\n return self.update_tag(key, None)\n\n\n@dataclass(eq=False)\nclass GCPDatabase(GCPResource, BaseDatabase):\n kind: ClassVar[str] = \"gcp_database\"\n api_identifier: ClassVar[str] = \"instance\"\n client: ClassVar[str] = \"sqladmin\"\n api_version: ClassVar[str] = \"v1beta4\"\n resource_args: ClassVar[List[str]] = [\"project\"]\n\n def update_tag(self, key, value) -> bool:\n kwargs = {str(self._patch_identifier): self.name}\n common_kwargs = common_resource_kwargs(self)\n kwargs.update(common_kwargs)\n gr = gcp_resource(self)\n labels = dict(self.tags)\n labels[key] = value\n kwargs[\"body\"] = {\"settings\": {\"userLabels\": labels}}\n request = gr.patch(**kwargs)\n request.execute()\n return True\n\n def delete_tag(self, key) -> bool:\n return self.update_tag(key, None)\n\n\n@dataclass(eq=False)\nclass GCPService(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_service\"\n api_identifier: ClassVar[str] = \"service\"\n client: ClassVar[str] = \"cloudbilling\"\n api_version: ClassVar[str] = \"v1\"\n resource_args: ClassVar[List[str]] = []\n\n\n@dataclass(eq=False)\nclass GCPServiceSKU(GCPResource, BaseResource):\n kind: ClassVar[str] = \"gcp_service_sku\"\n api_identifier: ClassVar[str] = \"service\"\n client: ClassVar[str] = \"cloudbilling\"\n api_version: ClassVar[str] = \"v1\"\n resource_args: ClassVar[List[str]] = []\n\n service: str = \"\"\n resource_family: Optional[str] = \"\"\n resource_group: Optional[str] = \"\"\n usage_type: Optional[str] = \"\"\n pricing_info: List = field(default_factory=list)\n service_provider_name: Optional[str] = \"\"\n geo_taxonomy_type: Optional[str] = None\n geo_taxonomy_regions: List = field(default_factory=list)\n\n def __post_init__(self) -> None:\n super().__post_init__()\n if 
self.pricing_info is None:\n self.pricing_info = []\n if self.geo_taxonomy_regions is None:\n self.geo_taxonomy_regions = []\n self.usage_unit_nanos = -1\n if len(self.pricing_info) > 0:\n tiered_rates = (\n self.pricing_info[0].get(\"pricingExpression\", {}).get(\"tieredRates\", [])\n )\n cost = -1\n if len(tiered_rates) == 1:\n cost = tiered_rates[0].get(\"unitPrice\", {}).get(\"nanos\", -1)\n else:\n for tiered_rate in tiered_rates:\n if tiered_rate.get(\"startUsageAmount\", -1) > 0:\n cost = tiered_rate.get(\"unitPrice\", {}).get(\"nanos\", -1)\n break\n if cost > -1:\n self.usage_unit_nanos = cost\n","sub_path":"plugins/gcp/cloudkeeper_plugin_gcp/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":19484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"480603023","text":"import asyncio\nimport pytest\nimport numpy as np\nimport sys\nimport time\nimport threading\nimport gc\nimport random\n\nfrom collections import Counter\n\nfrom unittest.mock import patch, Mock\n\nimport ray\nfrom ray._private.test_utils import wait_for_condition\nfrom ray.experimental.state.api import list_objects, list_actors\nfrom ray._raylet import StreamingObjectRefGenerator, ObjectRefStreamEndOfStreamError\nfrom ray.cloudpickle import dumps\nfrom ray.exceptions import WorkerCrashedError\n\nRECONSTRUCTION_CONFIG = {\n \"health_check_failure_threshold\": 10,\n \"health_check_period_ms\": 100,\n \"health_check_timeout_ms\": 100,\n \"health_check_initial_delay_ms\": 0,\n \"max_direct_call_object_size\": 100,\n \"task_retry_delay_ms\": 100,\n \"object_timeout_milliseconds\": 200,\n \"fetch_warn_timeout_milliseconds\": 1000,\n}\n\n\ndef assert_no_leak():\n gc.collect()\n core_worker = ray._private.worker.global_worker.core_worker\n ref_counts = core_worker.get_all_reference_counts()\n print(ref_counts)\n for rc in ref_counts.values():\n assert rc[\"local\"] == 0\n assert rc[\"submitted\"] == 0\n assert core_worker.get_memory_store_size() == 0\n\n\nclass MockedWorker:\n def __init__(self, mocked_core_worker):\n self.core_worker = mocked_core_worker\n\n def reset_core_worker(self):\n \"\"\"Emulate the case ray.shutdown is called\n and the core_worker instance is GC'ed.\n \"\"\"\n self.core_worker = None\n\n def check_connected(self):\n return True\n\n\n@pytest.fixture\ndef mocked_worker():\n mocked_core_worker = Mock()\n mocked_core_worker.try_read_next_object_ref_stream.return_value = None\n mocked_core_worker.delete_object_ref_stream.return_value = None\n mocked_core_worker.create_object_ref_stream.return_value = None\n mocked_core_worker.peek_object_ref_stream.return_value = [], []\n worker = MockedWorker(mocked_core_worker)\n yield worker\n\n\ndef test_streaming_object_ref_generator_basic_unit(mocked_worker):\n \"\"\"\n Verify the basic case:\n create a generator -> read values -> nothing more to read -> delete.\n \"\"\"\n with patch(\"ray.wait\") as mocked_ray_wait:\n with patch(\"ray.get\") as mocked_ray_get:\n c = mocked_worker.core_worker\n generator_ref = ray.ObjectRef.from_random()\n generator = StreamingObjectRefGenerator(generator_ref, mocked_worker)\n\n # Test when there's no new ref, it returns a nil.\n new_ref = ray.ObjectRef.from_random()\n c.peek_object_ref_stream.return_value = new_ref\n mocked_ray_wait.return_value = [], [new_ref]\n ref = generator._next_sync(timeout_s=0)\n assert ref.is_nil()\n\n # When the new ref is available, next should return it.\n for _ in range(3):\n new_ref = ray.ObjectRef.from_random()\n 
c.peek_object_ref_stream.return_value = new_ref\n mocked_ray_wait.return_value = [new_ref], []\n c.try_read_next_object_ref_stream.return_value = new_ref\n ref = generator._next_sync(timeout_s=0)\n assert new_ref == ref\n\n # When try_read_next_object_ref_stream raises a\n # ObjectRefStreamEndOfStreamError, it should raise a stop iteration.\n new_ref = ray.ObjectRef.from_random()\n c.peek_object_ref_stream.return_value = new_ref\n mocked_ray_wait.return_value = [new_ref], []\n c.try_read_next_object_ref_stream.side_effect = (\n ObjectRefStreamEndOfStreamError(\"\")\n ) # noqa\n mocked_ray_get.return_value = None\n with pytest.raises(StopIteration):\n ref = generator._next_sync(timeout_s=0)\n # Make sure we cannot serialize the generator.\n with pytest.raises(TypeError):\n dumps(generator)\n\n del generator\n c.delete_object_ref_stream.assert_called()\n\n\ndef test_streaming_object_ref_generator_task_failed_unit(mocked_worker):\n \"\"\"\n Verify when a task is failed by a system error,\n the generator ref is returned.\n \"\"\"\n with patch(\"ray.get\") as mocked_ray_get:\n with patch(\"ray.wait\") as mocked_ray_wait:\n c = mocked_worker.core_worker\n generator_ref = ray.ObjectRef.from_random()\n generator = StreamingObjectRefGenerator(generator_ref, mocked_worker)\n\n # Simulate the worker failure happens.\n next_ref = ray.ObjectRef.from_random()\n c.peek_object_ref_stream.return_value = next_ref\n mocked_ray_wait.return_value = [next_ref], []\n mocked_ray_get.side_effect = WorkerCrashedError()\n\n c.try_read_next_object_ref_stream.side_effect = (\n ObjectRefStreamEndOfStreamError(\"\")\n ) # noqa\n ref = generator._next_sync(timeout_s=0)\n # If the generator task fails by a systsem error,\n # meaning the ref will raise an exception\n # it should be returned.\n assert ref == generator_ref\n\n # Once exception is raised, it should always\n # raise stopIteration regardless of what\n # the ref contains now.\n with pytest.raises(StopIteration):\n ref = generator._next_sync(timeout_s=0)\n\n\ndef test_generator_basic(shutdown_only):\n ray.init(num_cpus=1)\n\n \"\"\"Basic cases\"\"\"\n print(\"Test basic case\")\n\n @ray.remote\n def f():\n for i in range(5):\n yield i\n\n gen = f.options(num_returns=\"streaming\").remote()\n i = 0\n for ref in gen:\n print(ray.get(ref))\n assert i == ray.get(ref)\n del ref\n i += 1\n\n \"\"\"Exceptions\"\"\"\n print(\"Test exceptions\")\n\n @ray.remote\n def f():\n for i in range(5):\n if i == 2:\n raise ValueError\n yield i\n\n gen = f.options(num_returns=\"streaming\").remote()\n print(ray.get(next(gen)))\n print(ray.get(next(gen)))\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n print(ray.get(next(gen)))\n with pytest.raises(StopIteration):\n ray.get(next(gen))\n with pytest.raises(StopIteration):\n ray.get(next(gen))\n\n \"\"\"Generator Task failure\"\"\"\n print(\"Test task failures\")\n\n @ray.remote\n class A:\n def getpid(self):\n import os\n\n return os.getpid()\n\n def f(self):\n for i in range(5):\n time.sleep(1)\n yield i\n\n a = A.remote()\n gen = a.f.options(num_returns=\"streaming\").remote()\n i = 0\n for ref in gen:\n if i == 2:\n ray.kill(a)\n if i == 3:\n with pytest.raises(ray.exceptions.RayActorError) as e:\n ray.get(ref)\n assert \"The actor is dead because it was killed by `ray.kill`\" in str(\n e.value\n )\n break\n assert i == ray.get(ref)\n del ref\n i += 1\n for _ in range(10):\n with pytest.raises(StopIteration):\n next(gen)\n\n \"\"\"Retry exceptions\"\"\"\n print(\"Test retry exceptions\")\n\n @ray.remote\n class Actor:\n 
def __init__(self):\n self.should_kill = True\n\n def should_kill(self):\n return self.should_kill\n\n async def set(self, wait_s):\n await asyncio.sleep(wait_s)\n self.should_kill = False\n\n @ray.remote(retry_exceptions=[ValueError], max_retries=10)\n def f(a):\n for i in range(5):\n should_kill = ray.get(a.should_kill.remote())\n if i == 3 and should_kill:\n raise ValueError\n yield i\n\n a = Actor.remote()\n gen = f.options(num_returns=\"streaming\").remote(a)\n assert ray.get(next(gen)) == 0\n assert ray.get(next(gen)) == 1\n assert ray.get(next(gen)) == 2\n a.set.remote(3)\n assert ray.get(next(gen)) == 3\n assert ray.get(next(gen)) == 4\n with pytest.raises(StopIteration):\n ray.get(next(gen))\n\n \"\"\"Cancel\"\"\"\n print(\"Test cancel\")\n\n @ray.remote\n def f():\n for i in range(5):\n time.sleep(5)\n yield i\n\n gen = f.options(num_returns=\"streaming\").remote()\n assert ray.get(next(gen)) == 0\n ray.cancel(gen)\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n assert ray.get(next(gen)) == 1\n assert \"was cancelled\" in str(e.value)\n with pytest.raises(StopIteration):\n next(gen)\n\n\ndef test_streaming_generator_bad_exception_not_failing(shutdown_only, capsys):\n \"\"\"This test verifies when a return value cannot be stored\n e.g., because it holds a lock) if it handles failures gracefully.\n\n Previously, when it happens, there was a check failure. This verifies\n the check failure doesn't happen anymore.\n \"\"\"\n ray.init()\n\n class UnserializableException(Exception):\n def __init__(self):\n self.lock = threading.Lock()\n\n @ray.remote\n def f():\n raise UnserializableException\n yield 1 # noqa\n\n for ref in f.options(num_returns=\"streaming\").remote():\n with pytest.raises(ray.exceptions.RayTaskError):\n ray.get(ref)\n captured = capsys.readouterr()\n lines = captured.err.strip().split(\"\\n\")\n\n # Verify check failure doesn't happen because we handle the error\n # properly.\n for line in lines:\n assert \"Check failed:\" not in line\n\n\n@pytest.mark.parametrize(\"crash_type\", [\"exception\", \"worker_crash\"])\ndef test_generator_streaming_no_leak_upon_failures(\n monkeypatch, shutdown_only, crash_type\n):\n with monkeypatch.context() as m:\n m.setenv(\n \"RAY_testing_asio_delay_us\",\n \"CoreWorkerService.grpc_server.ReportGeneratorItemReturns=100000:1000000\",\n )\n ray.init(num_cpus=1)\n\n @ray.remote\n def g():\n try:\n gen = f.options(num_returns=\"streaming\").remote()\n for ref in gen:\n print(ref)\n ray.get(ref)\n except Exception:\n print(\"exception!\")\n del ref\n\n del gen\n gc.collect()\n\n # Only the ref g is alive.\n def verify():\n print(list_objects())\n return len(list_objects()) == 1\n\n wait_for_condition(verify)\n return True\n\n @ray.remote\n def f():\n for i in range(10):\n time.sleep(0.2)\n if i == 4:\n if crash_type == \"exception\":\n raise ValueError\n else:\n sys.exit(9)\n yield 2\n\n for _ in range(5):\n ray.get(g.remote())\n\n\n@pytest.mark.parametrize(\"use_actors\", [False, True])\n@pytest.mark.parametrize(\"store_in_plasma\", [False, True])\ndef test_generator_streaming(shutdown_only, use_actors, store_in_plasma):\n \"\"\"Verify the generator is working in a streaming fashion.\"\"\"\n ray.init()\n remote_generator_fn = None\n if use_actors:\n\n @ray.remote\n class Generator:\n def __init__(self):\n pass\n\n def generator(self, num_returns, store_in_plasma):\n for i in range(num_returns):\n if store_in_plasma:\n yield np.ones(1_000_000, dtype=np.int8) * i\n else:\n yield [i]\n\n g = Generator.remote()\n 
remote_generator_fn = g.generator\n else:\n\n @ray.remote(max_retries=0)\n def generator(num_returns, store_in_plasma):\n for i in range(num_returns):\n if store_in_plasma:\n yield np.ones(1_000_000, dtype=np.int8) * i\n else:\n yield [i]\n\n remote_generator_fn = generator\n\n \"\"\"Verify num_returns=\"streaming\" is streaming\"\"\"\n gen = remote_generator_fn.options(num_returns=\"streaming\").remote(\n 3, store_in_plasma\n )\n i = 0\n for ref in gen:\n id = ref.hex()\n if store_in_plasma:\n expected = np.ones(1_000_000, dtype=np.int8) * i\n assert np.array_equal(ray.get(ref), expected)\n else:\n expected = [i]\n assert ray.get(ref) == expected\n\n del ref\n\n wait_for_condition(\n lambda id=id: len(list_objects(filters=[(\"object_id\", \"=\", id)])) == 0\n )\n i += 1\n\n\ndef test_generator_dist_chain(ray_start_cluster):\n \"\"\"E2E test to verify chain of generator works properly.\"\"\"\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0, object_store_memory=1 * 1024 * 1024 * 1024)\n ray.init()\n cluster.add_node(num_cpus=1)\n cluster.add_node(num_cpus=1)\n cluster.add_node(num_cpus=1)\n cluster.add_node(num_cpus=1)\n\n @ray.remote\n class ChainActor:\n def __init__(self, child=None):\n self.child = child\n\n def get_data(self):\n if not self.child:\n for _ in range(10):\n time.sleep(0.1)\n yield np.ones(5 * 1024 * 1024)\n else:\n for data in self.child.get_data.options(\n num_returns=\"streaming\"\n ).remote():\n yield ray.get(data)\n\n chain_actor = ChainActor.remote()\n chain_actor_2 = ChainActor.remote(chain_actor)\n chain_actor_3 = ChainActor.remote(chain_actor_2)\n chain_actor_4 = ChainActor.remote(chain_actor_3)\n\n for ref in chain_actor_4.get_data.options(num_returns=\"streaming\").remote():\n assert np.array_equal(np.ones(5 * 1024 * 1024), ray.get(ref))\n print(\"getting the next data\")\n del ref\n\n\ndef test_generator_slow_pinning_requests(monkeypatch, shutdown_only):\n \"\"\"\n Verify when the Object pinning request from the raylet\n is reported slowly, there's no refernece leak.\n \"\"\"\n with monkeypatch.context() as m:\n m.setenv(\n \"RAY_testing_asio_delay_us\",\n \"CoreWorkerService.grpc_server.PubsubLongPolling=1000000:1000000\",\n )\n\n @ray.remote\n def f():\n yield np.ones(5 * 1024 * 1024)\n\n for ref in f.options(num_returns=\"streaming\").remote():\n del ref\n\n print(list_objects())\n\n\n@pytest.mark.parametrize(\"store_in_plasma\", [False, True])\ndef test_actor_streaming_generator(shutdown_only, store_in_plasma):\n \"\"\"Test actor/async actor with sync/async generator interfaces.\"\"\"\n ray.init()\n\n @ray.remote\n class Actor:\n def f(self, ref):\n for i in range(3):\n yield i\n\n async def async_f(self, ref):\n for i in range(3):\n await asyncio.sleep(0.1)\n yield i\n\n def g(self):\n return 3\n\n a = Actor.remote()\n if store_in_plasma:\n arr = np.random.rand(5 * 1024 * 1024)\n else:\n arr = 3\n\n def verify_sync_task_executor():\n generator = a.f.options(num_returns=\"streaming\").remote(ray.put(arr))\n # Verify it works with next.\n assert isinstance(generator, StreamingObjectRefGenerator)\n assert ray.get(next(generator)) == 0\n assert ray.get(next(generator)) == 1\n assert ray.get(next(generator)) == 2\n with pytest.raises(StopIteration):\n ray.get(next(generator))\n\n # Verify it works with for.\n generator = a.f.options(num_returns=\"streaming\").remote(ray.put(3))\n for index, ref in enumerate(generator):\n assert index == ray.get(ref)\n\n def verify_async_task_executor():\n # Verify it works with next.\n generator = 
a.async_f.options(num_returns=\"streaming\").remote(ray.put(arr))\n assert isinstance(generator, StreamingObjectRefGenerator)\n assert ray.get(next(generator)) == 0\n assert ray.get(next(generator)) == 1\n assert ray.get(next(generator)) == 2\n\n # Verify it works with for.\n generator = a.f.options(num_returns=\"streaming\").remote(ray.put(3))\n for index, ref in enumerate(generator):\n assert index == ray.get(ref)\n\n async def verify_sync_task_async_generator():\n # Verify anext\n async_generator = a.f.options(num_returns=\"streaming\").remote(ray.put(arr))\n assert isinstance(async_generator, StreamingObjectRefGenerator)\n for expected in range(3):\n ref = await async_generator.__anext__()\n assert await ref == expected\n with pytest.raises(StopAsyncIteration):\n await async_generator.__anext__()\n\n # Verify async for.\n async_generator = a.f.options(num_returns=\"streaming\").remote(ray.put(arr))\n expected = 0\n async for ref in async_generator:\n value = await ref\n assert expected == value\n expected += 1\n\n async def verify_async_task_async_generator():\n async_generator = a.async_f.options(num_returns=\"streaming\").remote(\n ray.put(arr)\n )\n assert isinstance(async_generator, StreamingObjectRefGenerator)\n for expected in range(3):\n ref = await async_generator.__anext__()\n assert await ref == expected\n with pytest.raises(StopAsyncIteration):\n await async_generator.__anext__()\n\n # Verify async for.\n async_generator = a.async_f.options(num_returns=\"streaming\").remote(\n ray.put(arr)\n )\n expected = 0\n async for ref in async_generator:\n value = await ref\n assert expected == value\n expected += 1\n\n verify_sync_task_executor()\n verify_async_task_executor()\n asyncio.run(verify_sync_task_async_generator())\n asyncio.run(verify_async_task_async_generator())\n\n\ndef test_streaming_generator_exception(shutdown_only):\n # Verify the exceptions are correctly raised.\n # Also verify the followup next will raise StopIteration.\n ray.init()\n\n @ray.remote\n class Actor:\n def f(self):\n raise ValueError\n yield 1 # noqa\n\n async def async_f(self):\n raise ValueError\n yield 1 # noqa\n\n a = Actor.remote()\n g = a.f.options(num_returns=\"streaming\").remote()\n with pytest.raises(ValueError):\n ray.get(next(g))\n\n with pytest.raises(StopIteration):\n ray.get(next(g))\n\n with pytest.raises(StopIteration):\n ray.get(next(g))\n\n g = a.async_f.options(num_returns=\"streaming\").remote()\n with pytest.raises(ValueError):\n ray.get(next(g))\n\n with pytest.raises(StopIteration):\n ray.get(next(g))\n\n with pytest.raises(StopIteration):\n ray.get(next(g))\n\n\ndef test_threaded_actor_generator(shutdown_only):\n ray.init()\n\n @ray.remote(max_concurrency=10)\n class Actor:\n def f(self):\n for i in range(30):\n time.sleep(0.1)\n yield np.ones(1024 * 1024) * i\n\n @ray.remote(max_concurrency=20)\n class AsyncActor:\n async def f(self):\n for i in range(30):\n await asyncio.sleep(0.1)\n yield np.ones(1024 * 1024) * i\n\n async def main():\n a = Actor.remote()\n asy = AsyncActor.remote()\n\n async def run():\n i = 0\n async for ref in a.f.options(num_returns=\"streaming\").remote():\n val = ray.get(ref)\n print(val)\n print(ref)\n assert np.array_equal(val, np.ones(1024 * 1024) * i)\n i += 1\n del ref\n\n async def run2():\n i = 0\n async for ref in asy.f.options(num_returns=\"streaming\").remote():\n val = await ref\n print(ref)\n print(val)\n assert np.array_equal(val, np.ones(1024 * 1024) * i), ref\n i += 1\n del ref\n\n coroutines = [run() for _ in range(10)]\n # Extend rather than overwrite so both actors are exercised.\n coroutines += 
[run2() for _ in range(20)]\n\n await asyncio.gather(*coroutines)\n\n asyncio.run(main())\n\n\ndef test_generator_dist_gather(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0, object_store_memory=1 * 1024 * 1024 * 1024)\n ray.init()\n cluster.add_node(num_cpus=1)\n cluster.add_node(num_cpus=1)\n cluster.add_node(num_cpus=1)\n cluster.add_node(num_cpus=1)\n\n @ray.remote(num_cpus=1)\n class Actor:\n def __init__(self, child=None):\n self.child = child\n\n def get_data(self):\n for _ in range(10):\n time.sleep(0.1)\n yield np.ones(5 * 1024 * 1024)\n\n async def all_gather():\n actor = Actor.remote()\n async for ref in actor.get_data.options(num_returns=\"streaming\").remote():\n val = await ref\n assert np.array_equal(np.ones(5 * 1024 * 1024), val)\n del ref\n\n async def main():\n await asyncio.gather(all_gather(), all_gather(), all_gather(), all_gather())\n\n asyncio.run(main())\n summary = ray._private.internal_api.memory_summary(stats_only=True)\n print(summary)\n\n\ndef test_generator_wait(shutdown_only):\n \"\"\"\n Make sure the generator works with ray.wait.\n \"\"\"\n ray.init(num_cpus=8)\n\n @ray.remote\n def f(sleep_time):\n for i in range(2):\n time.sleep(sleep_time)\n yield i\n\n @ray.remote\n def g(sleep_time):\n time.sleep(sleep_time)\n return 10\n\n gen = f.options(num_returns=\"streaming\").remote(1)\n\n \"\"\"\n Test basic cases.\n \"\"\"\n for expected_rval in [0, 1]:\n s = time.time()\n r, ur = ray.wait([gen], num_returns=1)\n print(time.time() - s)\n assert len(r) == 1\n assert ray.get(next(r[0])) == expected_rval\n assert len(ur) == 0\n\n # Should raise a stop iteration.\n for _ in range(3):\n s = time.time()\n r, ur = ray.wait([gen], num_returns=1)\n print(time.time() - s)\n assert len(r) == 1\n with pytest.raises(StopIteration):\n assert next(r[0]) == 0\n assert len(ur) == 0\n\n gen = f.options(num_returns=\"streaming\").remote(0)\n # Wait until the generator task finishes\n ray.get(gen._generator_ref)\n for i in range(2):\n r, ur = ray.wait([gen], timeout=0)\n assert len(r) == 1\n assert len(ur) == 0\n assert ray.get(next(r[0])) == i\n\n \"\"\"\n Test the case ref is mixed with regular object ref.\n \"\"\"\n gen = f.options(num_returns=\"streaming\").remote(0)\n ref = g.remote(3)\n ready, unready = [], [gen, ref]\n result_set = set()\n while unready:\n ready, unready = ray.wait(unready)\n print(ready, unready)\n assert len(ready) == 1\n for r in ready:\n if isinstance(r, StreamingObjectRefGenerator):\n try:\n ref = next(r)\n print(ref)\n print(ray.get(ref))\n result_set.add(ray.get(ref))\n except StopIteration:\n pass\n else:\n unready.append(r)\n else:\n result_set.add(ray.get(r))\n\n assert result_set == {0, 1, 10}\n\n \"\"\"\n Test timeout.\n \"\"\"\n gen = f.options(num_returns=\"streaming\").remote(3)\n ref = g.remote(1)\n ready, unready = ray.wait([gen, ref], timeout=2)\n assert len(ready) == 1\n assert len(unready) == 1\n\n \"\"\"\n Test num_returns\n \"\"\"\n gen = f.options(num_returns=\"streaming\").remote(1)\n ref = g.remote(1)\n ready, unready = ray.wait([ref, gen], num_returns=2)\n assert len(ready) == 2\n assert len(unready) == 0\n\n\ndef test_generator_wait_e2e(shutdown_only):\n ray.init(num_cpus=8)\n\n @ray.remote\n def f(sleep_time):\n for i in range(2):\n time.sleep(sleep_time)\n yield i\n\n @ray.remote\n def g(sleep_time):\n time.sleep(sleep_time)\n return 10\n\n gen = [f.options(num_returns=\"streaming\").remote(1) for _ in range(4)]\n ref = [g.remote(2) for _ in range(4)]\n ready, unready = [], [*gen, *ref]\n result 
= []\n start = time.time()\n while unready:\n ready, unready = ray.wait(unready, num_returns=len(unready), timeout=0.1)\n for r in ready:\n if isinstance(r, StreamingObjectRefGenerator):\n try:\n ref = next(r)\n result.append(ray.get(ref))\n except StopIteration:\n pass\n else:\n unready.append(r)\n else:\n result.append(ray.get(r))\n elapsed = time.time() - start\n assert elapsed < 3\n assert 2 < elapsed\n\n assert len(result) == 12\n result = Counter(result)\n assert result[0] == 4\n assert result[1] == 4\n assert result[10] == 4\n\n\n@pytest.mark.parametrize(\"delay\", [True])\ndef test_reconstruction(monkeypatch, ray_start_cluster, delay):\n with monkeypatch.context() as m:\n if delay:\n m.setenv(\n \"RAY_testing_asio_delay_us\",\n \"CoreWorkerService.grpc_server.\"\n \"ReportGeneratorItemReturns=10000:1000000\",\n )\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=RECONSTRUCTION_CONFIG,\n enable_object_reconstruction=True,\n )\n ray.init(address=cluster.address)\n # Node to place the initial object.\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(num_returns=\"streaming\", max_retries=2)\n def dynamic_generator(num_returns):\n for i in range(num_returns):\n yield np.ones(1_000_000, dtype=np.int8) * i\n\n @ray.remote\n def fetch(x):\n return x[0]\n\n # Test recovery of all dynamic objects through re-execution.\n gen = ray.get(dynamic_generator.remote(10))\n refs = []\n\n for i in range(5):\n refs.append(next(gen))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n\n for i, ref in enumerate(refs):\n print(\"first trial.\")\n print(\"fetching \", i)\n assert ray.get(fetch.remote(ref)) == i\n\n # Try second retry.\n cluster.remove_node(node_to_kill, allow_graceful=False)\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n\n for i in range(4):\n refs.append(next(gen))\n\n for i, ref in enumerate(refs):\n print(\"second trial\")\n print(\"fetching \", i)\n assert ray.get(fetch.remote(ref)) == i\n\n # third retry should fail.\n cluster.remove_node(node_to_kill, allow_graceful=False)\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n\n for i in range(1):\n refs.append(next(gen))\n\n for i, ref in enumerate(refs):\n print(\"third trial\")\n print(\"fetching \", i)\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n ray.get(fetch.remote(ref))\n assert \"the maximum number of task retries has been exceeded\" in str(e.value)\n\n\n@pytest.mark.parametrize(\"failure_type\", [\"exception\", \"crash\"])\ndef test_reconstruction_retry_failed(ray_start_cluster, failure_type):\n \"\"\"Test the streaming generator retry fails in the second retry.\"\"\"\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=0,\n _system_config=RECONSTRUCTION_CONFIG,\n enable_object_reconstruction=True,\n )\n ray.init(address=cluster.address)\n\n @ray.remote(num_cpus=0)\n class SignalActor:\n def __init__(self):\n self.crash = False\n\n def set(self):\n self.crash = True\n\n def get(self):\n return self.crash\n\n signal = SignalActor.remote()\n ray.get(signal.get.remote())\n\n # Node to place the initial object.\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n cluster.wait_for_nodes()\n\n @ray.remote(num_returns=\"streaming\")\n def dynamic_generator(num_returns, signal_actor):\n for i in 
range(num_returns):\n if i == 3:\n should_crash = ray.get(signal_actor.get.remote())\n if should_crash:\n if failure_type == \"exception\":\n raise Exception\n else:\n sys.exit(5)\n time.sleep(1)\n yield np.ones(1_000_000, dtype=np.int8) * i\n\n @ray.remote\n def fetch(x):\n return x[0]\n\n gen = ray.get(dynamic_generator.remote(10, signal))\n refs = []\n\n for i in range(5):\n refs.append(next(gen))\n\n cluster.remove_node(node_to_kill, allow_graceful=False)\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n\n for i, ref in enumerate(refs):\n print(\"first trial.\")\n print(\"fetching \", i)\n assert ray.get(fetch.remote(ref)) == i\n\n # Try second retry.\n cluster.remove_node(node_to_kill, allow_graceful=False)\n node_to_kill = cluster.add_node(num_cpus=1, object_store_memory=10**8)\n\n signal.set.remote()\n\n for ref in gen:\n refs.append(ref)\n\n for i, ref in enumerate(refs):\n print(\"second trial\")\n print(\"fetching \", i)\n print(ref)\n if i < 3:\n assert ray.get(fetch.remote(ref)) == i\n else:\n with pytest.raises(ray.exceptions.RayTaskError) as e:\n assert ray.get(fetch.remote(ref)) == i\n assert \"The worker died\" in str(e.value)\n\n\ndef test_ray_datasetlike_mini_stress_test(monkeypatch, ray_start_cluster):\n \"\"\"\n Test a workload that's like ray dataset + lineage reconstruction.\n \"\"\"\n with monkeypatch.context() as m:\n m.setenv(\n \"RAY_testing_asio_delay_us\",\n \"CoreWorkerService.grpc_server.\" \"ReportGeneratorItemReturns=10000:1000000\",\n )\n cluster = ray_start_cluster\n # Head node with no resources.\n cluster.add_node(\n num_cpus=1,\n resources={\"head\": 1},\n _system_config=RECONSTRUCTION_CONFIG,\n enable_object_reconstruction=True,\n )\n ray.init(address=cluster.address)\n\n @ray.remote(num_returns=\"streaming\", max_retries=-1)\n def dynamic_generator(num_returns):\n for i in range(num_returns):\n time.sleep(0.1)\n yield np.ones(1_000_000, dtype=np.int8) * i\n\n @ray.remote(num_cpus=0, resources={\"head\": 1})\n def driver():\n unready = [dynamic_generator.remote(10) for _ in range(5)]\n ready = []\n while unready:\n ready, unready = ray.wait(\n unready, num_returns=len(unready), timeout=0.1\n )\n for r in ready:\n try:\n ref = next(r)\n print(ref)\n ray.get(ref)\n except StopIteration:\n pass\n else:\n unready.append(r)\n return None\n\n ref = driver.remote()\n\n nodes = []\n for _ in range(4):\n nodes.append(cluster.add_node(num_cpus=1, object_store_memory=10**8))\n cluster.wait_for_nodes()\n\n for _ in range(10):\n time.sleep(0.1)\n node_to_kill = random.choices(nodes)[0]\n nodes.remove(node_to_kill)\n cluster.remove_node(node_to_kill, allow_graceful=False)\n nodes.append(cluster.add_node(num_cpus=1, object_store_memory=10**8))\n\n ray.get(ref)\n del ref\n\n assert_no_leak()\n\n\ndef test_generator_max_returns(monkeypatch, shutdown_only):\n \"\"\"\n Test when generator returns more than system limit values\n (100 million by default), it fails a task.\n \"\"\"\n with monkeypatch.context() as m:\n # defer for 10s for the second node.\n m.setenv(\n \"RAY_max_num_generator_returns\",\n \"2\",\n )\n\n @ray.remote(num_returns=\"streaming\")\n def generator_task():\n for _ in range(3):\n yield 1\n\n @ray.remote\n def driver():\n gen = generator_task.remote()\n for ref in gen:\n assert ray.get(ref) == 1\n\n with pytest.raises(ray.exceptions.RayTaskError):\n ray.get(driver.remote())\n\n\ndef test_return_yield_mix(shutdown_only):\n \"\"\"\n Test the case where yield and return is mixed within a\n generator task.\n \"\"\"\n\n 
@ray.remote\n def g():\n for i in range(3):\n yield i\n return\n\n generator = g.options(num_returns=\"streaming\").remote()\n result = []\n for ref in generator:\n result.append(ray.get(ref))\n\n assert len(result) == 1\n assert result[0] == 0\n\n\ndef test_task_name_not_changed_for_iteration(shutdown_only):\n \"\"\"Handles https://github.com/ray-project/ray/issues/37147.\n Verify the task_name is not changed for each iteration in\n async actor generator task.\n \"\"\"\n\n @ray.remote\n class A:\n async def gen(self):\n task_name = asyncio.current_task().get_name()\n for i in range(5):\n assert (\n task_name == asyncio.current_task().get_name()\n ), f\"{task_name} != {asyncio.current_task().get_name()}\"\n yield i\n\n assert task_name == asyncio.current_task().get_name()\n\n a = A.remote()\n for obj_ref in a.gen.options(num_returns=\"streaming\").remote():\n print(ray.get(obj_ref))\n\n\ndef test_async_actor_concurrent(shutdown_only):\n \"\"\"Verify the async actor generator tasks are concurrent.\"\"\"\n\n @ray.remote\n class A:\n async def gen(self):\n for i in range(5):\n await asyncio.sleep(1)\n yield i\n\n a = A.remote()\n\n async def co():\n async for ref in a.gen.options(num_returns=\"streaming\").remote():\n print(await ref)\n\n async def main():\n await asyncio.gather(co(), co(), co())\n\n s = time.time()\n asyncio.run(main())\n assert 4.5 < time.time() - s < 6.5\n\n\ndef test_no_memory_store_obj_leak(shutdown_only):\n \"\"\"Fixes https://github.com/ray-project/ray/issues/38089\n\n Verify there's no leak from in-memory object store when\n using a streaming generator.\n \"\"\"\n ray.init()\n\n @ray.remote\n def f():\n for _ in range(10):\n yield 1\n\n for _ in range(10):\n for ref in f.options(num_returns=\"streaming\").remote():\n del ref\n\n time.sleep(0.2)\n\n core_worker = ray._private.worker.global_worker.core_worker\n assert core_worker.get_memory_store_size() == 0\n assert_no_leak()\n\n for _ in range(10):\n for ref in f.options(num_returns=\"streaming\").remote():\n break\n\n time.sleep(0.2)\n\n del ref\n core_worker = ray._private.worker.global_worker.core_worker\n assert core_worker.get_memory_store_size() == 0\n assert_no_leak()\n\n\ndef test_python_object_leak(shutdown_only):\n \"\"\"Make sure the objects are not leaked\n (due to circular references) when tasks run\n for all the execution model in Ray actors.\n \"\"\"\n ray.init()\n\n @ray.remote\n class AsyncActor:\n def __init__(self):\n self.gc_garbage_len = 0\n\n def get_gc_garbage_len(self):\n return self.gc_garbage_len\n\n async def gen(self, fail=False):\n gc.set_debug(gc.DEBUG_SAVEALL)\n gc.collect()\n self.gc_garbage_len = len(gc.garbage)\n print(\"Objects: \", self.gc_garbage_len)\n if fail:\n print(\"exception\")\n raise Exception\n yield 1\n\n async def f(self, fail=False):\n gc.set_debug(gc.DEBUG_SAVEALL)\n gc.collect()\n self.gc_garbage_len = len(gc.garbage)\n print(\"Objects: \", self.gc_garbage_len)\n if fail:\n print(\"exception\")\n raise Exception\n return 1\n\n @ray.remote\n class A:\n def __init__(self):\n self.gc_garbage_len = 0\n\n def get_gc_garbage_len(self):\n return self.gc_garbage_len\n\n def f(self, fail=False):\n gc.set_debug(gc.DEBUG_SAVEALL)\n gc.collect()\n self.gc_garbage_len = len(gc.garbage)\n print(\"Objects: \", self.gc_garbage_len)\n if fail:\n print(\"exception\")\n raise Exception\n return 1\n\n def gen(self, fail=False):\n gc.set_debug(gc.DEBUG_SAVEALL)\n gc.collect()\n self.gc_garbage_len = len(gc.garbage)\n print(\"Objects: \", self.gc_garbage_len)\n if fail:\n 
print(\"exception\")\n raise Exception\n yield 1\n\n def verify_regular(actor, fail):\n for _ in range(100):\n try:\n ray.get(actor.f.remote(fail=fail))\n except Exception:\n pass\n assert ray.get(actor.get_gc_garbage_len.remote()) == 0\n\n def verify_generator(actor, fail):\n for _ in range(100):\n for ref in actor.gen.options(num_returns=\"streaming\").remote(fail=fail):\n try:\n ray.get(ref)\n except Exception:\n pass\n assert ray.get(actor.get_gc_garbage_len.remote()) == 0\n\n print(\"Test regular actors\")\n verify_regular(A.remote(), True)\n verify_regular(A.remote(), False)\n print(\"Test regular actors + generator\")\n verify_generator(A.remote(), True)\n verify_generator(A.remote(), False)\n\n # Test threaded actor\n print(\"Test threaded actors\")\n verify_regular(A.options(max_concurrency=10).remote(), True)\n verify_regular(A.options(max_concurrency=10).remote(), False)\n print(\"Test threaded actors + generator\")\n verify_generator(A.options(max_concurrency=10).remote(), True)\n verify_generator(A.options(max_concurrency=10).remote(), False)\n\n # Test async actor\n print(\"Test async actors\")\n verify_regular(AsyncActor.remote(), True)\n verify_regular(AsyncActor.remote(), False)\n print(\"Test async actors + generator\")\n verify_generator(AsyncActor.remote(), True)\n verify_generator(AsyncActor.remote(), False)\n assert len(list_actors()) == 12\n\n\nif __name__ == \"__main__\":\n import os\n\n if os.environ.get(\"PARALLEL_CI\"):\n sys.exit(pytest.main([\"-n\", \"auto\", \"--boxed\", \"-vs\", __file__]))\n else:\n sys.exit(pytest.main([\"-sv\", __file__]))\n","sub_path":"python/ray/tests/test_streaming_generator.py","file_name":"test_streaming_generator.py","file_ext":"py","file_size_in_byte":38553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"345263580","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.11-intel/egg/matchengine/settings.py\n# Compiled at: 2017-02-10 11:07:01\n\"\"\"Copyright 2016 Dana-Farber Cancer Institute\"\"\"\nimport os, sys, json, logging\nlogging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(message)s')\nTUMOR_TREE = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/tumor_tree.txt'))\nmonths = [\n 'January', 'February', 'March', 'April', 'May', 'June',\n 'July', 'August', 'September', 'October', 'November', 'December']\nMONGO_URI = 'mongodb://localhost:27017/matchminer?replicaSet=rs0'\nuri_check = os.getenv('MONGO_URI', None)\nif uri_check:\n MONGO_URI = uri_check","sub_path":"pycfiles/matchengine-0.1.1-py2.7/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"84854533","text":"import torch\n\nEPS=1e-12\n\ndef ideal_binary_mask(input):\n \"\"\"\n Args:\n input (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n Returns:\n mask (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \"\"\"\n n_dim = input.dim()\n \n if n_dim == 3:\n n_sources, F_bin, T_bin = input.size()\n \n input = input.permute(1,2,0).contiguous()\n flatten_input = input.view(F_bin*T_bin, n_sources)\n flatten_idx = torch.arange(0, F_bin*T_bin*n_sources, n_sources)\n flatten_idx = flatten_idx + flatten_input.argmax(dim=1)\n flatten_mask = torch.zeros(F_bin*T_bin*n_sources)\n flatten_mask[flatten_idx] 
= 1\n \n mask = flatten_mask.view(F_bin, T_bin, n_sources)\n mask = mask.permute(2,0,1).contiguous()\n elif n_dim == 4:\n batch_size, n_sources, F_bin, T_bin = input.size()\n \n input = input.permute(0,2,3,1).contiguous()\n flatten_input = input.view(batch_size*F_bin*T_bin, n_sources)\n flatten_idx = torch.arange(0, batch_size*F_bin*T_bin*n_sources, n_sources)\n flatten_idx = flatten_idx + flatten_input.argmax(dim=1)\n flatten_mask = torch.zeros(batch_size*F_bin*T_bin*n_sources)\n flatten_mask[flatten_idx] = 1\n \n mask = flatten_mask.view(batch_size, F_bin, T_bin, n_sources)\n mask = mask.permute(0,3,1,2).contiguous()\n else:\n raise ValueError(\"Not support {}-dimension\".format(n_dim))\n \n return mask\n \ndef ideal_ratio_mask(input, eps=EPS):\n \"\"\"\n Args:\n input (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \n Returns:\n mask (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \"\"\"\n n_dim = input.dim()\n \n if n_dim == 3:\n norm = input.sum(dim=0, keepdim=True) # (1, F_bin, T_bin)\n elif n_dim == 4:\n norm = input.sum(dim=1, keepdim=True) # (batch_size, 1, F_bin, T_bin)\n else:\n raise ValueError(\"Not support {}-dimension\".format(n_dim))\n \n mask = input / (norm + eps) # (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \n return mask\n\ndef wiener_filter_mask(input, eps=EPS):\n \"\"\"\n Args:\n input (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \n Returns:\n mask (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \"\"\"\n n_dim = input.dim()\n power = input**2 # (n_sources, F_bin, T_bin) or (batch_size, n_sources, F_bin, T_bin)\n \n if n_dim == 3:\n norm = power.sum(dim=0, keepdim=True) # (1, F_bin, T_bin)\n elif n_dim == 4:\n norm = power.sum(dim=1, keepdim=True) # (batch_size, 1, F_bin, T_bin)\n else:\n raise ValueError(\"Not support {}-dimension\".format(n_dim))\n \n mask = power / (norm + eps)\n\n return mask\n\n\nif __name__ == '__main__':\n import numpy as np\n from scipy.signal import resample_poly\n \n from utils.utils_audio import read_wav, write_wav\n from stft import BatchSTFT, BatchInvSTFT\n \n torch.manual_seed(111)\n \n fft_size, hop_size = 1024, 256\n n_basis = 4\n \n source1, sr = read_wav(\"data/man-44100.wav\")\n source1 = resample_poly(source1, up=16000, down=sr)\n write_wav(\"data/man-16000.wav\", signal=source1, sr=16000)\n T = len(source1)\n \n source2, sr = read_wav(\"data/woman-44100.wav\")\n source2 = resample_poly(source2, up=16000, down=sr)\n write_wav(\"data/woman-16000.wav\", signal=source2, sr=16000)\n \n mixture = source1 + source2\n write_wav(\"data/mixture-16000.wav\", signal=mixture, sr=16000)\n \n stft = BatchSTFT(fft_size=fft_size, hop_size=hop_size)\n istft = BatchInvSTFT(fft_size=fft_size, hop_size=hop_size)\n \n mixture = torch.Tensor(mixture).unsqueeze(dim=0)\n source1 = torch.Tensor(source1).unsqueeze(dim=0)\n source2 = torch.Tensor(source2).unsqueeze(dim=0)\n \n spectrogram_mixture = stft(mixture)\n real = spectrogram_mixture[:,:fft_size//2+1]\n imag = spectrogram_mixture[:,fft_size//2+1:]\n power = real**2+imag**2\n amplitude_mixture = torch.sqrt(power)\n phase_mixture = torch.atan2(imag, real)\n \n spectrogram_source1 = stft(source1)\n real = spectrogram_source1[:,:fft_size//2+1]\n imag = spectrogram_source1[:,fft_size//2+1:]\n power = real**2+imag**2\n amplitude_source1 = torch.sqrt(power)\n \n spectrogram_source2 = stft(source2)\n real = spectrogram_source2[:,:fft_size//2+1]\n imag = spectrogram_source2[:,fft_size//2+1:]\n power 
= real**2+imag**2\n amplitude_source2 = torch.sqrt(power)\n\n amplitude = torch.cat([amplitude_source1, amplitude_source2], dim=0)\n \n # Ideal binary mask\n mask = ideal_binary_mask(amplitude)\n estimated_amplitude = amplitude * mask\n \n real, imag = estimated_amplitude * torch.cos(phase_mixture), estimated_amplitude * torch.sin(phase_mixture)\n estimated_spectrgram = torch.cat([real, imag], dim=1)\n estimated_signal = istft(estimated_spectrgram, T=T)\n estimated_signal = estimated_signal.detach().cpu().numpy()\n \n for signal, tag in zip(estimated_signal, ['man', 'woman']):\n write_wav(\"data/{}-estimated_IBM.wav\".format(tag), signal=signal, sr=16000)\n \n # Ideal ratio mask\n mask = ideal_ratio_mask(amplitude)\n estimated_amplitude = amplitude * mask\n \n real, imag = estimated_amplitude * torch.cos(phase_mixture), estimated_amplitude * torch.sin(phase_mixture)\n estimated_spectrgram = torch.cat([real, imag], dim=1)\n estimated_signal = istft(estimated_spectrgram, T=T)\n estimated_signal = estimated_signal.detach().cpu().numpy()\n \n for signal, tag in zip(estimated_signal, ['man', 'woman']):\n write_wav(\"data/{}-estimated_IRM.wav\".format(tag), signal=signal, sr=16000)\n \n # Wiener filter like mask\n mask = wiener_filter_mask(amplitude)\n estimated_amplitude = amplitude * mask\n \n real, imag = estimated_amplitude * torch.cos(phase_mixture), estimated_amplitude * torch.sin(phase_mixture)\n estimated_spectrgram = torch.cat([real, imag], dim=1)\n estimated_signal = istft(estimated_spectrgram, T=T)\n estimated_signal = estimated_signal.detach().cpu().numpy()\n \n for signal, tag in zip(estimated_signal, ['man', 'woman']):\n write_wav(\"data/{}-estimated_WFM.wav\".format(tag), signal=signal, sr=16000)\n","sub_path":"src/algorithm/ideal_mask.py","file_name":"ideal_mask.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"541397458","text":"from sklearn.datasets import make_classification\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom bayes_opt import BayesianOptimization\r\n# done\r\nfrom sklearn import svm\r\nfrom spectral import *\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\nfrom sklearn.decomposition import NMF\r\nfrom scipy.optimize import nnls\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.svm import SVC\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom sklearn.model_selection import validation_curve\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.impute import SimpleImputer\r\n\r\n\r\ndef inf_flt(data, label):\r\n p, q = data.shape\r\n for i in range(p-1):\r\n for j in range(q-1):\r\n if data[i, j] == float(\"inf\"):\r\n print('has NaN')\r\n data = np.delete(data, i, axis=0)\r\n label = np.delete(label, i, axis=0)\r\n return data, label\r\n\r\n\r\ndef inf_flt1(data):\r\n p, q = data.shape\r\n for i in range(p-1):\r\n for j in range(q-1):\r\n if data[i, j] == float(\"inf\"):\r\n data = np.delete(data, i, axis=0)\r\n return data\r\n\r\n\r\ndef classify(file_dir, hdr_dir, roi_dir1, roi_dir2, out_dir, key_scale=False):\r\n # time_start = time.time()\r\n if not os.path.isdir(out_dir):\r\n os.makedirs(out_dir)\r\n img = open_image(hdr_dir).load()\r\n m, n, l = img.shape\r\n k = m * n\r\n img_reshape = img.reshape((k, l))\r\n gt1 = cv2.imread(roi_dir1)\r\n gt1 = np.asarray(gt1[:, :, 0])\r\n gt2 = cv2.imread(roi_dir2)\r\n gt2 = np.asarray(gt2[:, :, 0])\r\n 
label1_value = 255\r\n label2_value = 0\r\n gt1_reshape = gt1.reshape((k, 1))\r\n img1_gt1_list = list(np.hstack((gt1_reshape, img_reshape)))\r\n data1_gt1_list = list(filter(lambda number: number[0] > 127, img1_gt1_list))\r\n data1_gt1_array = np.asarray(data1_gt1_list)\r\n data1 = data1_gt1_array[:, 1:]\r\n count1 = data1.shape[0]\r\n label1 = np.ones(count1)*label1_value\r\n gt2_reshape = gt2.reshape((k, 1))\r\n img2_gt2_list = list(np.hstack((gt2_reshape, img_reshape)))\r\n data2_gt2_list = list(filter(lambda number: number[0] > 127, img2_gt2_list))\r\n data2_gt2_array = np.asarray(data2_gt2_list)\r\n data2 = data2_gt2_array[:, 1:]\r\n count2 = data2.shape[0]\r\n label2 = np.ones(count2)*label2_value\r\n data = np.vstack((data1, data2))\r\n label = np.hstack((label1, label2))\r\n\r\n key_inf = np.isinf(data).any()\r\n if key_inf:\r\n data[np.isinf(data)] = np.nan\r\n # data_inf = np.isinf(data)\r\n # data[data_inf] = 1.2\r\n # print(np.isinf(data).any())\r\n key_nan2 = np.isnan(data).any()\r\n if key_nan2:\r\n my_imputer = SimpleImputer(missing_values=np.nan, strategy=\"mean\")\r\n data = my_imputer.fit_transform(data)\r\n count = data.shape[0]\r\n print('collected samples: ' + str(count))\r\n if key_scale is True:\r\n scaler1 = StandardScaler()\r\n scaler1.fit(data)\r\n data = scaler1.transform(data)\r\n '''\r\n gbdt = GradientBoostingClassifier()\r\n cross_val_score(gbdt, data, label, cv=20, scoring='roc_auc').mean()\r\n\r\n def gbdt_cv(n_estimators, min_samples_split, max_features, max_depth):\r\n res = cross_val_score(\r\n GradientBoostingClassifier(n_estimators=int(n_estimators),\r\n min_samples_split=int(min_samples_split),\r\n max_features=min(max_features, 0.999), # float\r\n max_depth=int(max_depth),\r\n random_state=2\r\n ),\r\n data, label, scoring='roc_auc', cv=20\r\n ).mean()\r\n return res\r\n\r\n gbdt_op = BayesianOptimization(\r\n gbdt_cv,\r\n {'n_estimators': (10, 250),\r\n 'min_samples_split': (2, 25),\r\n 'max_features': (0.1, 0.999),\r\n 'max_depth': (5, 15)}\r\n )\r\n gbdt_op.maximize()\r\n print(gbdt_op.max)\r\n n_estimators = int(gbdt_op.max['params']['n_estimators'])\r\n min_samples_split = int(gbdt_op.max['params']['min_samples_split'])\r\n max_features = min(gbdt_op.max['params']['max_features'], 0.999)\r\n max_depth = int(gbdt_op.max['params']['max_depth'])\r\n clf = GradientBoostingClassifier(n_estimators=n_estimators,\r\n min_samples_split=min_samples_split,\r\n max_features=max_features, # float\r\n max_depth=max_depth,\r\n )\r\n '''\r\n clf = GradientBoostingClassifier()\r\n\r\n clf.fit(data, label)\r\n\r\n for files in os.listdir(file_dir):\r\n # iterate over all files in the current folder\r\n if files.endswith('.hdr'): # only process files ending with .hdr\r\n time_start = time.time()\r\n print('doing img:' + files)\r\n file = file_dir + '\\\\' + files\r\n img2 = open_image(file).load()\r\n m, n, l = img2.shape\r\n k = m * n\r\n img2_reshape = img2.reshape((k, l))\r\n\r\n key_inf2 = np.isinf(img2_reshape).any()\r\n if key_inf2:\r\n img2_reshape[np.isinf(img2_reshape)] = np.nan\r\n\r\n key_nan2 = np.isnan(img2_reshape).any()\r\n if key_nan2:\r\n my_imputer = SimpleImputer(missing_values=np.nan, strategy=\"mean\")\r\n img2_reshape = my_imputer.fit_transform(img2_reshape)\r\n # img2_reshape_inf = np.isinf(img2_reshape)\r\n # img2_reshape[img2_reshape_inf] = 1.2\r\n # img2_reshape_new = np.delete(img2_reshape, np.where(np.isnan(img2_reshape))[0], axis=0)\r\n if key_scale is True:\r\n scaler = StandardScaler()\r\n scaler.fit(img2_reshape)\r\n img2_reshape = scaler.transform(img2_reshape)\r\n clmap = 
clf.predict(img2_reshape)\r\n res = clmap.reshape((m, n))\r\n (filename, extension) = os.path.splitext(files)\r\n outputway = out_dir + r'\\{a}_GBDT_default_3.jpg'.format(a=filename)\r\n cv2.imwrite(outputway, res)\r\n time_end = time.time()\r\n print('time cost: ', time_end-time_start)\r\n\r\n\r\nif __name__ == '__main__':\r\n imgWay = r'E:\\HE+CAM5\\Cutresult' # 需要分类的图像路径\r\n hdrWay = r'E:\\HE+CAM5\\Cutresult\\HE_CAM52_mono_E_roi1_prePro.hdr' # 用于分类的图像hdr路径\r\n roiWay1 = r'E:\\HE+CAM5\\Cutresult\\roia.jpg' # 用于分类的图像掩膜路径\r\n roiWay2 = r'E:\\HE+CAM5\\Cutresult\\roiz.jpg'\r\n outWay = r'E:\\HE+CAM5\\Cutresult\\gbdtresulta' # 分类后的输出路径\r\n classify(imgWay, hdrWay, roiWay1, roiWay2, outWay)\r\n '''\r\n from bayes_opt import BayesianOptimization\r\n\r\n x, y = make_classification(n_samples=2500, n_features=10, n_classes=2)\r\n gbdt = GradientBoostingClassifier()\r\n cross_val_score(gbdt, x, y, cv=20, scoring='roc_auc').mean()\r\n\r\n\r\n def gbdt_cv(n_estimators, min_samples_split, max_features, max_depth):\r\n res = cross_val_score(\r\n GradientBoostingClassifier(n_estimators=int(n_estimators),\r\n min_samples_split=int(min_samples_split),\r\n max_features=min(max_features, 0.999), # float\r\n max_depth=int(max_depth),\r\n random_state=2\r\n ),\r\n x, y, scoring='roc_auc', cv=20\r\n ).mean()\r\n return res\r\n\r\n gbdt_op = BayesianOptimization(\r\n gbdt_cv,\r\n {'n_estimators': (10, 250),\r\n 'min_samples_split': (2, 25),\r\n 'max_features': (0.1, 0.999),\r\n 'max_depth': (5, 15)}\r\n )\r\n gbdt_op.maximize()\r\n print(gbdt_op.max)\r\n n_estimators = int(gbdt_op.max['params']['n_estimators'])\r\n min_samples_split = int(gbdt_op.max['params']['min_samples_split'])\r\n max_features = min(gbdt_op.max['params']['max_features'], 0.999)\r\n max_depth = int(gbdt_op.max['params']['max_depth'])\r\n clf1 = GradientBoostingClassifier(n_estimators=n_estimators,\r\n min_samples_split=min_samples_split,\r\n max_features=max_features, # float\r\n max_depth=max_depth,\r\n )\r\n clf2 = GradientBoostingClassifier()\r\n X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)\r\n # clf = GradientBoostingClassifier(random_state=0)\r\n clf1.fit(X_train, y_train)\r\n result1 = clf1.predict(X_test)\r\n print('Bayes: ', clf1.score(X_test, y_test))\r\n clf2.fit(X_train, y_train)\r\n result2 = clf2.predict(X_test)\r\n print('default: ', clf2.score(X_test, y_test))\r\n # 创建新的figure\r\n fig = plt.figure()\r\n ax = fig.add_subplot(221)\r\n ax.scatter(x[:, 0], x[:, 1], c=y)\r\n ax = fig.add_subplot(222)\r\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test)\r\n ax = fig.add_subplot(223)\r\n ax.scatter(X_test[:, 0], X_test[:, 1], c=result1)\r\n ax = fig.add_subplot(224)\r\n ax.scatter(X_test[:, 0], X_test[:, 1], c=result2)\r\n plt.show()\r\n '''\r\n\r\n\r\n\r\n","sub_path":"hyperClassifier/GradientBoostingClassifier.py","file_name":"GradientBoostingClassifier.py","file_ext":"py","file_size_in_byte":9193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"55012374","text":"#!/usr/bin/env python\n\n\"\"\"\nProvided by Nick Higginbottom. 
Thanks!\n\"\"\"\n\n\nimport sys\nimport numpy as np\nfrom astropy import constants as c\nimport pyPLUTO as pp\n\nGAMMA= 5.0 / 3.0\n\n\n#First get scaling factorz from the definitions file\n\ninp=open('definitions.h','ro')\nfor line in inp.readlines():\n\tdata=line.split()\n\tif len(data)>1:\n\t\tif data[1]=='UNIT_DENSITY':\n\t\t\tUNIT_DENSITY=float(data[2])\n\t\telif data[1]=='UNIT_LENGTH':\n\t\t\tUNIT_LENGTH=float(data[2])\n\t\telif data[1]=='UNIT_VELOCITY':\n\t\t\tUNIT_VELOCITY=float(data[2])\n\n#Compute deived scaling factors\n\nUNIT_MASS=(UNIT_DENSITY*UNIT_LENGTH*UNIT_LENGTH*UNIT_LENGTH)\nUNIT_ACCELERATION=(UNIT_VELOCITY*UNIT_VELOCITY/UNIT_LENGTH)\nUNIT_FORCE=(UNIT_MASS*UNIT_ACCELERATION)\nUNIT_TIME=(UNIT_LENGTH/UNIT_VELOCITY)\nUNIT_PRESSURE=(UNIT_DENSITY*UNIT_VELOCITY*UNIT_VELOCITY)\n\n#Compute the numeber that transforms from pressure to temperature\n\nKELVIN=UNIT_VELOCITY*UNIT_VELOCITY*c.m_p.cgs/c.k_B.cgs\n\n\ninp.close()\n\n\n#open the actual data file\n\nfname=int(sys.argv[1])\nD=pp.pload(fname)\n\n\n#Get the pressure and density\n\ndensity=np.transpose(D.rho)*UNIT_DENSITY\npressure=np.transpose(D.prs)*UNIT_PRESSURE\n\n#Compute the internal energy from the pressure\n\nenergy=pressure/(GAMMA - 1.)\n\n#Compute/get number densities\n\n#nd=density/(1.43*c.m_p.value)\ntry:\n\tne=np.transpose(D.ne)\n\tnh=np.transpose(D.nh)\nexcept:\n\tprint(\"No ne or nh fields, using 1.43 as scaling to nh\")\n\tnh=density/(1.43*c.m_p.cgs).value\n\tne=nh*1.21\n\n#Get the velocities\n\nv_r=np.transpose(D.vx1)*UNIT_VELOCITY\nv_t=np.transpose(D.vx2)*UNIT_VELOCITY\nv_p=np.transpose(D.vx3)*UNIT_VELOCITY\n\n#And compute the speed\n\nv=np.sqrt(v_r**2+v_t**2)\n\n#Get the cooling rates (if here)\n\ntry:\n\tline_c=np.transpose(D.line_c)\n\txray_h=np.transpose(D.xray_h)\n\tcomp_c=np.transpose(D.comp_c)\n\tcomp_h=np.transpose(D.comp_h)\n\tbrem_c=np.transpose(D.brem_c)\nexcept:\n\tprint(\"No cooling rate info\")\n\ntry:\n\tline_c_pre=np.transpose(D.line_c_pre)\n\txray_h_pre=np.transpose(D.xray_h_pre)\n\tcomp_c_pre=np.transpose(D.comp_c_pre)\n\tcomp_h_pre=np.transpose(D.comp_h_pre)\n\tbrem_c_pre=np.transpose(D.brem_c_pre)\nexcept:\n\tprint(\"No cooling rate prefactors\")\n\n\n#Get optcially thin ionization parameter if here\n\ntry:\n\txi=np.transpose(D.XI)\nexcept:\n\tprint(\"No ionization parameter\")\n\t\n#Get temperature - if present or calculate it\n\t\ntry:\n\ttemperature=np.transpose(D.T)\nexcept:\n\tprint(\"No temperature data - computing\")\n\ttemperature=pressure/UNIT_PRESSURE*KELVIN*0.6/(density/UNIT_DENSITY)\n\n#Get the geometric quantities\n\nr=D.x1*UNIT_LENGTH\ntheta=D.x2\n","sub_path":"pluto/pluto_open.py","file_name":"pluto_open.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"642371775","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-08-11 13:53:32\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\nimport os, sys, platform, time, json, re\nimport requests\nfrom requests.utils import cookiejar_from_dict\nfrom http.cookiejar import LWPCookieJar\nfrom termcolor import colored\nfrom bs4 import BeautifulSoup\n\n\nif platform.system() == \"Windows\":\n\timport colorama\n\tcolorama.init()\n\n\nclass Logging(object):\n\tflag = True\n\n\t@staticmethod\n\tdef error(msg):\n\t\tif Logging.flag:\n\t\t\tprint(\"\".join([colored(\"ERROR\", \"red\"), \": \", colored(msg, \"white\")]))\n\n\t@staticmethod\n\tdef warn(msg):\n\t\tif 
Logging.flag:\n\t\t\tprint(\"\".join([colored(\"WARN\", \"yellow\"), \": \", colored(msg, \"white\")]))\n\n\t@staticmethod\n\tdef info(msg):\n\t\tif Logging.flag:\n\t\t\tprint(\"\".join([colored(\"INFO\", \"magenta\"), \": \", colored(msg, \"white\")]))\n\n\t@staticmethod\n\tdef debug(msg):\n\t\tif Logging.flag:\n\t\t\tprint(\"\".join([colored(\"DEBUG\", \"magenta\"), \": \", colored(msg, \"white\")]))\n\n\t@staticmethod\n\tdef success(msg):\n\t\tif Logging.flag:\n\t\t\tprint(\"\".join([colored(\"SUCCESS\", \"magenta\"), \": \", colored(msg, \"white\")]))\n\n\n\nLogging.flag = True\n\n\nclass LoginPasswordError(Exception):\n\tdef __init__(self, message):\n\t\tself.message = \"账号密码错误\" if not isinstance(message, str) or message == \"\" else message\n\t\tLogging.error(self.message)\n\n\n\nclass NetworkError(Exception):\n\tdef __init__(self, message):\n\t\tself.message = \"网络异常\" if not isinstance(message, str) or message == \"\" else message\n\t\tLogging.error(self.message)\n\n\nclass AccountError(Exception):\n\tdef __init__(self, message):\n\t\tself.message = \"账号类型错误\" if not isinstance(message, str) or message == \"\" else message\n\t\tLogging.error(self.message)\n\n\nclass Login(object):\n\tdef __init__(self):\n\t\tself._session = requests.session()\n\n\t\tself._headers = {\n\t\t\t\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36\",\n\t\t\t\"Host\": \"www.zhihu.com\",\n\t\t\t\"Origin\": \"http://www.zhihu.com\",\n\t\t\t\"Referer\": \"http://www.zhihu.com/\"\n\t\t}\n\t\tself._captcha = \"\"\n\n\tdef login(self, data, url=None):\n\t\tif re.match(\"^1[0-9]{10}$\", data[\"account\"]):\n\t\t\taccount_type = \"phone_num\"\n\t\t\turl_login = url or \"https://www.zhihu.com/login/phone_num\"\n\t\telif re.match(\".+@.+\\.com\", data[\"account\"]):\n\t\t\taccount_type = \"email\"\n\t\t\turl_login = url or \"https://www.zhihu.com/login/email\"\n\t\telse:\n\t\t\traise AccountError(\"账号类型错误\")\n\n\t\tself._data = {\n\t\t\t\"_xsrf\": self._session.cookies.get(\"_xsrf\", \"\"),\n\t\t\t\"password\": data.get(\"password\", \"\"),\n\t\t\t\"captcha\": self._captcha,\n\t\t\t\"remember_me\": \"true\",\n\t\t\taccount_type: data.get(\"account\", \"\")\n\t\t}\n\t\tself._headers[\"X-Xsrftoken\"] = self._session.cookies.get(\"_xsrf\", \"\")\n\t\tself._r = self._session.post(url_login, data=self._data, headers=self._headers)\n\t\tif self._r.status_code != 200:\n\t\t\traise NetworkError(\"提交数据失败\")\n\t\telse:\n\t\t\tself._response_json = json.loads(self._r.content.decode(\"utf-8\"))\n\t\t\tif self._response_json[\"r\"] == 0:\n\t\t\t\tLogging.success(self._response_json[\"msg\"])\n\t\t\t\t# save cookies\n\t\t\t\tlwpcookie = LWPCookieJar('cookie.txt')\n\t\t\t\tcookiejar_from_dict({ c.name: c.value for c in self._session.cookies}, lwpcookie)\n\t\t\t\tlwpcookie.save(ignore_discard=True)\n\t\t\telse:\n\t\t\t\tif self._response_json[\"errcode\"] in [1991829, 100005]:\n\t\t\t\t\tLogging.error(self._response_json[\"msg\"])\n\t\t\t\t\tself.get_captcha()\n\t\t\t\t\tself.login(data)\n\t\t\t\telse:\n\t\t\t\t\tLogging.error(\"未知的错误\")\n\n\tdef get_captcha(self, url=None):\n\t\turl_captcha = url or \"https://www.zhihu.com/captcha.gif\"\n\t\tparams = {\n\t\t\t\"r\": str(int(time.time()*1000)),\n\t\t\t\"type\": \"login\"\n\t\t}\n\t\tself._r = self._session.get(url_captcha, params=params, headers=self._headers)\n\t\tif self._r.status_code != 200:\n\t\t\traise NetworkError(\"验证码请求失败\")\n\t\tcaptcha_name = \"captcha.png\"\n\t\twith open(captcha_name, \"wb\") as 
f:\n\t\t\tf.write(self._r.content)\n\t\tprint(\"验证码下载成功, 正在打开验证码...\")\n\n\t\t# Open the captcha image\n\t\tif platform.system() == \"Linux\":\n\t\t\tLogging.info(\"Command: xdg-open %s &\" % captcha_name )\n\t\t\tos.system(\"xdg-open %s &\" % captcha_name )\n\t\telif platform.system() == \"Darwin\":\n\t\t\tLogging.info(\"Command: open %s &\" % captcha_name )\n\t\t\tos.system(\"open %s &\" % captcha_name )\n\t\telif platform.system() in (\"SunOS\", \"FreeBSD\", \"Unix\", \"OpenBSD\", \"NetBSD\"):\n\t\t\tos.system(\"open %s &\" % captcha_name )\n\t\telif platform.system() == \"Windows\":\n\t\t\tos.system(\"%s\" % captcha_name )\n\t\telse:\n\t\t\tprint(\"我们无法探测你的作业系统,请自行打开验证码 %s 文件,并输入验证码。\" % os.path.join(os.getcwd(), captcha_name))\n\t\t\n\t\tself._captcha = input(\"请输入验证码: \")\n\t\treturn self._captcha\n\n\tdef login_xsrf(self, url=None):\n\t\turl_xsrf = url or \"http://www.zhihu.com\"\n\t\tself._r = self._session.get(url_xsrf, headers=self._headers)\n\t\tif self._r.status_code != 200:\n\t\t\tprint(self._r.status_code)\n\t\t\traise NetworkError(\"访问知乎失败\")\n\n\nif __name__ == \"__main__\":\n\tdata={\n\t\t\"password\": \"xxx\",\n\t\t\"account\": \"xxx\"\n\t}\n\tlogin = Login()\n\tlogin.login_xsrf()\n\tlogin.get_captcha()\n\tlogin.login(data=data)\n\n","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"623307089","text":"\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import NamedStyle, Font, Alignment, PatternFill, Border, Side\nfrom openpyxl.formatting.rule import ColorScaleRule, CellIsRule, FormulaRule\nimport pandas as pd\nimport sys\nimport os\nimport configparser\n\ndef format(file,indices='auto'):\n def format_3x2(ignore_sheets='Sheet1'):\n pass\n\n def format_2x2(ignore_sheets='Sheet1'):\n pass\n\n config = configparser.ConfigParser()\n config.read('excel_style_formats.ini')\n\n # CONFIG PARAMETERS\n\n # numeric formats\n nb_format = config['data columns formatting']['format']\n \n # overall grand totals formats\n current_section = config['overall grand totals formatting']\n overall_grandtotal_fill=PatternFill(start_color=current_section['fill_color'],\n fill_type=current_section['fill_type'])\n overall_grandtotal_font=Font(bold=current_section.getboolean('bold'),\n size=current_section['size'],\n color=current_section['color'])\n\n # grand totals formats\n current_section = config['grand totals formatting']\n grandtotal_fill=PatternFill(start_color=current_section['fill_color'],\n fill_type=current_section['fill_type'])\n grandtotal_font=Font(bold=current_section.getboolean('bold'),\n size=current_section['size'],\n color=current_section['color'])\n\n # sub totals formats\n current_section = config['sub totals formatting']\n subtotal_fill=PatternFill(start_color=current_section['fill_color'],\n fill_type=current_section['fill_type'])\n subtotal_font=Font(bold=current_section.getboolean('bold'),\n size=current_section['size'],\n color=current_section['color'])\n\n # column widths\n data_width = int(config['widths']['data'])\n col1_width = int(config['widths']['col1'])\n col2_width = int(config['widths']['col2'])\n col3_width = int(config['widths']['col3'])\n\n # borders\n bd_thin = Side(style='thin', color='000000')\n\n # headers\n current_section = config['headers formatting']\n header_alignment = Alignment(wrapText=current_section.getboolean('wrap_text'),\n horizontal=current_section['horizontal_align'],\n vertical=current_section['vertical_align'])\n header_font = 
Font(size=current_section['size'],bold=current_section.getboolean('bold'),color=current_section['color'])\n header_fill = PatternFill(start_color=current_section['fill'],fill_type=current_section['fill_type'])\n\n # sub headers\n current_section = config['sub headers formatting']\n sub_header_alignment = Alignment(wrapText=current_section.getboolean('wrap_text'),\n horizontal=current_section['horizontal_align'],\n vertical=current_section['vertical_align'])\n sub_header_font = Font(size=current_section['size'],bold=current_section.getboolean('bold'),color=current_section['color'])\n sub_header_fill = PatternFill(start_color=current_section['fill'],fill_type=current_section['fill_type'])\n\n alphabet = [0,'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',\n 'AA','AB','AC','AD','AE','AF','AG','AH','AI','AJ','AK','AL','AM','AN','AO','AP','AQ','AR','AS','AT','AU']\n\n wb = load_workbook(filename=file)\n\n for sheet in wb.sheetnames:\n current_sheet = wb[sheet]\n # ignore raw data sheet\n if sheet == 'raw':\n continue\n\n if sheet in ['Final Forecast','Original SARIMA Predictions']:\n indices = (2,2)\n min_row = 4\n else:\n indices = (2,1)\n min_row = 2\n\n min_col = 3\n max_row = 1000\n\n # SET INDEX COLUMN WIDTHS\n current_sheet.column_dimensions[alphabet[1]].width = col1_width\n current_sheet.column_dimensions[alphabet[2]].width = col2_width\n\n\n # FIND NUMBER OF NUMERIC COLUMNS\n column_dict = {} # dictionary where key is numeric position of first numeric column, value is a list: [alphabetical representation of column position, desired format]\n c=0 # count through the columns and add 1 to align the indices\n for column in current_sheet.iter_cols(min_row=min_row,\n max_row=min_row, # only searching through the first row that has numeric data (non-column rows)\n min_col=min_col, # start where we know the row indices ends\n max_col=(len(alphabet)-1)): # end where the alphabet list ends until we know the actual number of columns\n if column[0].value != None:\n column_dict[min_col+c] = [alphabet[min_col+c],nb_format]\n c+=1\n else:\n column_dict[min_col+c-1] = [alphabet[min_col+c-1],nb_format]\n break # stop after we found the end of the excel sheet\n\n # FORMAT NUMERIC CELLS\n c = min_col # begin with first numeric column\n max_col = list(column_dict.keys())[-1] # set flag where the columns end\n for column in current_sheet.iter_cols(min_row=min_row,\n max_row=max_row,\n min_col=min_col,\n max_col=max_col): \n for i in range(max_row-min_row):\n if column[i].value != None:\n column[i].number_format = column_dict[c][1] # format each numeric value appropriately\n else:\n break\n c+=1\n\n # BOLD AND COLOR ROWS WITH TOTALS\n r=min_row # iterate through rows\n max_row = i+min_row-1 # set flag where last row is in the sheet\n # this loop should accurately represent the actual sheet dimensions now that they are known\n for row in current_sheet.iter_rows(min_row=min_row,\n max_row=max_row,\n min_col=min_col-1,\n max_col=max_col):\n if row[0].value == 'Total':\n if sheet in ['Final Forecast','Original SARIMA Predictions']:\n # sub totals\n for i in range(max_col-min_col+1):\n row[i+1].font = subtotal_font\n row[i+1].fill = subtotal_fill\n \n if r == max_row:\n if sheet in ['Final Forecast','Original SARIMA Predictions']:\n # totals\n for i in range(max_col-min_col+1):\n row[i+1].font = overall_grandtotal_font\n row[i+1].fill = overall_grandtotal_fill\n for i in range(max_col-min_col+1):\n row[i+1].border = Border(bottom=bd_thin)\n r+=1\n\n # 
ADD BORDERS AROUND EVERY NUMERIC COLUMN\n c = 1\n for column in current_sheet.iter_cols(min_row=1,\n max_row=max_row,\n min_col=min_col,\n max_col=max_col):\n for r in range(max_row):\n if r == indices[1]-1:\n column[r].border = Border(bottom=bd_thin,top=bd_thin,left=bd_thin,right=bd_thin) \n elif r < max_row-1:\n column[r].border = Border(right=bd_thin)\n else:\n column[r].border = Border(bottom=bd_thin,right=bd_thin) \n c+=1\n\n # SPACING\n for i in column_dict:\n current_sheet.column_dimensions[column_dict[i][0]].width = data_width\n\n # HEADER FORMATS\n for column in current_sheet.iter_cols(min_row=1,\n max_row=1,\n min_col=1,\n max_col=max_col):\n column[0].alignment = header_alignment\n column[0].font = header_font\n column[0].fill = header_fill\n\n # SUB HEADER FORMATS\n if indices[1] > 1:\n for column in current_sheet.iter_cols(min_row=2,\n max_row=2,\n min_col=min_col,\n max_col=max_col):\n column[0].alignment = sub_header_alignment\n column[0].font = sub_header_font\n column[0].fill = sub_header_fill\n\n # MERGE ROWS\n for i in range(indices[0]):\n if current_sheet['{}{}'.format(alphabet[i+1],min_row-1)].value != None:\n current_sheet['{}{}'.format(alphabet[i+1],1)].value = current_sheet['{}{}'.format(alphabet[i+1],min_row-1)].value\n current_sheet.merge_cells('{}{}:{}{}'.format(alphabet[i+1],1,alphabet[i+1],min_row-1))\n\n current_sheet['A1'].value = 'LOB'\n if sheet in ['Final Forecast','Original SARIMA Predictions']:\n current_sheet['B1'].value = 'Store'\n else:\n current_sheet['B1'].value = 'Category'\n\n # ADD BORDERS AROUND THE INDEX COLUMNS\n c = 1\n for column in current_sheet.iter_cols(min_row=1,\n max_row=max_row,\n min_col=1,\n max_col=min_col-1):\n for r in range(max_row):\n column[r].border = Border(bottom=bd_thin,top=bd_thin,left=bd_thin,right=bd_thin) \n\n wb.save(file)\n\nif __name__ == '__main__':\n format('Final Forecasts_FY2020.xlsx')\n\ndef format_2x1(pivot_tbl):\n\tpass\ndef format_2x2(pivot_tbl):\n\tpass\ndef format_3x1(pivot_tbl):\n\tpass\ndef format_3x2(pivot_tbl):\n\tpass","sub_path":"format_excel_pivot_tbls/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":9469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"392485717","text":"from collections import deque\r\n# import sys\r\n# sys.stdin = open('input.txt')\r\n\r\nN, M, x, y, num = map(int, input().split())\r\n\r\narr = [list(map(int, input().split())) for _ in range(N)]\r\n\r\n# All faces start at 0\r\ndice = [0] * 6\r\n\r\n# Face order: bottom, left, top, right, front, back\r\n# Rolling right -> front and back stay fixed\r\n# top -> right, left -> top, right -> bottom, bottom -> left\r\n# Rolling left is the reverse of rolling right\r\n# Rolling up -> left and right stay fixed\r\n# bottom -> front, top -> back, front -> top, back -> bottom\r\ndef move(dir):\r\n if dir == 1:\r\n dice[0], dice[1], dice[2], dice[3] = dice[3], dice[0], dice[1], dice[2]\r\n elif dir == 2:\r\n dice[0], dice[1], dice[2], dice[3] = dice[1], dice[2], dice[3], dice[0]\r\n elif dir == 3:\r\n dice[0], dice[2], dice[4], dice[5] = dice[5], dice[4], dice[0], dice[2]\r\n else:\r\n dice[0], dice[2], dice[4], dice[5] = dice[4], dice[5], dice[2], dice[0]\r\n\r\ncommand = deque(list(map(int, input().split())))\r\ndelta = {1:(0, 1), 2:(0, -1), 3:(-1, 0), 4:(1, 0)}\r\nwhile command:\r\n dir = command.popleft()\r\n dx, dy = delta[dir]\r\n nx, ny = x + dx, y + dy\r\n if 0 <= nx < N and 0 <= ny < M:\r\n move(dir)\r\n if arr[nx][ny] == 0:\r\n arr[nx][ny] = dice[0]\r\n\r\n else:\r\n dice[0] = arr[nx][ny]\r\n arr[nx][ny] = 0\r\n x, y = nx, ny\r\n print(dice[2])","sub_path":"Algorithm/BOJ/[14499] 주사위 굴리기.py","file_name":"[14499] 주사위 
굴리기.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"493756123","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2018。3.29\n@author: Wu.Xin\n主要用于win窗口的操作\n'''\n\nfrom ctypes import *\nimport datetime\nimport tempfile\nimport time\nfrom ubpa import encrypt\nfrom ubpa.iconstant import *\nfrom ubpa.ierror import *\nfrom ubpa.ilog import ILog, RpaServer\nfrom ubpa.itools.http_sender import *\nimport ubpa.base_native_ait as nit\nimport getpass\nimport json\nfrom urllib import request\nimport configparser\nimport chardet\nimport requests\n\n__logger = ILog(__file__)\n\ndll = windll.LoadLibrary(\"../Com.Isearch.Func.AutoIt/AutoItX3.dll\") # 调AutoItX3动态库\n\n#dll = windll.LoadLibrary(r\"D:\\svn\\isa\\branches\\ueba_5.0\\makesetup\\CdaSetupDate\\plugin\\Com.Isearch.Func.AutoIt\\AutoItX\\AutoItX3.dll\")\n\n'''\n 激活某个窗口\nwin_title :窗口标题\n返回: True:成功\n\n'''\ndef do_win_activate(win_title=\"\", win_text=\"\", waitfor=WAIT_FOR):\n __logger.debug('do_win_activate :[' + win_title + ']')\n try:\n starttime = time.time()\n dll.AU3_Opt(\"WinTitleMatchMode\", 2)\n while True:\n \n rst = dll.AU3_WinActivate(win_title, win_text)\n \n if rst == 1:\n return True\n else:\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('do_win_activate error:[' + win_title + ']')\n raise WinNotFoundError('do_win_activate error:[' + win_title + ']')\n time.sleep(TRY_INTERVAL)\n except Exception as e:\n raise e\n\n\n'''\n 检查指定窗口是否存在且被激活\nwin_title :窗口标题\n\n返回: True:成功 False:失败\n\n'''\ndef do_win_is_active(win_title=\"\", win_text=\"\", waitfor=1):\n __logger.debug('do_win_is_active:[' + win_title + ']')\n try:\n starttime = time.time()\n dll.AU3_Opt(\"WinTitleMatchMode\", 2)\n while True:\n rst = dll.AU3_WinActive(win_title, win_text)\n if rst == 1:\n return True\n else:\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('do win is not active :[' + win_title + ']')\n return False\n time.sleep(TRY_INTERVAL)\n except Exception as e:\n raise e\n\n'''\n 最大化窗口\nwin_title : 窗口标题\nwin_text :窗口文本\nwaitfor :超时\n返回: True:成功 False:失败\n\n'''\ndef do_win_maximize(win_title='', win_text=None, waitfor=WAIT_FOR):\n __logger.debug('do_win_maximize:[' + win_title + ']')\n\n try:\n starttime = time.time()\n dll.AU3_Opt(\"WinTitleMatchMode\", 2)\n while True:\n rst = dll.AU3_WinSetState(win_title, \"\", 3)\n if rst == 1:\n return True\n else:\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('window maximize error:[' + win_title + ']')\n raise WinNotFoundError('window maximize error:[' + win_title + ']')\n time.sleep(TRY_INTERVAL)\n except Exception as e:\n raise e\n\n\n'''\n 最小化窗口\nwin_title : 窗口标题\nwin_text :窗口文本\nwaitfor :超时\n返回: True:成功 False:失败\n\n'''\ndef do_win_minimize(win_title='', win_text=None, waitfor=WAIT_FOR):\n __logger.debug('do_win_minimize:[' + win_title + ']')\n try:\n starttime = time.time()\n dll.AU3_Opt(\"WinTitleMatchMode\", 2)\n while True:\n rst = dll.AU3_WinSetState(win_title, \"\", 6)\n if rst == 1:\n return True\n else:\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('do_win_minimize error:[' + win_title + ']')\n raise WinNotFoundError('do_win_minimize error:[' + win_title + ']')\n time.sleep(TRY_INTERVAL)\n except Exception as e:\n raise e\n\n\n'''\n 关闭窗口\nwin_title : 窗口标题\nwin_text :窗口文本\nwaitfor :超时\n返回: True:成功 False:失败\n\n'''\ndef do_win_close(win_title=\"\", win_text=None, waitfor=WAIT_FOR):\n 
__logger.debug('do_win_close:[' + win_title + ']')\n try:\n starttime = time.time()\n dll.AU3_Opt(\"WinTitleMatchMode\", 2)\n if win_title != \"\":\n while True:\n rst = dll.AU3_WinClose(win_title, \"\")\n if rst == 1:\n return True\n else:\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('do_win_close error:[' + win_title + ']')\n raise WinNotFoundError('do_win_close error:[' + win_title + ']')\n time.sleep(TRY_INTERVAL)\n else:\n __logger.debug('win_title is null')\n except Exception as e:\n raise e\n\n\n'''\n 强行关闭指定窗口\nwin_title : 窗口标题\nwin_text :窗口文本\nwaitfor :超时\n返回: True:成功 False:失败\n\n'''\ndef do_win_kill(win_title=\"\", win_text=None, waitfor=WAIT_FOR):\n __logger.debug('ready to excute:[' + win_title + ']')\n try:\n starttime = time.time()\n dll.AU3_Opt(\"WinTitleMatchMode\", 2)\n if win_title != \"\":\n while True:\n rst = dll.AU3_WinKill(win_title, \"\")\n if rst == 1:\n return True\n else:\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('do_win_kill error:[' + win_title + ']')\n raise WinNotFoundError('do_win_kill error:[' + win_title + ']')\n time.sleep(TRY_INTERVAL)\n else:\n __logger.debug('win_title is null')\n except Exception as e:\n raise e\n\n\n'''\n组装au3的代码\n'''\ndef pack_au3_data():\n pre_msg = \"#include \" \\\n + '\\n' + \"Local $str = ''\" \\\n + '\\n' + \"Local $aList = WinList()\" \\\n + '\\n' + \"For $i = 1 To $aList[0][0]\" \\\n + '\\n' + \" If $aList[$i][0] <> '' And BitAND(WinGetState($aList[$i][1]), 2) Then\" \\\n + '\\n' + \" $str = $str &','& $aList[$i][0]\" \\\n + '\\n' + \" EndIf\" \\\n + '\\n' + \"Next\" \\\n + '\\n' + 'ConsoleWrite($str)'\n return pre_msg\n\n\n'''\n遍历窗口标题\n'''\ndef do_win_list():\n __logger.debug('win_title_list')\n try:\n msg = pack_au3_data() ##组装生成au3所需的数据\n tmp_au3_file_path = nit.gen_au3_file(msg) # 生成XXX.au3文件并返回路径\n status, error_string, stdout_string = nit.run_autoit(tmp_au3_file_path)\n nit.cleanup(tmp_au3_file_path)\n plist = str(get_win_list_string(stdout_string))\n return plist\n except Exception as e:\n raise e\n\ndef get_win_list_string(msg_string):\n return u' '.join(\n line for line in msg_string.decode('utf-8').splitlines()\n ).strip()\n\ndef do_process_close(pcocess=None):\n '''\n 关闭进程\n '''\n __logger.debug('Ready to close the application')\n try:\n while True:\n p_exist = dll.AU3_ProcessExists(pcocess)\n if p_exist == 0 :\n break\n elif p_exist != 0 :\n rst = dll.AU3_ProcessClose(pcocess)\n if rst != 1:\n break\n except Exception as e:\n raise e\n\n\ndef unlock_screen(uname,upass,try_times=3, esc_wait_time=2000,next_wait_time=2000):\n ldll = windll.LoadLibrary(\"../Com.Isearch.Driver.WinIO/RpaAutoLogin.dll\")\n upass = encrypt.decrypt(upass)\n# ldll = windll.LoadLibrary(r\"D:\\svn\\isa\\branches\\ueba_5.0\\makesetup\\CdaSetupDate\\plugin\\Com.Isearch.Driver.WinIO\\RpaAutoLogin.dll\")\n result = ldll.do_autologin(uname,upass,try_times,esc_wait_time,next_wait_time)\n return result\n\n\ndef unlock_screen_remote(uname='',domain='',upass='',addr='',port=3389,try_interval=2, waitfor=60):\n try:\n screen_dll = windll.LoadLibrary(\"../Com.Isearch.Func.ScreenLock/ScreenLockCheck.dll\")\n char_name = screen_dll.GetCurrentUsername() # 调用dll 获取 uname\n uname = string_at(char_name, -1).decode('utf-8')\n screen_dll.FreePointer(char_name)\n\n char_domain = screen_dll.GetCurrentDomain()\n domain = string_at(char_domain, -1).decode('utf-8') # 调用dll 获取 domain\n screen_dll.FreePointer(char_domain)\n\n rpasever = RpaServer()\n agentUUID = rpasever.AgentUUID\n mainServer = 
rpasever.MainServer\n webServicePort = rpasever.WebServicePort\n\n starttime = time.time()\n has_send_http_flag = False\n while True:\n screen_status = is_screen_locked()\n if screen_status == 0:\n __logger.debug('The screen is now unlocked')\n return True\n else:\n if has_send_http_flag == False: # 未成功发送过http请求\n upass = encrypt.decrypt(upass)\n data = {\"msg_type\":\"rpa\", \"a\": \"conn_desk\", \"agent_no\": agentUUID, \"user_name\": uname, \"user_pass\": upass,\n \"addr\": addr, \"port\": str(port), \"scale\": \"100\", \"resolution\": \"widthXheight\",\n \"timeout\": \"60\",\"user_domain\":domain}\n http_url = 'http://' + str(mainServer) + \":\" + str(webServicePort)+'/wservice.action'\n whole_request_url = http_url + '?jsonStr=' + str(data)\n res = requests.get(whole_request_url)\n dict = json.loads(str(res.text))\n status = dict['status']\n if status == '0':\n has_send_http_flag = True\n __logger.debug('Has sent a screen request')\n else: # 成功发送过http请求\n runtime = time.time() - starttime\n if runtime >= waitfor:\n __logger.debug('Operation timeout')\n raise Exception\n time.sleep(try_interval)\n except Exception as e:\n raise e\n\n\ndef is_screen_locked():\n '''\n 调用dll 判断是否为锁屏状态\n :return: 0 (int) 未锁屏状态\n 1 (int) 锁屏状态\n '''\n screen_dll = windll.LoadLibrary(\"../Com.Isearch.Func.ScreenLock/ScreenLockCheck.dll\")\n result = screen_dll.IsScreenLock()\n return result\n\ndef oper_lock(tip_show=1,timeout=0):\n '''\n 鼠标键盘操作锁定\n NONE tip_show=0\n LEFT_TOP tip_show=1\n RIGHT_TOP tip_show=2\n LEFT_BOTTOM tip_show=3\n RIGHT_BOTTOM tip_show=4\n\n '''\n oper_lock_dll = windll.LoadLibrary(\"../Com.Isearch.DesktopOperLock/DesktopOperLock.dll\")\n oper_lock_dll.Lock(tip_show,timeout)\n\n\n\n# do_win_close(\"记事本\")","sub_path":"RPA/组件/i-search/ubpa/iwin.py","file_name":"iwin.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"331834843","text":"class Result:\r\n\r\n def __init__(self, bagrout, sechem, accepted, opt_bagrout=None):\r\n self.avg_bagrut_score = bagrout\r\n self.opt_bargout = opt_bagrout\r\n self.sechem_score = sechem\r\n self.accepted_profs = accepted\r\n\r\n def __str__(self):\r\n str = ''\r\n str += 'AVG Bagrut score is: ' + self.avg_bagrut_score\r\n if self.sechem_score:\r\n str += ', Sechem score is: ' + self.sechem_score\r\n if self.opt_bargout:\r\n str += 'Optimal AVG bagrut score is:' + self.opt_bargout\r\n str += ', list of accepted majors is: '\r\n # str += self.accepted_profs\r\n return str","sub_path":"Tziunim_server/Result.py","file_name":"Result.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"173272650","text":"from django.shortcuts import render, redirect\nfrom .models import Task\nfrom .forms import AddTask\nfrom django.views.generic import ListView\n\n# View for Tasks\n\nclass TaskListView(ListView):\n\tqueryset = Task.objects.all()\n\ttemplate_name = \"task_page.html\"\n\n\tdef get_queryset(self, *args, **kwargs):\n\t\trequest = self.request\n\t\treturn Task.objects.all()\n\n# User Add Task View\ndef add_task(request):\n\tif not request.user.is_authenticated():\n\t\tredirect('/home/')\n\n\tqueryset = Task.objects.all()\n\tif request.method=='POST':\n\t\tform = AddTask(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tsubject_choice = form.cleaned_data.get(\"subject_choice\")\n\t\t\ttitle = form.cleaned_data.get(\"title\")\n\t\t\tbook = 
form.cleaned_data.get(\"book\")\n\t\t\tfile = form.cleaned_data.get(\"file\")\n\t\t\tuploader = request.user.username\n\t\t\ttask = Task(title=title, subject_choice=subject_choice, book=book, uploader=uploader, file=request.FILES['file'])\n\t\t\ttask.save()\n# if form.cleaned_data.get(\"file\") is None:\n\n# else:\n\t\t\treturn redirect('/home/')\n\n\telse:\n\t\tform = AddTask()\n\n\treturn render(request, \"add_task.html\", { \"form\":form })","sub_path":"src/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"230221337","text":"# This file is used to install the dependencies for runPnoise and runBSIM.\n# You can get the dependencies using either of the following commands:\n# pip3 install --user .\n# or:\n# python3 setup.py install --user\n\nfrom setuptools import setup\n\ndependencies = \"\"\"\n docopt\n matplotlib\n numpy\n inform\n quantiphy>=2.12\n psf-utils>=0.3\n shlib\n svg_schematic>=0.7\n\"\"\"\n\nsetup(\n name=\"flicker-noise\",\n description=\"runs flicker noise simulations\",\n author=\"Geoffrey Coram, Colin McAndrew, Kiran Gullapalli and Ken Kundert\",\n author_email=\"ken@designers-guide.com\",\n version=\"1.2.0\",\n license=\"GPLv3+\",\n # script='runPnoise runBSIM'.split(),\n install_requires=dependencies.split(),\n python_requires=\">=3.6\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"98363467","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import api, fields, models, _, tools\n\n\nclass account_invoice_tax(models.Model):\n _inherit = \"account.invoice.tax\"\n\n @api.multi\n @api.depends('tax_id')\n def _get_tax_data(self):\n for tax_line in self:\n tax = 'tax_id' in tax_line.tax_id or False # If patch apply and module account_invoice_tax installed\n tax_name = (tax and tax.tax_category_id and \\\n tax.tax_category_id.code or tax_line.name).upper().replace('.', '').replace(' ', '').replace('-', '')\n tax_percent = (tax and tax.amount or False) # validate? 
type='percent'\n tax_percent = tax_percent or (tax_line.amount and tax_line.amount_base and \\\n tax_line.amount / abs(tax_line.amount_base) or 0.0)\n if 'IVA' in tax_name:\n tax_name = 'IVA'\n if not tax and tax_percent > 0:\n tax_percent = round(tax_percent, 0) # Hay problemas de decimales al calcular el iva, y hasta ahora el iva no tiene decimales\n elif 'ISR' in tax_name:\n tax_name = 'ISR'\n elif 'IEPS' in tax_name:\n tax_name = 'IEPS'\n tax_line.name2 = tax_name\n tax_line.tax_percent = tax_percent\n\n name2 = fields.Char(compute=_get_tax_data, string='Código Impuesto', store=True)\n tax_percent = fields.Float(compute=_get_tax_data, string='Porcentaje Impuesto', store=True)\n","sub_path":"extrasGDL/facturacion/l10n_mx_account_tax_category/invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"414553523","text":"import copy\nimport io\nimport json\nimport pytest\n\nfrom dcicutils import ff_mocks as ff_mocks_module, ff_utils as ff_utils_module\nfrom dcicutils.ff_utils import stuff_in_queues\nfrom dcicutils.ff_mocks import (\n AbstractIntegratedFixture, AbstractTestRecorder, IntegratedTestRecorder, TestScenarios,\n mocked_authorized_request_verbs, controlled_time_mocking,\n)\nfrom dcicutils.misc_utils import ignored, local_attrs, PRINT\nfrom dcicutils.qa_utils import MockResponse, MockFileSystem, ControlledTime, printed_output\nfrom unittest import mock\nfrom .helpers import using_fresh_legacy_state_for_testing\n\n\ndef test_abstract_integrated_fixture_no_server_fixtures():\n\n with mock.patch.object(ff_mocks_module, \"NO_SERVER_FIXTURES\"): # too late to set env variable, but this'll do.\n assert AbstractIntegratedFixture._initialize_class() == 'NO_SERVER_FIXTURES' # noQA - yes, it's protected\n assert AbstractIntegratedFixture.verify_portal_access('not-a-dictionary') == 'NO_SERVER_FIXTURES'\n\n\ndef test_abstract_integrated_fixture_misc():\n\n with mock.patch.object(AbstractIntegratedFixture, \"_initialize_class\"):\n fixture = AbstractIntegratedFixture(name='foo')\n fixture.S3_CLIENT = mock.MagicMock()\n\n sample_portal_access_key = {'key': 'abc', 'secret': 'shazam', 'server': 'http://genes.example.com/'}\n sample_higlass_access_key = {'key': 'xyz', 'secret': 'bingo', 'server': 'http://higlass.genes.example.com/'}\n\n fixture.S3_CLIENT.get_access_keys.return_value = sample_portal_access_key\n assert fixture.portal_access_key() == sample_portal_access_key\n\n env_name = 'fourfront-foobar'\n fixture.__class__.ENV_NAME = env_name\n fixture.S3_CLIENT.get_higlass_key.return_value = sample_higlass_access_key\n assert fixture.higlass_access_key() == sample_higlass_access_key\n\n fixture.INTEGRATED_FF_ITEMS = {'alpha': 'a', 'beta': 'b', 'some_key': '99999'}\n assert fixture['alpha'] == 'a'\n assert fixture['beta'] == 'b'\n with pytest.raises(Exception):\n ignored(fixture['gamma'])\n assert fixture['self'] == fixture\n\n with mock.patch.object(ff_mocks_module, \"id\", lambda _: \"1234\"):\n assert str(fixture) == (\"{'self': ,\"\n \" 'alpha': 'a',\"\n \" 'beta': 'b',\"\n \" 'some_key': \"\n \"}\")\n\n assert repr(fixture) == \"AbstractIntegratedFixture(name='foo')\"\n\n with mock.patch.object(ff_mocks_module, \"authorized_request\") as mock_authorized_request:\n # Code 418 = \"I'm a teapot\", which is at least good for testing.\n mock_authorized_request.return_value = MockResponse(status_code=418)\n with pytest.raises(Exception) as exc:\n assert 
fixture.verify_portal_access({'server': 'http://server.not.available'})\n assert str(exc.value) == (f'Environment {env_name} is not ready for integrated status.'\n f' Requesting the homepage gave status of: 418')\n\n class MyIntegratedFixture(AbstractIntegratedFixture):\n pass\n\n assert repr(MyIntegratedFixture('bar')) == \"MyIntegratedFixture(name='bar')\"\n\n\ndef test_abstract_test_recorder_context_managers():\n\n r = AbstractTestRecorder()\n\n with pytest.raises(NotImplementedError):\n with r.recorded_requests('foo', None):\n pass\n\n with pytest.raises(NotImplementedError):\n with r.replayed_requests('foo', None):\n pass\n\n\ndef test_abstract_test_recorder_recording_enabled_and_recording_level():\n\n r = AbstractTestRecorder()\n\n assert r.recording_level == 0\n assert not r.recording_enabled\n with r.creating_record():\n assert r.recording_level == 1\n assert not r.recording_enabled\n assert r.recording_level == 0\n assert not r.recording_enabled\n\n with local_attrs(r, recording_enabled=True):\n assert r.recording_level == 0\n assert r.recording_enabled\n with r.creating_record():\n assert r.recording_level == 1\n assert not r.recording_enabled\n assert r.recording_level == 0\n assert r.recording_enabled\n\n assert r.recording_level == 0\n assert not r.recording_enabled\n\n\n@pytest.mark.parametrize(\"recording_enabled\", [False, True])\ndef test_abstract_test_recorder_recording(recording_enabled):\n\n recordings_dir = 'my_recordings'\n test_name = 'foo'\n r = AbstractTestRecorder(recordings_dir=recordings_dir)\n output_stream = io.StringIO()\n r.recording_fp = output_stream\n r.recording_enabled = recording_enabled\n r.recording_level = 0\n\n dt = ControlledTime()\n\n initial_data = {'initial': 'data'}\n\n mfs = MockFileSystem()\n\n with printed_output() as printed:\n with mfs.mock_exists_open_remove():\n with r.setup_recording(test_name, initial_data):\n with mock.patch.object(ff_mocks_module, \"datetime\", dt):\n\n datum4, datum3, datum2, datum1 = data_server_stack = [\n {'verb': 'GET', 'url': 'http://any', 'data': None, 'duration': 17.0,\n 'error_type': RuntimeError, 'error_message': 'yikes'},\n {'verb': 'GET', 'url': 'http://baz', 'data': None, 'duration': 15.0, 'status': 400,\n 'result': 'sorry'},\n {'verb': 'GET', 'url': 'http://bar', 'data': None, 'duration': 20.0, 'status': 200,\n 'result': 'omega'},\n {'verb': 'GET', 'url': 'http://foo', 'data': None, 'duration': 10.0, 'status': 200,\n 'result': \"alpha\"},\n ]\n\n def simulate_actual_server():\n start_time = dt.just_now() - dt._tick_timedelta\n info = data_server_stack.pop()\n duration = info.get('duration')\n if duration:\n # We subtract 1 from the duration because 'just_now()' occurs\n # after the first measuring of time that will already have been done\n dt.set_datetime(start_time + dt._tick_timedelta * duration)\n if info.get('error_message'):\n error_type = info.get('error_type')\n raise error_type(info.get('error_message'))\n return MockResponse(status_code=info['status'], json=info['result'])\n\n response = r.do_mocked_record(action=simulate_actual_server, verb=datum1['verb'], url=datum1['url'])\n assert response.status_code == 200\n assert response.json() == datum1['result'] # 'alpha'\n\n response = r.do_mocked_record(action=simulate_actual_server, verb=datum2['verb'], url=datum2['url'])\n assert response.status_code == 200\n assert response.json() == datum2['result'] # 'omega'\n\n response = r.do_mocked_record(action=simulate_actual_server, verb=datum3['verb'], url=datum3['url'])\n assert response.status_code 
== 400\n assert response.json() == datum3['result'] # 'sorry'\n\n with pytest.raises(RuntimeError) as exc:\n r.do_mocked_record(action=simulate_actual_server, verb=datum4['verb'], url=datum4['url'])\n raise AssertionError(\"Should not get here.\")\n assert str(exc.value) == datum4['error_message'] # 'yikes'\n\n if recording_enabled:\n\n expected = {\n f\"{recordings_dir}/{test_name}\":\n f'{json.dumps(initial_data)}\\n'\n f'{json.dumps(datum1)}\\n'\n f'{json.dumps(datum2)}\\n'\n f'{json.dumps(datum3)}\\n'\n f'{json.dumps(datum4, default=lambda x: x.__name__)}\\n'.encode('utf-8')\n }\n else:\n expected = {f\"{recordings_dir}/{test_name}\": f\"{json.dumps(initial_data)}\\n\".encode('utf-8')}\n\n assert mfs.files == expected\n\n recording = \"Recording\" if recording_enabled else \"NOT recording\"\n assert printed.lines == [\n f'{recording} GET http://foo normal result',\n f'{recording} GET http://bar normal result',\n f'{recording} GET http://baz normal result',\n f'{recording} GET http://any error result'\n ]\n\n\ndef test_abstract_test_recorder_playback():\n\n r = AbstractTestRecorder()\n r.dt = ControlledTime()\n\n mfs = MockFileSystem()\n\n with printed_output() as printed:\n with mfs.mock_exists_open_remove():\n\n with mock.patch.object(r, \"get_next_json\") as mock_get_next_json:\n datum4, datum3, datum2, datum1 = data_stack = [\n {'verb': 'GET', 'url': 'http://any', 'data': None, 'duration': 17.0,\n 'error_type': RuntimeError, 'error_message': 'yikes'},\n {'verb': 'GET', 'url': 'http://baz', 'data': None, 'duration': 15.0, 'status': 400,\n 'result': 'sorry'},\n {'verb': 'GET', 'url': 'http://bar', 'data': None, 'duration': 20.0, 'status': 200,\n 'result': 'omega'},\n {'verb': 'GET', 'url': 'http://foo', 'data': None, 'duration': 10.0, 'status': 200,\n 'result': \"alpha\"},\n ]\n mock_get_next_json.side_effect = lambda: data_stack.pop()\n\n response = r.do_mocked_replay(datum1['verb'], datum1['url'])\n assert response.status_code == 200\n assert response.json() == datum1['result'] # 'alpha'\n\n response = r.do_mocked_replay(datum2['verb'], datum2['url'])\n assert response.status_code == 200\n assert response.json() == datum2['result'] # 'omega'\n\n response = r.do_mocked_replay(datum3['verb'], datum3['url'])\n assert response.status_code == 400\n assert response.json() == datum3['result'] # 'sorry'\n\n with pytest.raises(Exception) as exc:\n r.do_mocked_replay(datum4['verb'], datum4['url'])\n raise AssertionError(\"Should not get here.\")\n assert str(exc.value) == datum4['error_message'] # 'yikes'\n\n assert mfs.files == {} # no files created on playback\n\n assert printed.lines == [\n f\"Replaying GET {datum1['url']}\", # http://foo\n f\" from recording of normal result for GET {datum1['url']}\",\n f\"Replaying GET {datum2['url']}\", # http://bar\n f\" from recording of normal result for GET {datum2['url']}\",\n f\"Replaying GET {datum3['url']}\", # http://baz\n f\" from recording of normal result for GET {datum3['url']}\",\n f\"Replaying GET {datum4['url']}\", # http://any\n f\" from recording of error result for GET {datum4['url']}\",\n ]\n\n\n@pytest.mark.parametrize(\"recording_enabled\", [False, True])\n@pytest.mark.parametrize(\"sample_result\", [False, True])\n@pytest.mark.parametrize(\"check_secondary\", [False, True])\ndef test_mocked_recording_stuff_in_queues(recording_enabled, sample_result, check_secondary):\n\n dt = ControlledTime()\n r = AbstractTestRecorder()\n r.recording_enabled = recording_enabled\n r.dt = dt\n output_stream = io.StringIO()\n r.recording_fp = 
output_stream\n namespace = \"some_namespace\"\n with mock.patch.object(ff_mocks_module, \"datetime\", dt):\n with mock.patch.object(ff_utils_module, \"internal_compute_stuff_in_queues\") as mock_compute_stuff_in_queues:\n mock_compute_stuff_in_queues.return_value = sample_result\n r.mocked_recording_stuff_in_queues(ff_env_index_namespace='some_namespace', check_secondary=check_secondary)\n if recording_enabled:\n expected_result = {\n 'verb': 'stuff-in-queues',\n 'url': None,\n 'data': {'ff_env_index_namespace': namespace, \"check_secondary\": check_secondary},\n 'duration': 1.0,\n 'result': sample_result\n }\n assert output_stream.getvalue() == json.dumps(expected_result) + '\\n'\n else:\n assert output_stream.getvalue() == \"\"\n\n\ndef test_get_next_json():\n\n item1, item2 = [{\"item\": 1}, {\"item\": 2}]\n\n stream = io.StringIO()\n PRINT(json.dumps(item1), file=stream)\n PRINT(json.dumps(item2), file=stream)\n stream.seek(0)\n\n r = AbstractTestRecorder()\n r.recording_fp = stream\n\n assert r.get_next_json() == item1\n assert r.get_next_json() == item2\n with pytest.raises(AssertionError) as exc:\n r.get_next_json()\n assert str(exc.value) == \"Out of replayable records.\"\n\n\ndef test_copy_integrated_ff_masking_credentials():\n\n orig = {\n \"ff_key\": {\"key\": \"test-key\", \"secret\": \"test-secret\", \"server\": \"http://whatever\"},\n \"ff_env\": \"test_env\",\n \"ff_env_index_namespace\": \"test_namespace\",\n }\n copy1 = IntegratedTestRecorder.copy_integrated_ff_masking_credentials(orig)\n\n # Check that these two items were NOT copied\n assert copy1['ff_key']['key'] != orig['ff_key']['key']\n assert copy1['ff_key']['secret'] != orig['ff_key']['secret']\n\n # Check that the rest of the items WERE copied\n copy2 = copy.deepcopy(orig)\n del copy1['ff_key']['key']\n del copy2['ff_key']['key']\n del copy1['ff_key']['secret']\n del copy2['ff_key']['secret']\n assert copy1 == copy2\n\n\ndef test_mock_record_stuff_in_queues():\n\n result_for_testing = \"any-bool-for-testing\"\n\n class MyTestRecorder(AbstractTestRecorder):\n\n def mocked_recording_stuff_in_queues(self, ff_env_index_namespace, check_secondary):\n ignored(ff_env_index_namespace, check_secondary)\n return result_for_testing\n\n r = MyTestRecorder()\n\n with r.mock_record_stuff_in_queues():\n assert stuff_in_queues('example-namespace', check_secondary=True) == result_for_testing\n\n\ndef test_mocked_authorized_request_verbs():\n n_verbs = 0\n with mocked_authorized_request_verbs():\n for verb in ff_utils_module.REQUESTS_VERBS:\n n_verbs += 1\n with pytest.raises(AssertionError):\n ff_utils_module.REQUESTS_VERBS[verb]('anything')\n assert n_verbs == 5 # GET, POST, PATCH, PUT, DELETE\n\n def explicitly_mocked(*args, **kwargs):\n ignored(args, kwargs)\n return \"whatever\"\n\n to_mock = {\n 'GET': explicitly_mocked,\n 'DELETE': explicitly_mocked,\n }\n n_disabled = 0\n n_mocked = 0\n with mocked_authorized_request_verbs(**to_mock):\n for verb in ff_utils_module.REQUESTS_VERBS:\n if verb in to_mock:\n n_mocked += 1\n assert ff_utils_module.REQUESTS_VERBS[verb]('anything') == \"whatever\"\n else:\n n_disabled += 1\n with pytest.raises(AssertionError):\n ff_utils_module.REQUESTS_VERBS[verb]('anything')\n assert n_mocked == len(to_mock)\n assert n_disabled == n_verbs - n_mocked\n\n\n@pytest.mark.parametrize(\"enabled\", [False, True])\ndef test_controlled_time_mocking(enabled):\n\n # Test that whether enabled is true or false, we still get a ControlledTime back as dt\n with controlled_time_mocking(enabled=enabled) as dt:\n 
assert isinstance(dt, ControlledTime)\n\n\ndef test_mocked_replaying_stuff_in_queues():\n\n r = AbstractTestRecorder()\n r.dt = ControlledTime()\n\n # Test normal result\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'stuff-in-queues', 'url': None, 'data': None, 'duration': 10.0, 'status': 200,\n 'result': True}),\n file=stream)\n stream.seek(0)\n\n result = r.mocked_replaying_stuff_in_queues('any-env', check_secondary=True)\n assert result is True\n\n # Test error result\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'stuff-in-queues', 'url': None, 'data': None, 'duration': 10.0, 'status': 200,\n 'error_type': 'RuntimeError', 'error_message': 'yikes'}),\n file=stream)\n stream.seek(0)\n\n with pytest.raises(Exception) as exc:\n r.mocked_replaying_stuff_in_queues('any-env', check_secondary=True)\n assert not isinstance(exc, AssertionError)\n assert str(exc.value) == \"yikes\"\n\n # Test mismatched recording data\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'GET', 'url': 'http://any', 'data': None, 'duration': 17.0,\n 'error_type': 'RuntimeError', 'error_message': 'yikes'}),\n file=stream)\n stream.seek(0)\n\n with pytest.raises(AssertionError) as exc:\n r.mocked_replaying_stuff_in_queues('any-env', check_secondary=True)\n assert str(exc.value) == \"Actual call stuff-in-queues does not match expected call GET http://any\"\n\n\ndef test_mocked_replay():\n\n r = AbstractTestRecorder()\n r.dt = ControlledTime()\n\n # Test normal result\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'GET', 'url': \"http://foo\", 'data': None, 'duration': 10.0, 'status': 200,\n 'result': {\"some\": \"result\"}}),\n file=stream)\n stream.seek(0)\n\n response = r.do_mocked_replay(verb='GET', url=\"http://foo\")\n assert response.status_code == 200\n assert response.json() == {\"some\": \"result\"}\n\n # Test error result\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'GET', 'url': \"http://foo\", 'data': None, 'duration': 10.0, 'status': 200,\n 'error_type': 'RuntimeError', 'error_message': 'yikes'}),\n file=stream)\n stream.seek(0)\n\n with pytest.raises(Exception) as exc:\n r.do_mocked_replay(verb='GET', url=\"http://foo\")\n assert not isinstance(exc, AssertionError)\n assert str(exc.value) == \"yikes\"\n\n # Test mismatched recording call\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'POST', 'url': 'http://any', 'data': None, 'duration': 17.0,\n 'result': 'ignored-result'}),\n file=stream)\n stream.seek(0)\n\n with pytest.raises(AssertionError) as exc:\n r.do_mocked_replay(verb='GET', url=\"http://foo\")\n assert str(exc.value) == \"Actual call GET http://foo does not match expected call POST http://any\"\n\n # Test mismatched recording data\n stream = io.StringIO()\n r.recording_fp = stream\n PRINT(json.dumps({'verb': 'POST', 'url': 'http://foo', 'data': 'something', 'duration': 17.0,\n 'result': 'ignored-result'}),\n file=stream)\n stream.seek(0)\n\n with pytest.raises(AssertionError) as exc:\n r.do_mocked_replay(verb='POST', url=\"http://foo\", data='something-else')\n assert str(exc.value) == \"Data mismatch on call POST http://foo.\"\n\n\n@using_fresh_legacy_state_for_testing()\ndef test_test_scenario_methods():\n\n assert TestScenarios.mocked_auth_key('foo') == 'fookey'\n assert TestScenarios.mocked_auth_key('fourfront-foo') == 'fookey'\n assert TestScenarios.mocked_auth_key('fourfront-cgapfoo') == 'cgapfookey'\n 
assert TestScenarios.mocked_auth_key('cgap-foo') == 'cgap-fookey'\n\n assert TestScenarios.mocked_auth_secret('foo') == 'foosecret'\n assert TestScenarios.mocked_auth_secret('fourfront-foo') == 'foosecret'\n assert TestScenarios.mocked_auth_secret('fourfront-cgapfoo') == 'cgapfoosecret'\n assert TestScenarios.mocked_auth_secret('cgap-foo') == 'cgap-foosecret'\n\n assert TestScenarios.mocked_auth_server('foo') == 'http://foo.4dnucleome.org/'\n assert TestScenarios.mocked_auth_server('fourfront-foo') == 'http://foo.4dnucleome.org/'\n assert TestScenarios.mocked_auth_server('fourfront-cgapfoo') == 'http://cgapfoo.4dnucleome.org/'\n assert TestScenarios.mocked_auth_server('cgap-foo') == 'http://cgap-foo.4dnucleome.org/'\n\n assert TestScenarios.mocked_auth_key_secret_tuple('foo') == ('fookey', 'foosecret')\n assert TestScenarios.mocked_auth_key_secret_tuple('fourfront-foo') == ('fookey', 'foosecret')\n assert TestScenarios.mocked_auth_key_secret_tuple('fourfront-cgapfoo') == ('cgapfookey', 'cgapfoosecret')\n assert TestScenarios.mocked_auth_key_secret_tuple('cgap-foo') == ('cgap-fookey', 'cgap-foosecret')\n\n assert TestScenarios.mocked_auth_key_secret_server_dict('foo') == {\n 'key': 'fookey',\n 'secret': 'foosecret',\n 'server': 'http://foo.4dnucleome.org/',\n }\n assert TestScenarios.mocked_auth_key_secret_server_dict('fourfront-foo') == {\n 'key': 'fookey',\n 'secret': 'foosecret',\n 'server': 'http://foo.4dnucleome.org/',\n }\n assert TestScenarios.mocked_auth_key_secret_server_dict('fourfront-cgapfoo') == {\n 'key': 'cgapfookey',\n 'secret': 'cgapfoosecret',\n 'server': 'http://cgapfoo.4dnucleome.org/',\n }\n assert TestScenarios.mocked_auth_key_secret_server_dict('cgap-foo') == {\n 'key': 'cgap-fookey',\n 'secret': 'cgap-foosecret',\n 'server': 'http://cgap-foo.4dnucleome.org/',\n }\n","sub_path":"test/test_ff_mocks.py","file_name":"test_ff_mocks.py","file_ext":"py","file_size_in_byte":21411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"514837935","text":"import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport re\n\nurl = input('Enter: ')\ncount_num = int(input('Enter count: '))\npos = int(input('Enter position: '))\n\n\ndef parseHtml(url, pos):\n html = urllib.request.urlopen(url).read() # open the current supplied URL\n soup = BeautifulSoup(html, 'html.parser') # turn it into soup\n tags = soup('a') # grab just the a tags from the soup\n i = 0\n for tag in tags: # loop through the tags, looking for tag\n i += 1 # stored at given position\n if i == pos:\n return tag.get('href', None) # return just the url from that tag\n\ncurrent_num = 0\nwhile current_num < count_num: # loop from zero up to given count number\n print('Retrieving: ', url) # print which url is being looked at\n url = parseHtml(url, pos) # call the parse function to get next url\n current_num += 1 # increment count\n\nprint('The Last URL of this turn:', url) # print final result\n\n\n","sub_path":"chap_12_function_version.py","file_name":"chap_12_function_version.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"368631843","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom app.config import SQLALCHEMY_DATABASE_URI\n\n\nclass Metrics_DB:\n Session = None\n\n def init(self):\n self.engine = create_engine(SQLALCHEMY_DATABASE_URI)\n self.Session = sessionmaker(bind=self.engine)\n\n def 
save(self, obj):\n session = self.Session()\n\n try:\n session.add(obj)\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def update(self, obj, filters, **kwargs):\n session = self.Session()\n\n try:\n session_obj = session.query(obj).filter_by(**filters)\n session_obj.update(kwargs)\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n","sub_path":"app/metrics_db.py","file_name":"metrics_db.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"545997002","text":"import argparse\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import linear_model\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\nimport numpy as np\r\nfrom math import sqrt\r\nfrom scipy import stats\r\n\r\ndef main(args):\r\n data_file=args.data_file\r\n with open(data_file, 'r') as df:\r\n data = df.readlines()\r\n\r\n data = data[1:]\r\n data = np.array([[float(col) for col in row.split(',')] for row in data])\r\n #np.random.shuffle(data)\r\n # splitting the data into a training and a testing part\r\n train_data= data[0:750,:]\r\n test_data= data[750:1000,:]\r\n # splitting the training data into x and y parts\r\n train_x=train_data[:,1:5]\r\n train_y=train_data[:,0]\r\n # splitting the test data into x and y parts\r\n test_x=test_data[:,1:5]\r\n test_y=test_data[:,0]\r\n\r\n # fitting a linear model using scikit-learn's linear_model module\r\n regr = linear_model.LinearRegression(fit_intercept =True)\r\n regr.fit(train_x, train_y)\r\n\r\n # prediction with the trained model\r\n test_y_pred = regr.predict(test_x)\r\n params = np.append(regr.intercept_,regr.coef_)\r\n # The coefficients\r\n print('Coefficients: %s' % str(params))\r\n # The root mean squared error\r\n print(\"RMSE for test data: %f\" % sqrt(mean_squared_error(test_y, test_y_pred)))\r\n print(\"RMSE for training data: %f\" % sqrt(mean_squared_error(train_y, regr.predict(train_x))))\r\n\r\n\r\n #--------code for calculating the p-values of the coefficients-------\r\n newX = np.append(np.ones((len(train_x),1)), train_x, axis=1)\r\n MSE = mean_squared_error(test_y, test_y_pred)\r\n\r\n var_b = MSE*(np.linalg.inv(np.dot(newX.T,newX)).diagonal())\r\n sd_b = np.sqrt(var_b)\r\n ts_b = params/ sd_b\r\n\r\n p_values =[2*(1-stats.t.cdf(np.abs(i),(len(newX)-1))) for i in ts_b]\r\n p_values=np.round(p_values,3)\r\n print(\"p-values: %s\" % str(p_values))\r\n\r\n\r\nif __name__==\"__main__\":\r\n #print(\"This code is part one of the project; it performs 4-D Linear Regression on data and gives different statistics\")\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\"--data-file\", action=\"store\", dest=\"data_file\", type=str, help=\"Data file\", default=\"ME781_dataset_160100022.csv\")\r\n args = parser.parse_args()\r\n\r\n main(args)","sub_path":"project1/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"614736904","text":"\nimport json\nimport math\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom tqdm import tqdm\n\n\ndef matplotlib_imshow(img, one_channel=False):\n fig = plt.figure()\n if one_channel:\n img = img.mean(dim=0)\n plt.imshow(img.numpy(), cmap=\"Greys\")\n # plt.show()\n return img\n\n else:\n img = img.numpy().transpose(1, 2, 0)\n unnorm_img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255\n # unnorm_img = img * 255\n img = img.astype('uint8')\n unnorm_img = unnorm_img.astype('uint8')\n norm_image = torch.Tensor(img).permute(2, 0, 1)\n 
plt.imshow(unnorm_img)\n # plt.savefig(\"train_images.jpg\")\n # plt.show()\n return norm_image, fig\n\ndef plot_data_loader_image(data_loader):\n batch_size = data_loader.batch_size\n plot_num = min(batch_size, 4)\n\n json_path = './class_indices.json'\n assert os.path.exists(json_path), json_path + \" does not exist.\"\n json_file = open(json_path, 'r')\n class_indices = json.load(json_file)\n\n for data in data_loader:\n images, labels = data\n for i in range(plot_num):\n # [C, H, W] -> [H, W, C]\n img = images[i].numpy().transpose(1, 2, 0)\n # 反Normalize操作\n img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255\n label = labels[i].item()\n plt.subplot(1, plot_num, i+1)\n plt.xlabel(class_indices[str(label)])\n plt.xticks([]) # 去掉x轴的刻度\n plt.yticks([]) # 去掉y轴的刻度\n plt.imshow(img.astype('uint8'))\n plt.show()\n\ndef train_one_epoch(model, data_loader, device, optimizer, loss_function, epoch,scheduler):\n model.train()\n accu_loss = torch.zeros(1).to(device)\n accu_num = torch.zeros(1).to(device)\n optimizer.zero_grad()\n\n sample_num = 0\n # data_loader = tqdm(data_loader)\n for step, data in enumerate(data_loader):\n images, labels = data\n sample_num += images.shape[0]\n\n pred = model(images.to(device))\n pred_classes = torch.max(pred, dim=1)[1]\n accu_num += torch.eq(pred_classes, labels.to(device)).sum()\n\n loss = loss_function(pred, labels.to(device))\n loss.backward()\n accu_loss += loss.detach() # accu_loss+=loss.item()\n\n print(\n \"train epoch {} step {} train loss: {:.5f} train acc: {:.5f} lr: {:.7f}\".format(\n epoch, step + 1, accu_loss.item() / (step + 1), accu_num.item() / sample_num,\n optimizer.param_groups[0][\"lr\"]))\n if not torch.isfinite(loss):\n print('WARNING: non-finite loss, ending training ', loss)\n sys.exit(1)\n\n optimizer.step()\n optimizer.zero_grad()\n # scheduler.step()\n\n return accu_loss.item() / (step + 1), accu_num.item() / sample_num\n\n\n@torch.no_grad()\ndef evaluate(model, data_loader, device, loss_function, epoch):\n model.eval()\n accu_loss = torch.zeros(1).to(device)\n accu_num = torch.zeros(1).to(device)\n\n sample_num = 0\n # data_loader = tqdm(data_loader)\n for step, data in enumerate(data_loader):\n images, labels = data\n sample_num += images.shape[0]\n\n pred = model(images.to(device))\n pred_classes = torch.max(pred, dim=1)[1]\n\n accu_num += torch.eq(pred_classes, labels.to(device)).sum()\n\n loss = loss_function(pred, labels.to(device))\n accu_loss += loss\n\n print(\"[valid epoch {} step {}] val loss: {:.5f}, val acc: {:.5f}\".format(epoch, step + 1,\n accu_loss.item() / (step + 1),\n accu_num.item() / sample_num))\n return accu_loss.item() / (step + 1), accu_num.item() / sample_num\n\ndef create_lr_scheduler(optimizer,\n num_step: int,\n epochs: int,\n warmup=True,\n warmup_epochs=1,\n warmup_factor=1e-3,\n end_factor=1e-6):\n assert num_step > 0 and epochs > 0\n if warmup is False:\n warmup_epochs = 0\n\n def f(x):\n \"\"\"\n 根据step数返回一个学习率倍率因子,\n 注意在训练开始之前,pytorch会提前调用一次lr_scheduler.step()方法\n \"\"\"\n if warmup is True and x <= (warmup_epochs * num_step):\n alpha = float(x) / (warmup_epochs * num_step)\n # warmup过程中lr倍率因子从warmup_factor -> 1\n return warmup_factor * (1 - alpha) + alpha\n else:\n current_step = (x - warmup_epochs * num_step)\n cosine_steps = (epochs - warmup_epochs) * num_step\n # warmup后lr倍率因子从1 -> end_factor\n return ((1 + math.cos(current_step * math.pi / cosine_steps)) / 2) * (1 - end_factor) + end_factor\n\n return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=f)\n\n\ndef 
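create_lr_scheduler in the utils.py record above feeds a warmup-then-cosine multiplier to LambdaLR; the factor function can be sanity-checked on its own (defaults here are illustrative):

```python
import math

def lr_factor(step, num_step=100, epochs=10, warmup_epochs=1,
              warmup_factor=1e-3, end_factor=1e-6):
    # Same shape as the record's inner f(x): a linear ramp during warmup,
    # then half a cosine from 1 down to end_factor.
    warmup_steps = warmup_epochs * num_step
    if step <= warmup_steps:
        alpha = step / warmup_steps
        return warmup_factor * (1 - alpha) + alpha
    cosine_steps = (epochs - warmup_epochs) * num_step
    cur = step - warmup_steps
    return ((1 + math.cos(cur * math.pi / cosine_steps)) / 2) * (1 - end_factor) + end_factor

for s in (0, 50, 100, 500, 1000):
    print(s, round(lr_factor(s), 6))  # 1e-3 at step 0, 1.0 at step 100, 1e-6 at the end
```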
get_params_groups(model: torch.nn.Module, weight_decay: float = 1e-5):\n # 记录optimize要训练的权重参数\n parameter_group_vars = {\"decay\": {\"params\": [], \"weight_decay\": weight_decay},\n \"no_decay\": {\"params\": [], \"weight_decay\": 0.}}\n\n # 记录对应的权重名称\n parameter_group_names = {\"decay\": {\"params\": [], \"weight_decay\": weight_decay},\n \"no_decay\": {\"params\": [], \"weight_decay\": 0.}}\n\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n\n if len(param.shape) == 1 or name.endswith(\".bias\"):\n group_name = \"no_decay\"\n else:\n group_name = \"decay\"\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n\n print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n return list(parameter_group_vars.values())","sub_path":"classification/seNet/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"221329927","text":"########################################################################################################################\n# This is the first improvement using SVD from the exercise sheet 2 #\n# Searched through every possible value for truncating the diagonal SVD-matrix and looks for the one with the #\n# smallest RMSE-Error, keeping the first 8 singular values is best #\n########################################################################################################################\n\n# movie rating prediction based on given matrix entries\n# 10'000 Users (rows)\n# 1'000 Movies (columns)\n\nimport matplotlib.pyplot as plt\nfrom sarah.inputhandler import *\n\nnr_of_users = 10000\nnr_of_movies = 1000\n\n\n# the prediction for any missing value X[i,j] can be computed as the inner product of the ith row in U and the jth\n# column in V\ndef predict_values(asked_entries, u, v, matrix):\n for (i, j) in asked_entries:\n matrix[i][j] = int(round(u[i, :].dot(v[:, j])))\n return matrix\n\n\ndef truncate(s, to_truncate):\n n = s.shape[0]\n # print(s.shape)\n return np.append(s[:to_truncate], np.zeros(n-to_truncate))\n\n\ndef plot(values):\n y = []\n for i in range(len(values)):\n if values[i] > 1:\n y.append(values[i])\n x = [i for i in range(len(y))]\n plt.plot(x, y)\n plt.show()\n print(values)\n\n\ndef compute_svd(data_matrix, to_truncate):\n u, s, v_t = np.linalg.svd(data_matrix, full_matrices=True) # compute the svd\n # plot(s[1:])\n s = truncate(s, to_truncate) # specify how many singular values you want to keep\n s_filled = np.zeros((u.shape[0], v_t.shape[0])) # create a matrix for the eigenvalues\n s_filled[:min(u.shape[0], v_t.shape[0]), :min(u.shape[0], v_t.shape[0])] = np.diag(s) # fill sigma with EV\n # print(u.shape, s.shape, s_filled.shape, v_t.shape)\n u_ = np.dot(u, s_filled) # multiply with singular values matrix\n return u_, v_t\n\n\nloaded_matrix, given_ratings, mean = load_data('../input/data_train.csv')\nU, V = compute_svd(loaded_matrix, 8)\nasked_entries = get_asked_entries()\nresult_matrix = predict_values(asked_entries, U, V, loaded_matrix)\nstore_data(result_matrix)\n","sub_path":"sarah/svd.py","file_name":"svd.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"71194845","text":"# Standard imports\r\nimport concurrent.futures\r\nfrom urllib.parse import quote as urllib_quote\r\n\r\n# 
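svd.py keeps the first k singular values by zero-padding the diagonal; slicing the factors gives the same rank-k reconstruction without building the full Σ matrix. A small numpy check:

```python
import numpy as np

rng = np.random.default_rng(1)
A = rng.normal(size=(8, 5))
k = 3

u, s, vt = np.linalg.svd(A, full_matrices=False)

# Zeroing all but the first k singular values (as the record does) is
# equivalent to slicing the factors.
A_k = u[:, :k] @ np.diag(s[:k]) @ vt[:k, :]
assert np.allclose(A_k, (u * np.where(np.arange(len(s)) < k, s, 0)) @ vt)
print(np.linalg.matrix_rank(A_k))  # 3
```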
Third party imports\r\nimport requests\r\n\r\n# Local imports\r\nfrom database.db_errors import *\r\nimport database.db_tables as tables\r\nfrom database import Session\r\nfrom . import db_interface as db\r\nfrom sqlalchemy import func\r\n\r\n\r\nclass Analysis(object):\r\n def __init__(self, library):\r\n self.library = library\r\n self.session = Session()\r\n self.total_paper_count = self.session.query(tables.MainPaperInfo).count()\r\n\r\n self.without_dois = -1\r\n self.without_dois_count = -1\r\n self.without_pmids = -1\r\n self.without_pmids_count = -1\r\n self.without_files = -1\r\n self.without_files_count = -1\r\n self.without_file_info = -1\r\n self.without_file_info_count = -1\r\n self.duplicate_doi_dict = -1\r\n self.duplicate_entries = -1\r\n self.invalid_doi_count = -1\r\n\r\n\r\n if self.total_paper_count == 0:\r\n raise LookupError('No documents found in database. Please sync with library '\r\n 'or add documents before performing analysis.')\r\n\r\n def run(self):\r\n '''\r\n Performs all of the analysis methods.\r\n Returns nothing; instead sets class attributes.\r\n '''\r\n self.without_dois_count = self.missing_doi_search()\r\n self.without_pmids_count = self.missing_pmid_search()\r\n (self.without_files_count, self.without_file_info_count) = self.missing_file_search()\r\n self.duplicate_doi_count = self.duplicate_doi_search()\r\n self.invalid_doi_count = self.validate_dois()\r\n\r\n def missing_doi_search(self):\r\n without_dois = self.session.query(tables.MainPaperInfo).filter((tables.MainPaperInfo.in_lib == 1)\r\n & ((tables.MainPaperInfo.doi == None)\r\n | (tables.MainPaperInfo.doi == ''))).all()\r\n self.without_dois = without_dois\r\n\r\n without_dois_count = len(without_dois)\r\n return without_dois_count\r\n\r\n def missing_pmid_search(self):\r\n without_pmids = self.session.query(tables.MainPaperInfo).filter((tables.MainPaperInfo.pubmed_id == None)\r\n | (tables.MainPaperInfo.pubmed_id == '')).all()\r\n self.without_pmids = without_pmids\r\n\r\n without_pmids_count = len(without_pmids)\r\n return without_pmids_count\r\n\r\n def missing_file_search(self):\r\n # Documents where the has_file entry has been explicitly set to 0\r\n without_files = self.session.query(tables.MainPaperInfo).filter_by(has_file=0).all()\r\n without_files_count = len(without_files)\r\n\r\n self.without_files = without_files\r\n\r\n # Documents where the has_file entry has not been set\r\n without_file_info = self.session.query(tables.MainPaperInfo).filter_by(has_file=None).all()\r\n without_file_info_count = len(without_file_info)\r\n\r\n self.without_file_info = without_file_info\r\n\r\n return without_files_count, without_file_info_count\r\n\r\n def duplicate_doi_search(self):\r\n all_duplicates = self.session.query(tables.MainPaperInfo).\\\r\n having(func.count(tables.MainPaperInfo.doi) > 1).\\\r\n group_by(tables.MainPaperInfo.doi).all()\r\n\r\n doi_dict = {}\r\n # Create a dict matching DOIs to titles\r\n for entry in all_duplicates:\r\n doi = entry.doi\r\n if doi in doi_dict.keys():\r\n if isinstance(doi_dict[doi], list):\r\n doi_dict[doi].append(entry.title)\r\n else:\r\n new_list = [doi_dict[doi], entry.title]\r\n doi_dict[doi] = new_list\r\n else:\r\n doi_dict[entry.doi] = entry.title\r\n\r\n self.duplicate_doi_dict = doi_dict\r\n self.duplicate_entries = all_duplicates\r\n\r\n return len(doi_dict.keys())\r\n\r\n def validate_dois(self):\r\n docs_with_dois = self.session.query(tables.MainPaperInfo).filter((tables.MainPaperInfo.in_lib == 1)\r\n & (tables.MainPaperInfo.doi != 
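integrity.py's duplicate_doi_search mirrors a GROUP BY ... HAVING count > 1 query; the same bookkeeping in plain Python (toy rows standing in for MainPaperInfo):

```python
from collections import Counter

# Toy stand-in for the MainPaperInfo rows queried in the record.
papers = [
    {"doi": "10.1/abc", "title": "Paper A"},
    {"doi": "10.1/abc", "title": "Paper A (dup)"},
    {"doi": "10.2/xyz", "title": "Paper B"},
]

counts = Counter(p["doi"] for p in papers)
duplicates = {doi: [p["title"] for p in papers if p["doi"] == doi]
              for doi, n in counts.items() if n > 1}
print(duplicates)  # {'10.1/abc': ['Paper A', 'Paper A (dup)']}
```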
None)\r\n & (tables.MainPaperInfo.doi != '')).all()\r\n\r\n for doc in docs_with_dois:\r\n self._doi_validate_helper(doc)\r\n\r\n # Update session with changes\r\n self.session.commit()\r\n\r\n invalid_doi_count = self.session.query(tables.MainPaperInfo).filter((tables.MainPaperInfo.in_lib == 1)\r\n & (tables.MainPaperInfo.valid_doi == 0)).count()\r\n\r\n self.invalid_doi_count = invalid_doi_count\r\n self.valid_doi_count = self.total_paper_count - invalid_doi_count\r\n\r\n return invalid_doi_count\r\n\r\n def _doi_validate_helper(self, db_object):\r\n doi = db_object.doi\r\n url = 'http://dx.doi.org/' + urllib_quote(doi)\r\n resp = requests.get(url)\r\n\r\n print('Validating DOIs in library')\r\n\r\n # Should probably get rid of these print statements\r\n # in favor of a progress bar?\r\n # print(resp.status_code)\r\n # print('Entered URL: ' + url)\r\n # print('Response URL: ' + resp.url)\r\n # print()\r\n\r\n if resp.ok:\r\n db_object.valid_doi = 1\r\n # ScienceDirect often does not allow requests to go directly to an article page.\r\n # It will return status code 404, but will still end up going to a ScienceDirect\r\n # URL if the DOI is valid and corresponds with a ScienceDirect-hosted paper\r\n elif resp.status_code == 404:\r\n if 'sciencedirect' in resp.url or 'ScienceDirect' in resp.content:\r\n db_object.valid_doi = 1\r\n else:\r\n db_object.valid_doi = 0\r\n else:\r\n db_object.valid_doi = 0\r\n\r\n def _sentence_case_fix(self):\r\n pass\r\n\r\n def __repr__(self):\r\n return u'' \\\r\n 'Total document count: %d\\n' % self.total_paper_count + \\\r\n 'Documents without DOIs: %d\\n' % self.without_dois_count + \\\r\n 'Documents without PMIDs: %d\\n' % self.without_pmids_count + \\\r\n 'Documents without files: %d\\n' % self.without_files_count + \\\r\n 'Documents that may be missing files: %d\\n' % self.without_file_info_count + \\\r\n 'DOIs that are duplicated: %d\\n' % self.duplicate_doi_count + \\\r\n 'DOIs that are invalid: %d\\n' % self.invalid_doi_count + \\\r\n 'If these categories return -1, please run Analysis.run()'\r\n","sub_path":"mendeley/integrity.py","file_name":"integrity.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"335822794","text":"# -*- coding: utf-8 -*-\n# Time: 2019/6/12 21:50\n# Author: laugc\n# Email: hahalgc@gmail.com\n# File: py77_tcp_client.py\n\nimport socket\n\n\"\"\"\ntcp 编程\nSocket 是网络编程的一个抽象概念\n通常用一个 Socket 表示「打开了一个网络链接」,而打开一个 Socket 需要知道目标计算机的 IP 地址和端口号,再指定协议类型即可\n\"\"\"\n\n# 创建 socket\n# AF_INET 指定使用 IPv4 协议,AF_INET6 指定为 IPv6\n# SOCK_STREAM指 定使用面向流的 TCP 协议\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# 建立连接\n# 注意参数是一个 tuple\ns.connect(('www.sina.com.cn', 80))\n\n# 发送数据\ns.send(b'GET / HTTP/1.1\\r\\nHost: www.sina.com.cn\\r\\nConnection: close\\r\\n\\r\\n')\n\n# 接收数据\nbuffer = []\nwhile True:\n # 每次最多接收 1k 字节\n d = s.recv(1024)\n if d:\n buffer.append(d)\n else:\n break\n\ndata = b''.join(buffer)\nprint(data)\n\n# 接收到的数据包括 HTTP 头和网页本身,把 HTTP 头打印出来,网页内容保存到文件\nheader, html = data.split(b'\\r\\n\\r\\n', 1)\nprint(header.decode('utf-8'))\n# 把接收的数据写入文件\nwith open('sina.html', 'wb') as f:\n f.write(html)\n\n# 关闭连接\ns.close()\n","sub_path":"internet_protocol/py77_tcp_client.py","file_name":"py77_tcp_client.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"551887485","text":"# Crie um programa que leia o ano de nascimento de sete pessoas.\n# No 
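integrity.py imports concurrent.futures but resolves DOIs one request at a time; a hedged sketch of fanning the same dx.doi.org checks out over a thread pool (the example DOIs are placeholders):

```python
import concurrent.futures
from urllib.parse import quote as urllib_quote

import requests

def doi_resolves(doi, timeout=10):
    # Same resolver endpoint the record uses; resp.ok means the final
    # status after redirects was below 400.
    resp = requests.get("http://dx.doi.org/" + urllib_quote(doi), timeout=timeout)
    return doi, resp.ok

dois = ["10.1038/nphys1170", "10.0000/not-a-real-doi"]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
    for doi, ok in pool.map(doi_resolves, dois):
        print(doi, "valid" if ok else "invalid")
```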
final, mostre quantas pessoas ainda não atingiram a maioridade e quantas já são maiores.\n\nfrom datetime import date\natual = date.today().year\ntotmaior = 0\ntotmenor = 0\nfor pessoa in range(1, 7+1):\n nasc = int(input(f'Qual o ano de nascimento {pessoa}º pessoa? '))\n idade = atual - nasc\n if idade >= 21:\n totmaior = totmaior+1\n else:\n totmenor = totmenor+1\nprint(f'Temos {totmaior} pessoas maiores de idade\\nTemos {totmenor} pessoas menores de idade')\n\n\n\n\n\n","sub_path":"Exercicios/exercicio54.py","file_name":"exercicio54.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"33841183","text":"from typing import Iterable, Optional, Tuple\nfrom utils.sql_fetcher import SqlFetcher\nfrom utils.utils import decode_mention\nimport psycopg2.extensions as sql\n\n\nclass Categoriser:\n\tdef __init__(self, conn: sql.connection, sql_fetcher: SqlFetcher) -> None:\n\t\tself.conn = conn\n\t\tself.sql_fetcher = sql_fetcher\n\n\tdef categorise_person(\n\t\tself, person_id: int, channels: Iterable[str]\n\t) -> Tuple[bool, Optional[str]]:\n\t\t\"\"\"Adds the person to the categories linked to the channels mentioned. Returns whether or not it succeeded on the first retrun value, and an error message (or None) as the second value.\"\"\"\n\t\treturn self.__add_remove_categories(\n\t\t\t\"categorise_person.sql\", person_id, channels\n\t\t)\n\n\tdef decategorise_person(\n\t\tself, person_id: int, channels: Iterable[str]\n\t) -> Tuple[bool, Optional[str]]:\n\t\t\"\"\"Removes the person from the categories linked to the channels mentioned. Returns whether or not it succeeded on the first retrun value, and an error message (or None) as the second value.\"\"\"\n\t\treturn self.__add_remove_categories(\n\t\t\t\"decategorise_person.sql\", person_id, channels\n\t\t)\n\n\tdef __add_remove_categories(\n\t\tself, sql_file: str, person_id: int, channels: Iterable[str]\n\t) -> Tuple[bool, Optional[str]]:\n\t\tquery = self.sql_fetcher[sql_file]\n\t\twith self.conn as conn:\n\t\t\twith conn.cursor() as cursor:\n\t\t\t\tfor channel in channels:\n\t\t\t\t\tmention_type, channel_id = decode_mention(channel)\n\t\t\t\t\tif mention_type != \"channel\":\n\t\t\t\t\t\treturn (\n\t\t\t\t\t\t\tFalse,\n\t\t\t\t\t\t\tf'Expected a channel mention in place of \"{channel}\".',\n\t\t\t\t\t\t)\n\t\t\t\t\tcursor.execute(\n\t\t\t\t\t\tquery, {\"person_id\": person_id, \"channel_id\": channel_id}\n\t\t\t\t\t)\n\t\t\t\t\treturn True, None\n","sub_path":"modules/email_registry/categoriser.py","file_name":"categoriser.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"229710236","text":"import chess.pgn\nimport os\nimport conversions\nimport chess_model\n\nimport torch\n\n#folder containing pgn list\nSAMPLE_FOLDER = \"trainpgn/\"\n\n#where to save model\nSAVE_LOCATION = \"model10000.torch\"\n\n#number of games to train on\nMAXGAMES = 10000\n\n#max epochs if we run out of games\nEPOCHS = 1\n\nmodel = chess_model.make_model()\n\nloss_fn = torch.nn.MSELoss(reduction='sum')\nlearning_rate = 1e-4\n\n#games trained on\nnum = 1\nfor epoch in range(EPOCHS):\n\tfor filename in os.listdir(SAMPLE_FOLDER):\n\t\tif not filename.endswith(\".pgn\"):\n\t\t\tcontinue\n\t\tpgn = open(SAMPLE_FOLDER + filename)\n\t\tgame = chess.pgn.read_game(pgn)\n\t\twhile game != None:\n\t\t\tnum = num + 1\n\t\t\tif num%100 == 0:\n\t\t\t\tprint(\"game #%i\" % num)\n\t\t\tif 
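In categoriser.py above, `return True, None` is indented inside the channel loop, so only the first mention is ever processed; a sketch of the intended control flow (decode_mention and the prepared query assumed available as in the record, other names hypothetical):

```python
def add_remove_categories(cursor, query, person_id, channels, decode_mention):
    # Validate and apply the query for every channel; only report success
    # once the whole loop has completed.
    for channel in channels:
        mention_type, channel_id = decode_mention(channel)
        if mention_type != "channel":
            return False, f'Expected a channel mention in place of "{channel}".'
        cursor.execute(query, {"person_id": person_id, "channel_id": channel_id})
    return True, None
```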
num>=MAXGAMES:\n\t\t\t\tbreak;\n\t\t\t\n\t\t\ty = 0.5\n\t\t\tif game.headers['Result']=='1-0':\n\t\t\t\ty = 1\n\t\t\telif game.headers['Result'] == '0-1':\n\t\t\t\ty = 0\n\t\t\ty = torch.FloatTensor([y])\n\t\t\tboard = game.board()\n\t\t\tfor move in game.mainline_moves():\n\t\t\t\tboard.push(move)\n\n\t\t\t\ty_pred = model(conversions.board_to_onehot(board))\n\t\t\t\tloss = loss_fn(y_pred, y)\n\t\t\t\tmodel.zero_grad()\n\t\t\t\tloss.backward()\n\t\t\t\t\n\t\t\t\twith torch.no_grad():\n\t\t\t\t\tfor param in model.parameters():\n\t\t\t\t\t\tparam -= learning_rate * param.grad\n\t\t\tgame = chess.pgn.read_game(pgn)\n\t\tif num>=MAXGAMES:\n\t\t\tbreak;\n\t\t\t\nprint(\"Saving model...\")\ntorch.save(model.state_dict(), SAVE_LOCATION)\nprint(\"Model saved at \" + SAVE_LOCATION)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"493553265","text":"import tensorflow as tf\nimport time\n\nfrom .transformer_core import *\n\n\ntrain_step_signature = [\n tf.TensorSpec(shape=(None, None, None), dtype=tf.float64),\n]\n\n\nclass DecoderLayer(tf.keras.layers.Layer):\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(DecoderLayer, self).__init__()\n\n self.mha1 = MultiHeadAttention(d_model, num_heads)\n\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n\n def call(self, x, training, \n look_ahead_mask, padding_mask):\n # enc_output.shape == (batch_size, input_seq_len, d_model)\n\n attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n\n\n ffn_output = self.ffn(out1) # (batch_size, target_seq_len, d_model)\n ffn_output = self.dropout3(ffn_output, training=training)\n out3 = self.layernorm3(ffn_output + out1) # (batch_size, target_seq_len, d_model)\n\n return out3, attn_weights_block1\n\n \nclass Decoder(tf.keras.layers.Layer):\n def __init__(self, num_layers, d_model, num_heads, dff, \n maximum_position_encoding, inp_dim,\n rate=0.1):\n super(Decoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n \n \n self.input_layer = tf.keras.Sequential([\n tf.keras.layers.Input((None, None, inp_dim)),\n tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)\n tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)\n ])\n\n\n self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)\n\n self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate) \n for _ in range(num_layers)]\n self.dropout = tf.keras.layers.Dropout(rate)\n \n\n def call(self, x, training, \n look_ahead_mask, padding_mask):\n\n x = self.input_layer(x)\n\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n\n\n x += self.pos_encoding[:, :seq_len, :]\n\n x = self.dropout(x, training=training)\n\n for i in range(self.num_layers):\n x, block1 = self.dec_layers[i](x, training, look_ahead_mask, padding_mask)\n\n attention_weights['decoder_layer{}_block1'.format(i+1)] = block1\n \n \n # x.shape == (batch_size, target_seq_len, 
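train.py just above updates parameters by hand under torch.no_grad(); the idiomatic equivalent hands that walk to torch.optim — a toy-model sketch, not the record's chess network:

```python
import torch

model = torch.nn.Linear(8, 1)  # toy stand-in for chess_model
loss_fn = torch.nn.MSELoss(reduction="sum")
opt = torch.optim.SGD(model.parameters(), lr=1e-4)

x, y = torch.randn(4, 8), torch.rand(4, 1)
loss = loss_fn(model(x), y)

opt.zero_grad()   # replaces model.zero_grad()
loss.backward()
opt.step()        # replaces the manual `param -= lr * param.grad` walk
```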
d_model)\n return x, attention_weights\n\n\n\n\n\n\n\nclass Transformer(tf.keras.Model):\n \n \n def __init__(self, num_layers_enc, num_layers_dec, d_model, num_heads, dff,\n maximum_position_encoding, net_info, inp_dim, final_dim, config,\n rate=0.1):\n\n super(Transformer, self).__init__()\n \n self.decoder = Decoder(num_layers_dec, d_model, num_heads, dff, maximum_position_encoding, inp_dim,\n rate)\n\n self.final_layer = tf.keras.layers.Dense(final_dim, activation=None)\n\n \n self.pre_date_order = config[\"PRE_DATE_ORDER\"]\n self.date_fields = config[\"DATE_ORDER\"]\n self.post_date_order = config[\"POST_DATE_ORDER\"]\n \n self.FIELD_STARTS = config[\"FIELD_STARTS\"]\n self.FIELD_DIMS = config[\"FIELD_DIMS\"]\n \n \n for name, dim in net_info:\n acti = config[\"ACTIVATIONS\"].get(name, None)\n self.__setattr__(name, tf.keras.layers.Dense(dim, activation=acti))\n \n \n self.train_loss = tf.keras.metrics.Mean(name='train_loss')\n self.results = dict([(x, []) for x in [\"loss\", \"val_loss\", \"val_loss_full\",\"parts\"]])\n\n \n \n def call(self, tar, training,\n look_ahead_mask, dec_padding_mask):\n\n\n tar_inp = tar[:, :-1] # predict next from this\n tar_out = tar[:, 1:] \n \n# print(f\"tar shape {tar.shape}\", f\"tar_inp shape {tar_inp.shape}\", f\"tar_out shape {tar_out.shape}\")\n\n # dec_output.shape == (batch_size, tar_seq_len, d_model)\n dec_output, attention_weights = self.decoder(\n tar_inp, training, look_ahead_mask, dec_padding_mask)\n\n# print(f\"dec_output shape {dec_output.shape}\")\n# print(f\"final_output shape {final_output.shape}\")\n \n final_output = self.final_layer(dec_output)\n preds = {}\n \n# print(\"Final output shape start\", final_output.shape)\n for net_name in self.pre_date_order:\n# print(\"Running net\", net_name)\n pred = self.__getattribute__(net_name)(final_output)\n# print(\"pred shape\", pred.shape)\n preds[net_name] = pred\n \n st = self.FIELD_STARTS[net_name]\n end = st + self.FIELD_DIMS[net_name]\n to_add = tar_out[:, :, st: end]\n# print(\"Start and end\", st, end)\n \n final_output = tf.concat([final_output, to_add], axis=-1)\n# print(\"Final output shape after\",net_name, \"is\", final_output.shape, \"\\n\")\n# \n \n \n # predict all date parts indep of each other\n date_info = []\n for net_name in self.date_fields:\n# print(\"Running net\", net_name)\n pred = self.__getattribute__(net_name)(final_output)\n# print(\"pred shape\", pred.shape)\n preds[net_name] = pred\n \n st = self.FIELD_STARTS[net_name]\n end = st + self.FIELD_DIMS[net_name]\n to_add = tar_out[:, :, st: end]\n# print(\"Start and end\", st, end)\n \n date_info.append(to_add)\n \n final_output = tf.concat([final_output] + date_info, axis=-1)\n# print(\"Final output shape after date is\", final_output.shape, \"\\n**\\n\")\n \n \n for net_name in self.post_date_order:\n# print(\"Running net\", net_name)\n pred = self.__getattribute__(net_name)(final_output)\n# print(\"pred shape\", pred.shape)\n preds[net_name] = pred\n \n st = self.FIELD_STARTS[net_name]\n end = st + self.FIELD_DIMS[net_name]\n to_add = tar_out[:, :, st: end]\n# print(\"Start and end\", st, end)\n# print(\"to add shape\", to_add.shape)\n \n final_output = tf.concat([final_output, to_add], axis=-1)\n# print(\"Final output shape after\",net_name, \"is\", final_output.shape)\n\n# print(\"\\n\"*4)\n# \n return preds, attention_weights\n\n\n\n\n# @tf.function(input_signature=train_step_signature)\n def train_step(self, inp, tar):\n\n\n combined_mask, dec_padding_mask = create_masks(tar)\n\n with tf.GradientTape() as 
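The decoder layers above take a look_ahead_mask built by create_masks, which lives in the unshown transformer_core module; the standard construction, assuming it matches, is:

```python
import tensorflow as tf

def look_ahead_mask(size):
    # Upper-triangular 1s mark future positions the decoder may not attend to.
    return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)

print(look_ahead_mask(4).numpy())
# [[0. 1. 1. 1.]
#  [0. 0. 1. 1.]
#  [0. 0. 0. 1.]
#  [0. 0. 0. 0.]]
```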
tape:\n predictions, _ = self(inp, \n True, \n combined_mask, \n dec_padding_mask)\n \n loss, *_ = self.loss_function(tar, predictions)\n\n\n gradients = tape.gradient(loss, self.trainable_variables) \n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n self.train_loss(loss)\n\n\n\n def val_step(self, inp, tar):\n\n combined_mask, dec_padding_mask = create_masks(tar)\n\n predictions, _ = self(inp, \n False, \n combined_mask, \n dec_padding_mask)\n \n return self.loss_function(tar, predictions)\n\n\n\n\n def fit(self, train_batches, x_cv, y_cv, epochs, early_stop=2, print_every=50, ckpt_every=2, mid_epoch_updates=None):\n warned_acc = False\n \n if mid_epoch_updates:\n batch_per_update = len(train_batches)// mid_epoch_updates\n\n for epoch in range(epochs):\n start = time.time()\n\n self.train_loss.reset_states()\n\n for (batch_no, (inp, tar)) in enumerate(train_batches):\n self.train_step(inp, tar)\n \n \n if batch_no % print_every == 0:\n print(f'Epoch {epoch + 1} Batch {batch_no} Loss {self.train_loss.result():.4f}')\n \n \n if mid_epoch_updates:\n if batch_no % batch_per_update == 0:\n v_loss, *vl_parts = self.val_step(x_cv, y_cv)\n if len(vl_parts) == 1: \n vl_parts = vl_parts[0]\n \n \n self.results[\"loss\"].append(self.train_loss.result().numpy())\n self.results[\"val_loss\"].append(v_loss)\n self.results[\"parts\"].append(vl_parts)\n\n try:\n acc_res = self.acc_function()\n\n acc_list = self.results.get(\"val_acc\", [])\n acc_list.append(acc_res)\n self.results[\"val_acc\"] = acc_list\n except Exception as e:\n if not warned_acc:\n warned_acc = True\n print(\"Not recording acc:\", e)\n\n\n\n\n print(f'Epoch {epoch + 1} Loss {self.train_loss.result():.4f}')\n\n v_loss, *vl_parts = self.val_step(x_cv, y_cv)\n if len(vl_parts) == 1: \n vl_parts = vl_parts[0]\n \n print(f\"** on validation data loss is {v_loss:.4f}\")\n \n# dict(zip([\"full\"] + DATA_KEY_ORDER, [full.numpy()] + [x.numpy() for x in parts])) \n\n self.results[\"loss\"].append(self.train_loss.result().numpy())\n self.results[\"val_loss\"].append(v_loss)\n self.results[\"parts\"].append(vl_parts)\n \n try:\n acc_res = self.acc_function()\n \n acc_list = self.results.get(\"val_acc\", [])\n acc_list.append(acc_res)\n self.results[\"val_acc\"] = acc_list\n print(f\"** on validation data acc is \\n{acc_res}\")\n except Exception as e:\n if not warned_acc:\n warned_acc = True\n print(\"Not recording acc:\", e)\n \n\n \n\n print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\\n')\n\n if min(self.results[\"val_loss\"] ) < min(self.results[\"val_loss\"][-early_stop:] ):\n print(f\"Stopping early, last {early_stop} val losses are: {self.results['val_loss'][-early_stop:]} \\\n \\nBest was {min(self.results['val_loss'] ):.3f}\\n\\n\")\n break\n \n \n if (epoch + 1) % ckpt_every == 0:\n ckpt_save_path = self.ckpt_manager.save()\n print(f'Saving checkpoint for epoch {epoch+1} at {ckpt_save_path}')\n\n\n","sub_path":"my_lib/BanksformerGen.py","file_name":"BanksformerGen.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"287280146","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 26 23:57:33 2021\n\n@author: TARUN\n\"\"\"\nimport streamlit as st\nimport nsepy\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import 
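The fit loop above breaks when the best validation loss predates the last `early_stop` epochs; the comparison is easier to read against a toy history:

```python
def should_stop(val_losses, patience=2):
    # Stop once the global best is no longer among the last `patience`
    # entries, mirroring the record's min(...) < min(...[-early_stop:]) test.
    return len(val_losses) > patience and min(val_losses) < min(val_losses[-patience:])

history = [1.0, 0.8, 0.7, 0.75, 0.9]
print(should_stop(history))  # True: the best loss (0.7) predates the last two epochs
```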
LinearRegression\nfrom sklearn import metrics \n\nst.title(\"Stock Price Predictor\")\nst.markdown(\"
\",unsafe_allow_html=True)\nsname=st.sidebar.text_input('Enter the stock symbol')\nst.write (sname)\ndur= st.sidebar.slider('No. of Days of historical data to be used', min_value=1, max_value=700, value=300, step=1)\nalgo_name=st.sidebar.selectbox(\"Select Algorithm\",(\"Random Forest\",\"Linear Regression\",\"SVM\",\"ARIMA\"))\n\n\ndef gethdata(stockname,d):\n today=datetime.date.today()\n duration=d\n duration2=10\n start=today+datetime.timedelta(-duration)\n end1=today\n stockdata=nsepy.get_history(symbol=stockname,start=start,end=end1)\n return stockdata\nrf=gethdata(sname,dur)\nst.write(rf)\nst.set_option('deprecation.showPyplotGlobalUse', False)\ngv=st.multiselect(\"Select the feature to be represented in graph\",(\"Prev Close\",\"High\",\"Deliverable Volume\",'VWAP','Low','Trades','Open','Trades','Turnover'),[\"High\"])\nif not gv:\n st.error(\"Please select at least one feature.\")\na=[]\ni=0\nt=rf.index.values\nfor dt in t:\n a.append(dt.strftime(\"%m/%d/%Y\"))\nfig, ax = plt.subplots(figsize=(40, 20))\nxd = [datetime.datetime.strptime(d,\"%m/%d/%Y\").date() for d in a]\nax = plt.axes()\nax.set_facecolor(\"#1b1e38\")\nax.plot(xd, rf[gv],'-o',label=gv)\nplt.xlabel('Dates')\nax.grid()\nplt.legend()\nst.pyplot()\n\nparams=dict()\n \n \n \n\n\n\n# PREDICTION\nif algo_name==\"Random Forest\":\n n_estimator= st.sidebar.slider(\"n_estimators\",min_value=1,max_value=100,step=1)\n params[n_estimator]=n_estimator\n ndh=rf[['High']]\n v=ndh.values\n v=np.append(v, [0])\n v=np.delete(v,0)\n ndh2=rf[['Low']]\n v2=ndh2.values\n v2=np.append(v2, [0])\n v2=np.delete(v2,0)\n rf['ndhigh']=v\n rf['ndlow']=v2 \n rf.reset_index(inplace = True)\n fs=st.multiselect(\"Select the features to be used as input\",(\"Prev Close\",\"High\",\"Last\",\"Close\",\"Deliverable Volume\",'Volume','%Deliverble','VWAP','Low','Open','Trades','Turnover'),['High'])\n X= rf[fs] \n Y=rf['ndhigh']\n X2= rf[fs] \n Y2=rf['ndlow']\n Xtestn=X.iloc[-1:]\n Xtestnl=X2.iloc[-1:]\n X.drop(index=X.index[-1], axis=0, inplace=True)\n Y.drop(index=Y.index[-1], axis=0, inplace=True)\n X2.drop(index=X2.index[-1], axis=0, inplace=True)\n Y2.drop(index=Y2.index[-1], axis=0, inplace=True)\n tst_sze=st.slider(\"Testing data size\",min_value=0.1,max_value=1.0,step=0.1)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = tst_sze, random_state=1234)\n X2_train, X2_test, Y2_train, Y2_test = train_test_split(X2, Y2, test_size =tst_sze, random_state=1234)\n regressor = RandomForestRegressor(n_estimators= params[n_estimator])\n regressor2 = RandomForestRegressor(n_estimators= params[n_estimator])\n regressor.fit(X_train,Y_train)\n regressor2.fit(X2_train,Y2_train)\n test_data_prediction = regressor.predict(X_test)\n test_data_prediction2 = regressor2.predict(X2_test)\n error_score = metrics.r2_score(Y_test, test_data_prediction)\n error_score2 = metrics.r2_score(Y2_test, test_data_prediction2)\n st.write(\"R squared error for HIGH : \", error_score)\n st.write(\"R squared error for LOW : \", error_score2)\n \n # Plotting prediction_graph\n Y_test = list(Y_test)\n Y2_test = list(Y2_test)\n plt.subplots(figsize=(20, 10)) \n ax = plt.axes()\n ax.set_facecolor(\"#1b1e38\")\n \n plt.plot(Y_test,'-o', color='blue', label = 'Actual high Value')\n plt.plot(test_data_prediction,'-o', color='red', label='Predicted high Value')\n plt.plot(Y2_test,'-x', color='green', label = 'Actual low Value')\n plt.plot(test_data_prediction2,'-x', color='orange', label='Predicted low Value')\n plt.title('Actual Price vs Predicted Price')\n 
plt.xlabel('Number of values')\n \n \n plt.ylabel('HIGH & LOW Price')\n ax.grid()\n plt.legend()\n st.pyplot()\n test_data_predictionmon = regressor.predict(Xtestn)\n test_data_predictionlow = regressor2.predict(Xtestnl)\n st.write(\"Tomorrow's PREDICTED HIGH : \",test_data_predictionmon[0])\n st.write(\"Tomorrow's PREDICTED LOW : \",test_data_predictionlow[0])\n\nelif algo_name==\"Linear Regression\":\n today=datetime.date.today() \n duration2= st.sidebar.slider('No. of Days of testing data to be used:', min_value=1, max_value=700, value=50, step=1)\n start=today+datetime.timedelta(-dur)\n end1=today+datetime.timedelta(-duration2)\n lstockdata=nsepy.get_history(symbol=sname,start=start,end=end1) \n lrf=lstockdata\n ndh=lrf[['High']]\n v=ndh.values\n v=np.append(v, [0])\n v=np.delete(v,0)\n ndh2=lrf[['Low']]\n v2=ndh2.values\n v2=np.append(v2, [0])\n v2=np.delete(v2,0)\n lrf['ndhigh']=v\n lrf['ndlow']=v2 \n lrf.reset_index(inplace = True)\n fs=st.multiselect(\"Select the features to be used as input\",(\"Prev Close\",\"High\",\"Last\",\"Close\",\"Deliverable Volume\",'Volume','%Deliverble','VWAP','Low','Open','Trades','Turnover'),['High'])\n X= lrf[fs] \n Y=lrf['ndhigh']\n X2= lrf[fs] \n Y2=lrf['ndlow']\n Xtestn=X.iloc[-1:]\n Xtestnl=X2.iloc[-1:]\n X.drop(index=X.index[-1], axis=0, inplace=True)\n Y.drop(index=Y.index[-1], axis=0, inplace=True)\n X2.drop(index=X2.index[-1], axis=0, inplace=True)\n Y2.drop(index=Y2.index[-1], axis=0, inplace=True)\n X_train=X\n Y_train=Y\n X2_train=X2\n Y2_train=Y2 \n lr = LinearRegression()\n z=lr.fit(X_train,Y_train)\n lr2 = LinearRegression()\n z2=lr2.fit(X2_train,Y2_train)\n today=datetime.date.today()\n \n start=today+datetime.timedelta(-duration2) \n stockdatalt=nsepy.get_history(symbol=sname,start=start,end=today)\n df1=stockdatalt\n test=df1[fs] \n ndhi=df1[['High']]\n v11=ndhi.values\n v11=np.append(v11, [0])\n v11=np.delete(v11,0)\n ndhi2=df1[['Low']]\n v22=ndhi2.values\n v22=np.append(v22, [0])\n v22=np.delete(v22,0)\n test['ND high']=v11\n test['ND LOW']=v22\n df1.reset_index(inplace = True)\n testxh= df1[fs]\n lphigh=z.predict(testxh)\n testxl= df1[fs]\n lplow=z2.predict(testxl)\n Xtestn=X.iloc[-1:]\n test['high prediction']=lphigh\n test['low prediction']=lplow\n #prediction df\n Xtesthl=test.iloc[-1:] \n test.drop(index=test.index[-1],axis=0, inplace=True)\n error_score = metrics.r2_score(test['ND high'], test['high prediction'])\n error_score2 = metrics.r2_score( test['ND LOW'], test['low prediction'])\n st.write(\"R squared error for HIGH : \", error_score)\n st.write(\"R squared error for LOW : \", error_score2)\n # plotting data\n a=[]\n i=0\n t=test.index.values\n for dt in t:\n a.append(dt.strftime(\"%m/%d/%Y\"))\n fig, ax = plt.subplots(figsize=(20, 10))\n ax = plt.axes()\n ax.set_facecolor(\"#1b1e38\")\n xd = [datetime.datetime.strptime(d,\"%m/%d/%Y\").date() for d in a]\n ax.plot(xd, test['ND high'],'-o',label = \"Actual next day High\", color = 'red')\n ax.plot(xd, test['high prediction'],'-o', label = \"predicted high by the model\", color = 'orange')\n ax.plot(xd, test['ND LOW'],'-o',label = \"Actual next day low\", color = 'yellow')\n ax.plot(xd, test['low prediction'],'-o', label = \"predicted low by the model\", color = 'blue')\n plt.title('Actual Price vs Predicted Price')\n ax.grid()\n plt.legend()\n st.pyplot()\n st.write(\"Tomorrow's PREDICTED HIGH : \",Xtesthl['high prediction'].reset_index(drop=True).to_string(index=False))\n st.write(\"Tomorrow's PREDICTED LOW : \",Xtesthl['low 
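Both model branches in main.py build next-day targets by appending a zero and deleting the first array element; the equivalent pandas idiom is a negative shift (toy frame):

```python
import pandas as pd

df = pd.DataFrame({"High": [10, 12, 11, 13]})
# shift(-1) aligns tomorrow's high with today's row; the trailing NaN
# replaces the record's appended 0 and is dropped before fitting.
df["ndhigh"] = df["High"].shift(-1)
print(df.dropna())
```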
prediction'].reset_index(drop=True).to_string(index=False))\nelse:\n st.error(\"ALGORITHM not added\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"509848906","text":"import torch\nimport numpy as np\nimport pickle\nimport os\nimport torchvision\ncpath = os.path.dirname(__file__)\n\n\nNUM_USER = 100\nSAVE = True\nDATASET_FILE = os.path.join(cpath, 'data')\nIMAGE_DATA = not False\nnp.random.seed(6)\n\n\nclass ImageDataset(object):\n def __init__(self, images, labels, normalize=False):\n if isinstance(images, torch.Tensor):\n if not IMAGE_DATA:\n self.data = images.view(-1, 784).numpy()/255\n else:\n self.data = images.numpy()\n else:\n self.data = images\n if normalize and not IMAGE_DATA:\n mu = np.mean(self.data.astype(np.float32), 0)\n sigma = np.std(self.data.astype(np.float32), 0)\n self.data = (self.data.astype(np.float32) - mu) / (sigma + 0.001)\n if not isinstance(labels, np.ndarray):\n labels = np.array(labels)\n self.target = labels\n\n def __len__(self):\n return len(self.target)\n\n\ndef data_split(data, num_split):\n delta, r = len(data) // num_split, len(data) % num_split\n data_lst = []\n i, used_r = 0, 0\n while i < len(data):\n if used_r < r:\n data_lst.append(data[i:i+delta+1])\n i += delta + 1\n used_r += 1\n else:\n data_lst.append(data[i:i+delta])\n i += delta\n return data_lst\n\n\ndef choose_two_digit(split_data_lst):\n available_digit = []\n for i, digit in enumerate(split_data_lst):\n if len(digit) > 0:\n available_digit.append(i)\n try:\n lst = np.random.choice(available_digit, 2, replace=False).tolist()\n except:\n print(available_digit)\n return lst\n\n\ndef main():\n # Get MNIST data, normalize, and divide by level\n print('>>> Get MNIST data.')\n trainset = torchvision.datasets.MNIST(DATASET_FILE, download=True, train=True)\n testset = torchvision.datasets.MNIST(DATASET_FILE, download=True, train=False)\n\n train_mnist = ImageDataset(trainset.train_data, trainset.train_labels)\n test_mnist = ImageDataset(testset.test_data, testset.test_labels)\n\n mnist_traindata = []\n for number in range(10):\n idx = train_mnist.target == number\n mnist_traindata.append(train_mnist.data[idx])\n split_mnist_traindata = []\n for digit in mnist_traindata:\n split_mnist_traindata.append(data_split(digit, 20))\n\n mnist_testdata = []\n for number in range(10):\n idx = test_mnist.target == number\n mnist_testdata.append(test_mnist.data[idx])\n split_mnist_testdata = []\n for digit in mnist_testdata:\n split_mnist_testdata.append(data_split(digit, 20))\n\n data_distribution = np.array([len(v) for v in mnist_traindata])\n data_distribution = np.round(data_distribution / data_distribution.sum(), 3)\n print('>>> Train Number distribution: {}'.format(data_distribution.tolist()))\n\n digit_count = np.array([len(v) for v in split_mnist_traindata])\n print('>>> Each digit in train data is split into: {}'.format(digit_count.tolist()))\n\n digit_count = np.array([len(v) for v in split_mnist_testdata])\n print('>>> Each digit in test data is split into: {}'.format(digit_count.tolist()))\n\n # Assign train samples to each user\n train_X = [[] for _ in range(NUM_USER)]\n train_y = [[] for _ in range(NUM_USER)]\n test_X = [[] for _ in range(NUM_USER)]\n test_y = [[] for _ in range(NUM_USER)]\n\n print(\">>> Data is non-i.i.d. 
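data_split in generate_random_niid.py spreads the remainder over the first r chunks so sizes differ by at most one; its behaviour is easiest to see on a small list:

```python
def data_split(data, num_split):
    # Same logic as the record: r chunks of size delta+1, the rest of size delta.
    delta, r = len(data) // num_split, len(data) % num_split
    out, i, used_r = [], 0, 0
    while i < len(data):
        step = delta + 1 if used_r < r else delta
        out.append(data[i:i + step])
        i += step
        used_r += 1
    return out

print(data_split(list(range(10)), 3))  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```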
distributed\")\n print(\">>> Data is balanced\")\n\n for user in range(NUM_USER):\n print(user, np.array([len(v) for v in split_mnist_traindata]))\n\n for d in choose_two_digit(split_mnist_traindata):\n l = len(split_mnist_traindata[d][-1])\n train_X[user] += split_mnist_traindata[d].pop().tolist()\n train_y[user] += (d * np.ones(l)).tolist()\n\n l = len(split_mnist_testdata[d][-1])\n test_X[user] += split_mnist_testdata[d].pop().tolist()\n test_y[user] += (d * np.ones(l)).tolist()\n\n # Setup directory for train/test data\n print('>>> Set data path for MNIST.')\n image = 1 if IMAGE_DATA else 0\n train_path = '{}/data/train/all_data_{}_random_niid.pkl'.format(cpath, image)\n test_path = '{}/data/test/all_data_{}_random_niid.pkl'.format(cpath, image)\n\n dir_path = os.path.dirname(train_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n dir_path = os.path.dirname(test_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # Create data structure\n train_data = {'users': [], 'user_data': {}, 'num_samples': []}\n test_data = {'users': [], 'user_data': {}, 'num_samples': []}\n\n # Setup 1000 users\n for i in range(NUM_USER):\n uname = i\n\n train_data['users'].append(uname)\n train_data['user_data'][uname] = {'x': train_X[i], 'y': train_y[i]}\n train_data['num_samples'].append(len(train_X[i]))\n\n test_data['users'].append(uname)\n test_data['user_data'][uname] = {'x': test_X[i], 'y': test_y[i]}\n test_data['num_samples'].append(len(test_X[i]))\n\n print('>>> User data distribution: {}'.format(train_data['num_samples']))\n print('>>> Total training size: {}'.format(sum(train_data['num_samples'])))\n print('>>> Total testing size: {}'.format(sum(test_data['num_samples'])))\n\n # Save user data\n if SAVE:\n with open(train_path, 'wb') as outfile:\n pickle.dump(train_data, outfile)\n with open(test_path, 'wb') as outfile:\n pickle.dump(test_data, outfile)\n\n print('>>> Save data.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/mnist/generate_random_niid.py","file_name":"generate_random_niid.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"501985880","text":"\"\"\" Non Linear Regression Analysis - China GDP Dataset \"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython.display import display\n\ndf = pd.read_csv(\"china_gdp.csv\")\n#display(df.head(10))\n#Intial Plotting\nplt.figure(figsize=(8,5))\nx_data, y_data = (df[\"Year\"].values, df[\"Value\"].values)\nplt.plot(x_data, y_data, 'ro')\nplt.ylabel('GDP')\nplt.xlabel('Year')\n#plt.show() #Looks roughly logarithmic\n\ndef sigmoid(x, Beta_1, Beta_2):\n \"\"\" Log function being fitted against chinas GDP \"\"\"\n y = 1 / (1 + np.exp(-Beta_1*(x-Beta_2)))\n return y\n\nbeta_1 = 0.10\nbeta_2 = 1990.0\nY_pred = sigmoid(x_data, beta_1 , beta_2)\nplt.plot(x_data, Y_pred*15000000000000.)\nplt.plot(x_data, y_data, 'ro')\n\n#Normalising x and y values\nxdata =x_data/max(x_data)\nydata =y_data/max(y_data)\n\n#Using the function curve_fit we can find the optimum values of beta_1 and beta_2 through using a non-linear least squares method. 
popt are the optimised parameters\n\nfrom scipy.optimize import curve_fit\npopt, pcov = curve_fit(sigmoid, xdata, ydata)\nprint(\" beta_1 = %f, beta_2 = %f\" % (popt[0], popt[1]))\n\n#plotting\nx = np.linspace(1960, 2015, 55)\nx = x/max(x)\nplt.figure(figsize=(8,5))\ny = sigmoid(x, *popt)\nplt.plot(xdata, ydata, 'ro', label='data')\nplt.plot(x,y, linewidth=3.0, label='fit')\nplt.legend(loc='best')\nplt.ylabel('GDP')\nplt.xlabel('Year')\nplt.show()\n\nmsk = np.random.rand(len(df)) < 0.8\ntrain_x = xdata[msk]\ntest_x = xdata[~msk]\ntrain_y = ydata[msk]\ntest_y = ydata[~msk]\n\n# build the model using train set\npopt, pcov = curve_fit(sigmoid, train_x, train_y)\n\n# predict using test set\ny_hat = sigmoid(test_x, *popt)\n\n# evaluation\nprint(\"Mean absolute error: %.2f\" % np.mean(np.absolute(y_hat - test_y)))\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((y_hat - test_y) ** 2))\nfrom sklearn.metrics import r2_score\nprint(\"R2-score: %.2f\" % r2_score(y_hat , test_y) )\n","sub_path":"NonLinReg.py","file_name":"NonLinReg.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627460528","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport datetime\nimport re\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0])))\nsys.path.insert(\n 0,\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"..\",\n \"pyquickhelper\",\n \"src\")))\nsys.path.insert(\n 0,\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"..\",\n \"pyensae\",\n \"src\")))\nsys.path.insert(\n 0,\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"..\",\n \"pymmails\",\n \"src\")))\nsys.path.insert(\n 0,\n os.path.abspath(\n os.path.join(\n os.path.split(__file__)[0],\n \"..\",\n \"..\",\n \"..\",\n \"..\",\n \"pyrsslocal\",\n \"src\")))\n\nfrom pyquickhelper.helpgen.default_conf import set_sphinx_variables, get_default_stylesheet\n\n\nset_sphinx_variables(__file__, \"Python dans tous ses états\", \"Xavier Dupré\",\n 2018, \"sphinx_rtd_theme\", None,\n locals(), add_extensions=None,\n github_repo=\"https://github.com/sdpython/ensae_teaching_cs.git\",\n extlinks=dict(\n issue=('https://github.com/sdpython/ensae_teaching_cs/issues/%s', 'issue')),\n book=True, nblayout='table')\n\n# do not put it back otherwise sphinx import matplotlib before setting up its backend\n# for the sphinx command .. 
plot::\n# import pyquickhelper\n# import pyensae\n# import pymmails\n# import pyrsslocal\n\ncustom_preamble = \"\"\"\\n\\\\newcommand{\\\\girafedec}[3]{ \\\\begin{array}{ccccc} #1 &=& #2 &+& #3 \\\\\\\\ a' &=& a &-& o \\\\end{array}}\n\\\\newcommand{\\\\vecteur}[2]{\\\\pa{#1,\\\\dots,#2}}\n\\\\newcommand{\\\\N}[0]{\\\\mathbb{N}}\n\\\\newcommand{\\\\indicatrice}[1]{\\\\mathbf{1\\\\!\\\\!1}_{\\\\acc{#1}}}\n\\\\usepackage[all]{xy}\n\\\\newcommand{\\\\infegal}[0]{\\\\leqslant}\n\\\\newcommand{\\\\supegal}[0]{\\\\geqslant}\n\\\\newcommand{\\\\ensemble}[2]{\\\\acc{#1,\\\\dots,#2}}\n\\\\newcommand{\\\\fleche}[1]{\\\\overrightarrow{ #1 }}\n\\\\newcommand{\\\\intervalle}[2]{\\\\left\\\\{#1,\\\\cdots,#2\\\\right\\\\}}\n\\\\newcommand{\\\\loinormale}[2]{{\\\\cal N}\\\\pa{#1,#2}}\n\\\\newcommand{\\\\independant}[0]{\\\\;\\\\makebox[3ex]{\\\\makebox[0ex]{\\\\rule[-0.2ex]{3ex}{.1ex}}\\\\!\\\\!\\\\!\\\\!\\\\makebox[.5ex][l]{\\\\rule[-.2ex]{.1ex}{2ex}}\\\\makebox[.5ex][l]{\\\\rule[-.2ex]{.1ex}{2ex}}} \\\\,\\\\,}\n\\\\newcommand{\\\\esp}{\\\\mathbb{E}}\n\\\\newcommand{\\\\pr}[1]{\\\\mathbb{P}\\\\pa{#1}}\n\\\\newcommand{\\\\loi}[0]{{\\\\cal L}}\n\\\\newcommand{\\\\vecteurno}[2]{#1,\\\\dots,#2}\n\\\\newcommand{\\\\norm}[1]{\\\\left\\\\Vert#1\\\\right\\\\Vert}\n\\\\newcommand{\\\\dans}[0]{\\\\rightarrow}\n\\\\newcommand{\\\\partialfrac}[2]{\\\\frac{\\\\partial #1}{\\\\partial #2}}\n\\\\newcommand{\\\\partialdfrac}[2]{\\\\dfrac{\\\\partial #1}{\\\\partial #2}}\n\\\\newcommand{\\\\loimultinomiale}[1]{{\\\\cal M}\\\\pa{#1}}\n\\\\newcommand{\\\\trace}[1]{tr\\\\pa{#1}}\n\\\\newcommand{\\\\abs}[1]{\\\\left|#1\\\\right|}\n\"\"\"\n#\\\\usepackage{eepic}\nimgmath_latex_preamble += custom_preamble\nlatex_elements['preamble'] += custom_preamble\n\nproject_var_name_t = \"ENSAE
<br />Xavier Dupré\"\nproject_var_name = \"ensae_teaching_cs\"\nproject_var_name_1l = project_var_name_t.replace(\"<br />
\", \" - \")\nhtml_search_language = \"fr\"\nhtml_split_index = True\n\ntexinfo_documents = [\n ('index',\n '%s' % project_var_name,\n '%s' % project_var_name_t,\n author,\n '%s' % project_var_name,\n 'ENSAE, contenu des enseignements',\n 'teachings'),\n]\n\nlanguage = \"fr\"\n\nepkg_dictionary[\"Anaconda\"] = 'https://www.continuum.io/downloads'\nepkg_dictionary[\"anaconda\"] = 'https://www.continuum.io/downloads'\nepkg_dictionary[\"Arduino\"] = 'https://www.arduino.cc/'\nepkg_dictionary[\"AUC\"] = 'http://www.xavierdupre.fr/app/mlstatpy/helpsphinx/c_metric/roc.html#aire-sous-la-courbe'\nepkg_dictionary[\"C++\"] = 'https://fr.wikipedia.org/wiki/C%2B%2B'\nepkg_dictionary[\"CNTK\"] = 'https://www.microsoft.com/en-us/research/product/cognitive-toolkit/'\nepkg_dictionary[\"dask\"] = 'http://dask.pydata.org/en/latest/'\nepkg_dictionary[\"dlib\"] = 'http://dlib.net/'\nepkg_dictionary[\"ENSAE\"] = 'http://www.ensae.fr/'\nepkg_dictionary[\"falcon\"] = 'https://falconframework.org/'\nepkg_dictionary[\"FastText\"] = 'https://fasttext.cc/'\nepkg_dictionary[\"Flask\"] = 'http://flask.pocoo.org/'\nepkg_dictionary[\"keyring\"] = 'https://github.com/jaraco/keyring'\nepkg_dictionary[\"lightmlboard\"] = 'http://www.xavierdupre.fr/app/lightmlboard/helpsphinx/index.html'\nepkg_dictionary[\"lightmlrestapi\"] = 'http://www.xavierdupre.fr/app/lightmlrestapi/helpsphinx/index.html'\nepkg_dictionary[\"Linux\"] = 'https://fr.wikipedia.org/wiki/Linux'\nepkg_dictionary[\"linux\"] = 'https://fr.wikipedia.org/wiki/Linux'\nepkg_dictionary[\"mlinsights\"] = 'http://www.xavierdupre.fr/app/mlinsights/helpsphinx/index.html'\nepkg_dictionary[\"mlprodict\"] = 'http://www.xavierdupre.fr/app/mlprodict/helpsphinx/index.html'\nepkg_dictionary[\"matrice de confusion\"] = \"https://fr.wikipedia.org/wiki/Matrice_de_confusion\"\nepkg_dictionary[\"miniconda\"] = 'https://conda.io/miniconda.html'\nepkg_dictionary[\"notebook\"] = 'http://jupyter.org/'\nepkg_dictionary[\"Notepad++\"] = \"https://notepad-plus-plus.org/\"\nepkg_dictionary[\"open source\"] = 'http://fr.wikipedia.org/wiki/Open_source'\nepkg_dictionary[\"OS/X\"] = 'https://fr.wikipedia.org/wiki/MacOS'\nepkg_dictionary[\"pickle\"] = 'https://docs.python.org/3/library/pickle.html'\nepkg_dictionary[\"PTVS\"] = 'https://microsoft.github.io/PTVS/'\nepkg_dictionary[\"PyCharm\"] = 'https://www.jetbrains.com/pycharm/'\nepkg_dictionary[\"pyenbc\"] = 'http://www.xavierdupre.fr/app/pyenbc/helpsphinx/'\nepkg_dictionary[\"pyensae\"] = 'http://www.xavierdupre.fr/app/pyensae/helpsphinx/'\nepkg_dictionary[\"pytorch\"] = 'http://pytorch.org/'\nepkg_dictionary[\"R\"] = 'https://www.r-project.org/'\nepkg_dictionary[\"RaspberryPI\"] = 'https://www.raspberrypi.org/'\nepkg_dictionary[\"requests\"] = 'http://docs.python-requests.org/en/latest/'\nepkg_dictionary['REST API'] = \"https://en.wikipedia.org/wiki/Representational_state_transfer\"\nepkg_dictionary[\"ROC\"] = 'http://www.xavierdupre.fr/app/mlstatpy/helpsphinx/c_metric/roc.html'\nepkg_dictionary[\"SciTE\"] = 'http://www.scintilla.org/SciTE.html'\nepkg_dictionary[\"Spyder\"] = 'https://github.com/spyder-ide/spyder'\nepkg_dictionary[\"teachpyx\"] = 'http://www.xavierdupre.fr/app/teachpyx/helpsphinx/'\nepkg_dictionary[\"TensorFlow\"] = 'https://www.tensorflow.org/'\nepkg_dictionary[\"theano\"] = 'http://deeplearning.net/software/theano/'\nepkg_dictionary[\"Windows\"] = 
'https://fr.wikipedia.org/wiki/Microsoft_Windows'\n","sub_path":"_doc/sphinxdoc/source/conf_base.py","file_name":"conf_base.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"591281498","text":"\"\"\"empty message\n\nRevision ID: 16a715aa8b07\nRevises: 267c5a0d748b\nCreate Date: 2018-11-28 04:17:43.068246\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '16a715aa8b07'\ndown_revision = '267c5a0d748b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('payments', sa.Column('payment_code', sa.String(length=50), nullable=False))\n op.drop_column('payments', 'partner_payment_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('payments', sa.Column('partner_payment_id', sa.INTEGER(), autoincrement=False, nullable=False))\n op.drop_column('payments', 'payment_code')\n # ### end Alembic commands ###\n","sub_path":"hoteld/migrations/versions/16a715aa8b07_.py","file_name":"16a715aa8b07_.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"597367652","text":"import numpy as np\nimport datetime\nimport heapq\n\n\nclass AStarPlanner(object):\n \n def __init__(self, planning_env, visualize):\n self.planning_env = planning_env\n self.visualize = visualize\n self.nodes = dict()\n\n def plotEdge(self, src_id, dst_id):\n src_coord = self.planning_env.discrete_env.NodeIdToConfiguration(src_id)\n dst_coord = self.planning_env.discrete_env.NodeIdToConfiguration(dst_id)\n if self.visualize:\n self.planning_env.PlotEdge(src_coord, dst_coord)\n\n def Plan(self, start_config, goal_config):\n\n # TODO: Here you will implement the AStar planner\n # The return path should be a numpy array\n # of dimension k x n where k is the number of waypoints\n # and n is the dimension of the robots configuration space\n \n if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n self.planning_env.InitializePlot(goal_config)\n\n print('A* planning ...')\n print('Start State ...', start_config)\n print('Goal State ...', goal_config)\n\n start_id = self.planning_env.discrete_env.ConfigurationToNodeId(start_config)\n goal_id = self.planning_env.discrete_env.ConfigurationToNodeId(goal_config)\n\n print('Start State ID ...', start_id)\n print('Goal State ID ...', goal_id)\n\n open_set = []\n closed_set = set([])\n node_info = {}\n\n closest_dist2goal = self.planning_env.ComputeDistance(start_id, goal_id)\n closest_node = start_id\n\n open_set.append((self.planning_env.ComputeDistance(start_id, goal_id), start_id))\n node_info[start_id] = (0, 10*self.planning_env.ComputeDistance(start_id, goal_id), None)\n heapq.heapify(open_set)\n start = datetime.datetime.now() \n\n while (len(open_set) > 0):\n\n (t, node_id) = heapq.heappop(open_set)\n if node_id in closed_set:\n continue\n\n dist2goal = self.planning_env.ComputeDistance(node_id, goal_id)\n if dist2goal < closest_dist2goal:\n closest_dist2goal = dist2goal\n closest_node = node_id\n\n if (node_id != start_id):\n closed_set.add(node_id)\n self.plotEdge(node_info[node_id][2], node_id)\n\n if (len(node_info) % 5 ==0):\n print('Closest dist to goal : ', closest_dist2goal)\n\n if (node_id == goal_id):\n print('Goal found')\n break\n\n 
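AStarPlanner.py (beginning above) pushes duplicate heap entries and discards nodes already in closed_set when popped — the usual lazy-deletion workaround for heapq's missing decrease-key; the pattern in isolation:

```python
import heapq

open_set, closed = [], set()
heapq.heappush(open_set, (5, "a"))
heapq.heappush(open_set, (3, "a"))  # better priority for the same node

while open_set:
    cost, node = heapq.heappop(open_set)
    if node in closed:
        continue            # stale duplicate: ignore it
    closed.add(node)
    print(node, cost)       # a 3 -- only the best entry is expanded
```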
successors = self.planning_env.GetSuccessors(node_id)\n if len(successors) != 0: \n for succ_id in successors:\n if succ_id not in closed_set:\n if succ_id in node_info: \n if node_info[succ_id][0] > node_info[node_id][0]+1:\n node_info[succ_id] = (node_info[node_id][0]+1, node_info[node_id][1], node_id)\n else:\n node_info[succ_id] = (node_info[node_id][0]+1, 10*self.planning_env.ComputeDistance(succ_id, goal_id), node_id)\n heapq.heappush(open_set, (node_info[succ_id][0] + node_info[succ_id][1], succ_id))\n else:\n print('No successors for ', node_id)\n \n plan = []\n if (goal_id not in node_info):\n print ('Goal not reached ! Cannot plan path')\n else:\n path = [goal_id]\n while path[-1] != start_id:\n path.append(node_info[path[-1]][2])\n \n plan = [self.planning_env.discrete_env.NodeIdToConfiguration(node) for node in path[::-1]]\n elapsed = (datetime.datetime.now() - start).seconds\n print('Plan length :', len(plan))\n print('Nodes visited:', len(node_info))\n print('Elapsed time:', elapsed)\n\n if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n self.planning_env.InitializePlot(goal_config)\n [self.planning_env.PlotEdge(plan[i-1], plan[i]) for i in range(1,len(plan))]\n\n return np.array(plan)\n","sub_path":"robot_autonomy/hw3/code/AStarPlanner.py","file_name":"AStarPlanner.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"648893644","text":"# -*- coding: utf-8 -*-\n\n'''\n# 递归反转链表的一部分\n\n反转单链表的迭代实现不是一个困难的事情,但是递归实现就有点难度了,\n如果再加一点难度,让你仅仅反转单链表中的一部分,你是否能**够递归实现**呢?\n\n本文就来由浅入深,step by step 地解决这个问题。如果你还不会递归地反转单链表也没关系,\n**本文会从递归反转整个单链表开始拓展**,只要你明白单链表的结构,相信你能够有所收获。\n\n```java\n// 单链表节点的结构\npublic class ListNode {\n int val;\n ListNode next;\n ListNode(int x) { val = x; }\n}\n```\n\n什么叫反转单链表的一部分呢,就是给你一个索引区间,让你把单链表中这部分元素反转,其他部分不变:\n\n![](../pictures/%E5%8F%8D%E8%BD%AC%E9%93%BE%E8%A1%A8/title.png)\n\n**注意这里的索引是从 1 开始的**。迭代的思路大概是:先用一个 for 循环找到第 `m` 个位置,然后再用一个 for 循环将 `m` 和 `n` 之间的元素反转。但是我们的递归解法不用一个 for 循环,纯递归实现反转。\n\n迭代实现思路看起来虽然简单,但是细节问题很多的,反而不容易写对。相反,递归实现就很简洁优美,下面就由浅入深,先从反转整个单链表说起。\n'''\n\n### 一、递归反转整个链表\n'''\n这个算法可能很多读者都听说过,这里详细介绍一下,先直接看实现代码:\n\n```java\nListNode reverse(ListNode head) {\n if (head.next == null) return head;\n ListNode last = reverse(head.next);\n head.next.next = head;\n head.next = null;\n return last;\n}\n```\n\n看起来是不是感觉不知所云,完全不能理解这样为什么能够反转链表?\n这就对了,这个算法常常拿来显示递归的巧妙和优美,我们下面来详细解释一下这段代码。\n**对于递归算法,最重要的就是明确递归函数的定义**。具体来说,我们的 `reverse` 函数定义是这样的:\n**输入一个节点 `head`,将「以 `head` 为起点」的链表反转,并返回反转之后的头结点**。\n其中有两个地方需要注意:\n\n1、递归函数要有 base case,也就是这句:\n\n```java\nif (head.next == null) return head;\n```\n意思是如果链表只有一个节点的时候反转也是它自己,直接返回即可。\n\n2、当链表递归反转之后,新的头结点是 `last`,而之前的 `head` 变成了最后一个节点,别忘了链表的末尾要指向 null:\n\n```java\nhead.next = null;\n```\n\n\n### 二、反转链表前 N 个节点\n\n这次我们实现一个这样的函数:\n\n```java\n// 将链表的前 n 个节点反转(n <= 链表长度)\nListNode reverseN(ListNode head, int n)\n```\n\n比如说对于下图链表,执行 `reverseN(head, 3)`:\n\n解决思路和反转整个链表差不多,只要稍加修改即可:\n\n```java\nListNode successor = null; // 后驱节点\n\n// 反转以 head 为起点的 n 个节点,返回新的头结点\nListNode reverseN(ListNode head, int n) {\n if (n == 1) { \n // 记录第 n + 1 个节点\n successor = head.next;\n return head;\n }\n // 以 head.next 为起点,需要反转前 n - 1 个节点\n ListNode last = reverseN(head.next, n - 1);\n\n head.next.next = head;\n // 让反转之后的 head 节点和后面的节点连起来\n head.next = successor;\n return last;\n} \n```\n\n具体的区别:\n\n1、base case 变为 `n == 1`,反转一个元素,就是它本身,同时**要记录后驱节点**。\n\n2、刚才我们直接把 `head.next` 设置为 null,因为整个链表反转后原来的 `head` \n变成了整个链表的最后一个节点。但现在 `head` 
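reverse_list.py ends by noting that Python slicing trivialises the exercise; the recursive solution its (Java) walkthrough develops ports directly, sketched here on a minimal ListNode:

```python
class ListNode:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

successor = None  # node that follows the reversed prefix

def reverse_n(head, n):
    # Reverse the first n nodes, remembering node n+1 as the successor.
    global successor
    if n == 1:
        successor = head.next
        return head
    last = reverse_n(head.next, n - 1)
    head.next.next = head
    head.next = successor
    return last

def reverse_between(head, m, n):
    # Walk forward until position m becomes position 1, then reverse n nodes.
    if m == 1:
        return reverse_n(head, n)
    head.next = reverse_between(head.next, m - 1, n - 1)
    return head

# 1->2->3->4->5, reversing positions 2..4 gives 1->4->3->2->5
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
head = reverse_between(head, 2, 4)
while head:
    print(head.val, end=" ")
    head = head.next
```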
节点在递归反转之后不一定是最后一个节点了,\n所以要记录后驱 `successor`(第 n + 1 个节点),反转之后将 `head` 连接上。\n\n![](../pictures/%E5%8F%8D%E8%BD%AC%E9%93%BE%E8%A1%A8/7.jpg)\n\nOK,如果这个函数你也能看懂,就离实现「反转一部分链表」不远了。\n\n### 三、反转链表的一部分\n\n现在解决我们最开始提出的问题,给一个索引区间 `[m,n]`(索引从 1 开始),仅仅反转区间中的链表元素。\n\n```java\nListNode reverseBetween(ListNode head, int m, int n)\n```\n\n首先,如果 `m == 1`,就相当于反转链表开头的 `n` 个元素嘛,也就是我们刚才实现的功能:\n\n```java\nListNode reverseBetween(ListNode head, int m, int n) {\n // base case\n if (m == 1) {\n // 相当于反转前 n 个元素\n return reverseN(head, n);\n }\n // ...\n}\n```\n\n如果 `m != 1` 怎么办?如果我们把 `head` 的索引视为 1,那么我们是想从第 `m` 个元素开始反转对吧;如果把 `head.next` 的索引视为 1 呢?那么相对于 `head.next`,反转的区间应该是从第 `m - 1` 个元素开始的;那么对于 `head.next.next` 呢……\n\n区别于迭代思想,这就是递归思想,所以我们可以完成代码:\n\n```java\nListNode reverseBetween(ListNode head, int m, int n) {\n // base case\n if (m == 1) {\n return reverseN(head, n);\n }\n // 前进到反转的起点触发 base case\n head.next = reverseBetween(head.next, m - 1, n - 1);\n return head;\n}\n```\n\n值得一提的是,递归操作链表并不高效。和迭代解法相比,\n虽然时间复杂度都是 O(N),但是迭代解法的空间复杂度是 O(1),而递归解法需要堆栈,空间复杂度是 O(N)。\n'''\n\n\n# python 由于有[:]列表切分和[::-1]列表反转,所以以上函数实现都很简单\n\ndef reverseBetween(list,m,n):\n reverse_part = list[m-1:n]\n return list[:m-1]+reverse_part[::-1]+list[n:]\n\n\ninput = [1,3,5,7,9,12,13,15]\nprint(reverseBetween(input,3,6))","sub_path":"LeetCode_easy/reverse_list.py","file_name":"reverse_list.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"325861050","text":"\"\"\"\n11. Foram anotadas as idades e alturas de 30 alunos. Faça um Programa que determine\n quantos alunos com mais de 13 anos possuem altura inferior à média de altura desses\n alunos.\n\"\"\"\nimport random\nalunos = [[], []]\nsoma = 0\ncont = 0\nfor c in range(5):\n idade = random.randint(6,40)\n altura = round(random.uniform(1,2), 2)\n alunos[0].append(idade)\n alunos[1].append(altura)\nmedia = round(sum(alunos[1]) /len(alunos[1]), 2)\nfor j in range(len(alunos[0])):\n if alunos[0][j] > 13 and alunos[1][j] < media:\n cont += 1\n print(f\"aluno {j+1}: idade {alunos[0][j]}, altura {alunos[1][j]}\")\nprint(\"<>\"*12)\nprint(f\"A media de altura de todos os alunos é {media}\")\nprint(f'A quantidade de alunos acima de 13 anos possuem altura inferior à média {cont}')","sub_path":"Lista04/ex011.py","file_name":"ex011.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"485726714","text":"# -*- coding: utf-8 -*-\n#########################################################################\n#\n# Copyright (C) 2017 OSGeo\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n#########################################################################\n\n# Django settings for the GeoNode project.\nimport os\nimport ast\n\ntry:\n    from urllib.parse import urlparse, urlunparse\n    from urllib.request import urlopen, Request\nexcept ImportError:\n    from urllib2 import urlopen, Request\n    from urlparse import urlparse, urlunparse\n# Load more settings from a file called local_settings.py if it exists\ntry:\n    from ihp.local_settings import *\n# from geonode.local_settings import *\nexcept ImportError:\n    from geonode.settings import *\n\n#\n# General Django development settings\n#\nPROJECT_NAME = 'ihp'\n\n# add trailing slash to site url. geoserver url will be relative to this\nif not SITEURL.endswith('/'):\n    SITEURL = '{}/'.format(SITEURL)\n\nSITENAME = os.getenv(\"SITENAME\", 'ihp')\n\n# Defines the directory that contains the settings file as the LOCAL_ROOT\n# It is used for relative settings elsewhere.\nLOCAL_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nWSGI_APPLICATION = \"{}.wsgi.application\".format(PROJECT_NAME)\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = os.getenv('LANGUAGE_CODE', \"en\")\n\nINSTALLED_APPS += ('photologue', 'sortedm2m', 'ihp', 'ihp.content', 'ihp.people', 'ihp.survey')\n\n# Location of url mappings\nROOT_URLCONF = os.getenv('ROOT_URLCONF', '{}.urls'.format(PROJECT_NAME))\n\n# Additional directories which hold static files\nSTATICFILES_DIRS = [os.path.join(LOCAL_ROOT, \"static\"), ] + STATICFILES_DIRS\n\n# Location of locale files\nLOCALE_PATHS = (\n    os.path.join(LOCAL_ROOT, 'locale'),\n    ) + LOCALE_PATHS\n\nTEMPLATES[0]['DIRS'].insert(0, os.path.join(LOCAL_ROOT, \"templates\"))\nloaders = TEMPLATES[0]['OPTIONS'].get('loaders') or ['django.template.loaders.filesystem.Loader','django.template.loaders.app_directories.Loader']\n# loaders.insert(0, 'apptemplates.Loader')\nTEMPLATES[0]['OPTIONS']['loaders'] = loaders\nTEMPLATES[0].pop('APP_DIRS', None)\n\nLANGUAGES = (\n    ('en', \"English\"),\n    ('fr', \"Français\"),\n)\n\nLICENSES = {\n    'ENABLED': True,\n    'DETAIL': 'never',\n    'METADATA': 'never',\n}\n\nAUTH_USER_MODEL = os.getenv('AUTH_USER_MODEL', 'ihp_people.IHPProfile')\nAUTH_USER_AUTOCOMPLETE = os.getenv('AUTH_USER_AUTOCOMPLETE', 'IHPProfileProfileAutocomplete')\n\n# allow registered users to sign in using their username or email\nACCOUNT_AUTHENTICATION_METHOD = \"username_email\"\n\n# prevent signing up by default\nACCOUNT_OPEN_SIGNUP = False\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_SUBJECT_PREFIX = \"\"\nACCOUNT_EMAIL_VERIFICATION = 'optional'\nACCOUNT_EMAIL_CONFIRMATION_EMAIL = True\nACCOUNT_EMAIL_CONFIRMATION_REQUIRED = True\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\nACCOUNT_APPROVAL_REQUIRED = True\n\nACCOUNT_ADAPTER = 'ihp.content.adapters.UnescoLocalAccountAdapter'\n\nSOCIALACCOUNT_ADAPTER = 'geonode.people.adapters.SocialAccountAdapter'\n\nACCOUNT_FORMS = {\n    \"signup\": \"ihp.content.forms.UnescoLocalAccountSignupForm\",\n}\n\nSOCIALACCOUNT_AUTO_SIGNUP = False\nSOCIALACCOUNT_FORMS = {\n    \"signup\": \"ihp.content.forms.UnescoSocialAccountSignupForm\",\n}\n#INSTALLED_APPS += (\n#    'allauth.socialaccount.providers.linkedin_oauth2',\n#    'allauth.socialaccount.providers.facebook',\n#)\n\nSOCIALACCOUNT_PROVIDERS = {\n    'linkedin_oauth2': {\n        'SCOPE': [\n            'r_emailaddress',\n            'r_basicprofile',\n        ],\n        'PROFILE_FIELDS': [\n            'emailAddress',\n            'firstName',\n            'headline',\n            'id',\n            'industry',\n            'lastName',\n            'pictureUrl',\n            
'positions',\n 'publicProfileUrl',\n 'location',\n 'specialties',\n 'summary',\n ]\n },\n 'facebook': {\n 'METHOD': 'oauth2',\n 'SCOPE': [\n 'email',\n 'public_profile',\n ],\n 'FIELDS': [\n 'id',\n 'email',\n 'name',\n 'first_name',\n 'last_name',\n 'verified',\n 'locale',\n 'timezone',\n 'link',\n 'gender',\n ]\n },\n}\n\nSOCIALACCOUNT_PROFILE_EXTRACTORS = {\n \"facebook\": \"geonode.people.profileextractors.FacebookExtractor\",\n \"linkedin_oauth2\": \"ihp.content.profileextractors.LinkedInExtractor\",\n}\n\n# notification settings\nNOTIFICATION_ENABLED = True\n# PINAX_NOTIFICATIONS_LANGUAGE_MODEL = \"allauth.account.Account\"\n\n# notifications backends\n# _EMAIL_BACKEND = \"pinax.notifications.backends.email.EmailBackend\"\n_EMAIL_BACKEND = \"geonode.notifications_backend.EmailBackend\"\nPINAX_NOTIFICATIONS_BACKENDS = [\n (\"email\", _EMAIL_BACKEND),\n]\n\n# Queue non-blocking notifications.\nPINAX_NOTIFICATIONS_QUEUE_ALL = False\nPINAX_NOTIFICATIONS_LOCK_WAIT_TIMEOUT = -1\n\n# PINAX_NOTIFICATIONS_HOOKSET = \"pinax.notifications.hooks.DefaultHookSet\"\nNOTIFICATIONS_ENABLED_BY_DEFAULT = False\nPINAX_NOTIFICATIONS_HOOKSET = \"ihp.people.hooks.IHPNotificationsHookSet\"\n\n# pinax.notifications\n# or notification\nNOTIFICATIONS_MODULE = 'pinax.notifications'\n\nif NOTIFICATION_ENABLED and 'pinax.notifications' not in INSTALLED_APPS:\n INSTALLED_APPS += (NOTIFICATIONS_MODULE, )\n\n# set to true to have multiple recipients in /message/create/\nUSER_MESSAGES_ALLOW_MULTIPLE_RECIPIENTS = True\n\nUNOCONV_ENABLE = True\nif UNOCONV_ENABLE:\n UNOCONV_EXECUTABLE = '/usr/bin/unoconv'\n UNOCONV_TIMEOUT = 60 # seconds\n\n# Security stuff\nAPI_LOCKDOWN = False\nSESSION_COOKIE_SECURE = False\nCSRF_COOKIE_SECURE = False\nCSRF_COOKIE_HTTPONLY = False\nCORS_ORIGIN_ALLOW_ALL = True\nX_FRAME_OPTIONS = 'DENY'\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nSECURE_SSL_REDIRECT = False\nSECURE_HSTS_SECONDS = 3600\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n\nAUTO_ASSIGN_REGISTERED_MEMBERS_TO_REGISTERED_MEMBERS_GROUP_NAME = False\n\n# Choose thumbnail generator -- this is the default generator\nTHUMBNAIL_GENERATOR_DEFAULT_BG = None\n\n# Cache Bustin Settings\nCACHE_BUSTING_STATIC_ENABLED = ast.literal_eval(os.environ.get('CACHE_BUSTING_STATIC_ENABLED', 'False'))\nCACHE_BUSTING_MEDIA_ENABLED = ast.literal_eval(os.environ.get('CACHE_BUSTING_MEDIA_ENABLED', 'False'))\n\nif not DEBUG and not S3_STATIC_ENABLED and not S3_MEDIA_ENABLED:\n if CACHE_BUSTING_STATIC_ENABLED or CACHE_BUSTING_MEDIA_ENABLED:\n from django.contrib.staticfiles import storage\n storage.ManifestStaticFilesStorage.manifest_strict = False\n if CACHE_BUSTING_STATIC_ENABLED:\n STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n if CACHE_BUSTING_MEDIA_ENABLED:\n DEFAULT_FILE_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\n# Settings for MONITORING plugin\nMONITORING_ENABLED = ast.literal_eval(os.environ.get('MONITORING_ENABLED', 'True'))\n\nMONITORING_CONFIG = os.getenv(\"MONITORING_CONFIG\", None)\nMONITORING_HOST_NAME = os.getenv(\"MONITORING_HOST_NAME\", HOSTNAME)\nMONITORING_SERVICE_NAME = os.getenv(\"MONITORING_SERVICE_NAME\", 'geonode')\n\n# how long monitoring data should be stored\nMONITORING_DATA_TTL = timedelta(days=int(os.getenv(\"MONITORING_DATA_TTL\", 365)))\n\n# this will disable csrf check for notification config views,\n# use with caution - for dev purpose only\nMONITORING_DISABLE_CSRF = ast.literal_eval(os.environ.get('MONITORING_DISABLE_CSRF', 
'False'))\n\n# For GRAVATAR provider this hast to be complete URL\nAVATAR_GRAVATAR_DEFAULT = f'{SITEURL}static/avatar/img/default.jpg'\n\nif MONITORING_ENABLED:\n if 'geonode.monitoring' not in INSTALLED_APPS:\n INSTALLED_APPS += ('geonode.monitoring',)\n if 'geonode.monitoring.middleware.MonitoringMiddleware' not in MIDDLEWARE:\n MIDDLEWARE += \\\n ('geonode.monitoring.middleware.MonitoringMiddleware',)\n\n # skip certain paths to not to mud stats too much\n MONITORING_SKIP_PATHS = ('/api/o/',\n '/monitoring/',\n '/admin',\n '/jsi18n',\n STATIC_URL,\n MEDIA_URL,\n re.compile('^/[a-z]{2}/admin/'),\n )\n\n # configure aggregation of past data to control data resolution\n # list of data age, aggregation, in reverse order\n # for current data, 1 minute resolution\n # for data older than 1 day, 1-hour resolution\n # for data older than 2 weeks, 1 day resolution\n MONITORING_DATA_AGGREGATION = (\n (timedelta(seconds=0), timedelta(minutes=1),),\n (timedelta(days=1), timedelta(minutes=60),),\n (timedelta(days=14), timedelta(days=1),),\n )\n\n CELERY_BEAT_SCHEDULE['collect_metrics'] = {\n 'task': 'geonode.monitoring.tasks.collect_metrics',\n 'schedule': 20.0,\n }\n\nUSER_ANALYTICS_ENABLED = ast.literal_eval(os.getenv('USER_ANALYTICS_ENABLED', 'True'))\nUSER_ANALYTICS_GZIP = ast.literal_eval(os.getenv('USER_ANALYTICS_GZIP', 'True'))\n\nGEOIP_PATH = os.getenv('GEOIP_PATH', os.path.join(PROJECT_ROOT, 'GeoIPCities.dat'))\n\n# -- END Settings for MONITORING plugin\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d '\n '%(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(message)s',\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler',\n }\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"geonode\": {\n \"handlers\": [\"console\"], \"level\": \"INFO\", },\n \"geoserver-restconfig.catalog\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"owslib\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"pycsw\": {\n \"handlers\": [\"console\"], \"level\": \"ERROR\", },\n \"celery\": {\n \"handlers\": [\"console\"], \"level\": \"DEBUG\", },\n \"mapstore2_adapter.plugins.serializers\": {\n \"handlers\": [\"console\"], \"level\": \"DEBUG\", },\n \"geonode_logstash.logstash\": {\n \"handlers\": [\"console\"], \"level\": \"DEBUG\", },\n },\n}\n\nCENTRALIZED_DASHBOARD_ENABLED = ast.literal_eval(os.getenv('CENTRALIZED_DASHBOARD_ENABLED', 'False'))\nif CENTRALIZED_DASHBOARD_ENABLED and USER_ANALYTICS_ENABLED and 'geonode_logstash' not in INSTALLED_APPS:\n INSTALLED_APPS += ('geonode_logstash',)\n\n CELERY_BEAT_SCHEDULE['dispatch_metrics'] = {\n 'task': 'geonode_logstash.tasks.dispatch_metrics',\n 'schedule': 3600.0,\n }\n\nLDAP_ENABLED = ast.literal_eval(os.getenv('LDAP_ENABLED', 'False'))\nif LDAP_ENABLED and 'geonode_ldap' not in INSTALLED_APPS:\n INSTALLED_APPS += ('geonode_ldap',)\n\n# Add your specific LDAP configuration after this comment:\n# 
https://docs.geonode.org/en/master/advanced/contrib/#configuration\n","sub_path":"ihp/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":11952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"568633473","text":"import pyyolo\nimport numpy as np\nimport sys\nimport cv2\n\ndarknet_path = './darknet'\ndatacfg = 'cfg/coco.data'\ncfgfile = 'cfg/yolov3.cfg'\nweightfile = '../yolov3.weights'\nfilename = darknet_path + '/data/dog.jpg'\nthresh = 0.45\nhier_thresh = 0.5\n\n# OpenCV \n# cam = cv2.VideoCapture(-1)\n# ret_val, img = cam.read()\n# print(ret_val)\n# if ret_val:\n# \tret_val = cv2.imwrite(filename,img)\n# \tprint(ret_val)\n\npyyolo.init(darknet_path, datacfg, cfgfile, weightfile)\n\n# From file\nprint('----- test original C using a file')\noutputs = pyyolo.test(filename, thresh, hier_thresh, 0)\nfor output in outputs:\n\tprint(output)\n\n# Camera \nprint('----- test python API using a file')\ni = 1\nwhile i < 2:\n\t# ret_val, img = cam.read()\n\torig_img = cv2.imread(filename)\n\timg = orig_img.transpose(2,0,1)\n\tc, h, w = img.shape[0], img.shape[1], img.shape[2]\n\t# print w, h, c \n\tdata = img.ravel()/255.0\n\tdata = np.ascontiguousarray(data, dtype=np.float32)\n\toutputs = pyyolo.detect(w, h, c, data, thresh, hier_thresh)\t\n\tfor output in outputs:\n\t\tprint(output)\n\t\t# left is X-coordinate of left side of rect, etc.\n\t\t# Therefore, the top left corner is (left, top) and bottom right is (right, bottom)\n\t\tleft, right, bottom, top = output['left'], output['right'], output['bottom'], output['top']\n\t\tprob, label = output['prob'], output['class']\n\t\ttext = \"{0} ({1:.3f})\".format(label, prob)\n\t\tcv2.rectangle(orig_img, (left, top), (right, bottom), color=(0, 255, 0))\n\t\tcv2.putText(orig_img, text, (left, top), cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255,255,255), thickness=2, lineType=cv2.LINE_AA)\n\tcv2.imwrite(\"predicted.jpg\", orig_img)\n\ti = i + 1\n\n# free model\npyyolo.cleanup()\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"587679455","text":"'''\r\nCreated on Mar 26, 2019\r\n\r\n@author: yogishoban\r\n'''\r\nimport nltk\r\nimport xlrd\r\n\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nimport pickle\r\n\r\n\r\nemotions=xlrd.open_workbook(\"Emotion phrases\")  # xlrd.open_workbook takes a filename only; it has no mode argument\r\nemotional=emotions.sheet_by_index(0)\r\nrows=emotional.nrows\r\ncols=emotional.ncols\r\n#reading all the data from the dataset\r\ndef getwords (dataset):\r\n    allwords=[]\r\n    for words in dataset:\r\n        allwords.append(words)\r\n    return allwords\r\n\r\n#read the dataset\r\ndef read_datasets(dataset, t_type):\r\n    data = []\r\n    for l in dataset:\r\n        data.append([l, t_type])\r\n    return data\r\n\r\n#repeated text with the help of nltk freqdist\r\ndef get_word_features(wordlist):\r\n    wordlist = nltk.FreqDist(wordlist)\r\n    word_features = []\r\n    for wk in wordlist.keys():\r\n        if wordlist[wk] > 5: \r\n            word_features.append(wk) \r\n    return word_features\r\n\r\n\r\n#mark, for each frequent word, whether it occurs in the document\r\ndef extract_features(document):\r\n    document_words = set(document)\r\n    features = {}\r\n    for word in word_features:\r\n        features[word] = (word in document_words)\r\n    return features\r\n\r\n    \r\n    \r\n\r\nfor i in range(emotional.nrows):\r\n    if(emotional.cell(i,0).value=='joy'):\r\n        joy_feel=read_datasets('Emotion phrases', 'joy')\r\nfor i in range(emotional.nrows):\r\n    if(emotional.cell(i,0).value=='anger'):\r\n        anger_feel=read_datasets('Emotion phrases', 'anger')\r\nfor i in range(emotional.nrows):\r\n    if(emotional.cell(i,0).value=='sadness'):\r\n        sadness_feel=read_datasets('Emotion phrases', 'sadness')\r\nfor i in range(emotional.nrows):\r\n    if(emotional.cell(i,0).value=='surprise'):\r\n        surprise_feel=read_datasets('Emotion phrases', 'surprise')\r\nfor i in range(emotional.nrows):\r\n    if(emotional.cell(i,0).value=='love'):\r\n        love_feel=read_datasets('Emotion phrases', 'love')\r\nfor i in range(emotional.nrows):\r\n    if(emotional.cell(i,0).value=='fear'):\r\n        fear_feel=read_datasets('Emotion phrases', 'fear')\r\n\r\n#get the training set to train with the help of naive Bayes classifier\r\n#assemble all labelled samples; extract_features (not read_datasets) is the feature mapper apply_features expects\r\ndata = joy_feel + anger_feel + sadness_feel + surprise_feel + love_feel + fear_feel\r\nword_features = get_word_features(getwords([w for w, label in data]))\r\ntraining_set = nltk.classify.util.apply_features(extract_features, data)\r\nprint(training_set)\r\n\r\n","sub_path":"textemotionalrecognition/emotiondataset.py","file_name":"emotiondataset.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"353105103","text":"import numpy as np\nfrom geometry import Geometry\nfrom load import LoadCase\nfrom interpolation import Interpolation\nfrom solution import Solution\nfrom data.consts import parameters_case, parameters_geometry\nfrom helpers import step\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nplt.style.use('seaborn-whitegrid')  # plotting style\nmpl.rcParams[\"figure.dpi\"] = 160\n\nclass Simulation:\n    def __init__(self, **kwargs):\n        self.geo = Geometry(**kwargs)\n        self.interp = Interpolation()\n        self.case = LoadCase(self, **kwargs)\n        self.solution = Solution(self)\n        self.x = np.zeros(13)\n\n    def run(self):\n        # use this instance's own load case rather than the module-level global\n        self.x = np.linalg.solve(self.case.A, self.case.B)\n        return self.x\n    \n    @property\n    def BCs(self):\n        BCs = [\"Vy'(la)\", \"Vz'(la)\", \"My'(la)\", \"Mz'(la)\", \"T(la)\",\"vy'(x1)\", \"vz'(x1)\", \"vy'(x2)\",\"vz'(x2)\", \"vy'(x3)\",\"vz'(x3)\",\"vz'(act)\"]\n\n        A = list(self.case.A)\n\n        dic = {}\n        for BC, row in zip(BCs, A):\n            dic[BC] = row\n        \n        return dic\n\nif __name__ == \"__main__\":\n    params = {}\n    params.update(parameters_case)\n    params.update(parameters_geometry)\n\n    sim = Simulation(**params)\n\n    print(sim.case.A, sim.case.B)\n    sim.run()\n    print(f\"Det : {np.linalg.det(sim.case.A)}\")\n    # print(sim.x)\n    # print(sim.BCs)\n    print(sim.solution.sol)\n\n    sols = sim.x\n\n    # print(sols)\n    # print(sim.BCs)\n    # print(sols[0], sols[2], sols[4], sols[6], )\n    # print(sim.interp.integrate_q(sim.geo.l_a,ord=0)[-1])\n    # print(sim.interp.integrate_q(sim.geo.l_a,ord=1)[-1])\n    # print(sim.interp.integrate_q(sim.geo.l_a,ord=2)[-1])\n    # print(sim.interp.integrate_q(sim.geo.l_a,ord=3)[-1])\n    # print(sim.case.v_z_prime(sim.case.geo.x_2)*np.cos(sim.case.defl))\n    # print(sim.case.v_y_prime(sim.case.geo.x_2)*np.sin(sim.case.defl)) #vz(\n    # sim.interp.integrate_q(sim.geo.l_a,ord=1)[-1]\n\n    print(f\"Sum of forces in the y-direction : {sols[0]+sols[2]+sols[4]+sols[6]*sim.case.a_y-sim.case.P*sim.case.a_y+sim.interp.integrate_q(sim.geo.l_a,ord=1)[-1]:.2f}N\")\n    print(f\"Sum of forces in the z-direction : {sols[1]+sols[3]+sols[5]+sols[6]*sim.case.a_z-sim.case.P*sim.case.a_z:.2f}N\")\n\n    BC = sim.BCs\n    # print(sim.solution.sol)\n    for key in BC.keys():\n        print(f\"{key} - {BC[key]}\")\n    # print(sim.BCs)\n    \n    # print(f\"Sum of fo/r v_z:{-1/(sim.case.E*sim.geo.MMoI[1])}\")\n    # print(sim.case.B)\n    # print(sim.interp.integrate_q(sim.geo.x_1,ord=4)[-1])\n    sim.solution.plot_solution()\n    sim.solution.plot_torque()\n    
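# remaining Solution diagnostics: twist, deflections, shear, moments and slopes along the span\n    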
sim.solution.plot_twist()\n sim.solution.plot_defl()\n sim.solution.plot_shear()\n sim.solution.plot_moment()\n sim.solution.plot_slope()\n # print(sim.Bcs)\n","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"104944057","text":"# -*- encoding: utf-8 -*-\r\n\"\"\"\r\n@File : model_train.py\r\n@Time : 2019/10/14 13:18\r\n@Author : zwt\r\n@git : \r\n@Software: PyCharm\r\n\"\"\"\r\nimport codecs\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nimport json\r\nfrom bert_lstm_model import Model\r\nfrom al_bert import tokenization\r\nimport logging\r\nfrom data_process import BatchManager, convert_samples\r\n\r\n_logger = logging.getLogger()\r\n\r\n\r\nclass ModelTrain:\r\n\r\n def __init__(self):\r\n self.lstm_dim = 128\r\n self.batch_size = 1\r\n self.max_seq_len = 70\r\n self.clip = 5.0\r\n self.dropout_keep = 0.5\r\n self.optimizer = 'adam'\r\n self.lr = 0.001\r\n self.tag_schema = 'iob'\r\n self.ckpt_path = '..\\\\models'\r\n self.steps_check = 10\r\n self.zeros = False\r\n self.lower = True\r\n self.max_epoch = 2\r\n self.num_tags = len(convert_samples.tag_to_id)\r\n self.model = Model(init_checkpoint_file='D:\\models\\\\albert_base_zh\\\\albert_model.ckpt'\r\n , bert_config_dir='D:\\models\\\\albert_base_zh\\\\albert_config_base.json')\r\n self.saver = tf.train.Saver()\r\n\r\n self.tokenizer = tokenization.FullTokenizer(vocab_file='D:\\models\\\\albert_base_zh\\\\vocab.txt',\r\n do_lower_case=True)\r\n\r\n def train(self):\r\n path = '..\\data\\\\train.json'\r\n train_sentences = self.load_sentences(path)\r\n train_data = self.prepare_dataset(\r\n train_sentences, self.max_seq_len, self.lower)\r\n train_manager = BatchManager(train_data, self.batch_size)\r\n init = tf.global_variables_initializer()\r\n steps_per_epoch = train_manager.len_data\r\n with tf.Session() as sess:\r\n loss = []\r\n sess.run(init)\r\n for i in range(self.max_epoch):\r\n for batch in train_manager.iter_batch(shuffle=True):\r\n step, batch_loss = self.model.run_step(sess, True, batch)\r\n loss.append(batch_loss)\r\n if step % self.steps_check == 0:\r\n iteration = step // steps_per_epoch + 1\r\n print(\"iteration:{} step:{}/{}, \"\r\n \"NER loss:{:>9.6f}\".format(\r\n iteration, step % steps_per_epoch, steps_per_epoch, np.mean(loss)))\r\n loss = []\r\n self.save_model(sess, self.model, self.ckpt_path, global_steps=step)\r\n\r\n def load_sentences(self, path):\r\n \"\"\"\r\n Load sentences. A line must contain at least a word and its tag.\r\n Sentences are separated by empty lines.\r\n \"\"\"\r\n sentences = []\r\n num = 0\r\n for j, line in enumerate(codecs.open(path, 'r', 'utf8')):\r\n sentence = []\r\n num += 1\r\n data = json.loads(line)\r\n list_lable = str(data['label']).split(' ')\r\n for i, value in enumerate(list(data['text'])):\r\n temp = []\r\n temp.append(value)\r\n temp.append(list_lable[i])\r\n sentence.append(temp)\r\n sentences.append(sentence)\r\n return sentences\r\n\r\n def save_model(self, sess, model, path, global_steps):\r\n checkpoint_path = os.path.join(path, \"ner.ckpt\")\r\n model.saver.save(sess, checkpoint_path, global_step=global_steps)\r\n\r\n def prepare_dataset(self, sentences, max_seq_length, lower=None, train=True):\r\n \"\"\"\r\n Prepare the dataset. 
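Each raw sample is converted via convert_single_example into padded ALBERT input ids, input mask, segment ids and label ids. 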
Return a list of lists of dictionaries containing:\r\n - word indexes\r\n - word char indexes\r\n - tag indexes\r\n \"\"\"\r\n data = []\r\n for s in sentences:\r\n if lower:\r\n string = [w[0].strip().lower() for w in s]\r\n else:\r\n string = [w[0].strip() for w in s]\r\n char_line = ' '.join(string)\r\n text = tokenization.convert_to_unicode(char_line)\r\n\r\n if train:\r\n tags = [w[-1] for w in s]\r\n else:\r\n tags = ['O' for _ in string]\r\n\r\n labels = ' '.join(tags)\r\n labels = tokenization.convert_to_unicode(labels)\r\n\r\n ids, mask, segment_ids, label_ids = self.convert_single_example(char_line=text,\r\n max_seq_length=max_seq_length,\r\n tokenizer=self.tokenizer,\r\n label_line=labels)\r\n data.append([string, segment_ids, ids, mask, label_ids])\r\n\r\n return data\r\n\r\n def convert_single_example(self, char_line, max_seq_length, tokenizer, label_line):\r\n \"\"\"\r\n 将一个样本进行分析,然后将字转化为id, 标签转化为lb\r\n \"\"\"\r\n text_list = char_line.split(' ')\r\n label_list = label_line.split(' ')\r\n\r\n tokens = []\r\n labels = []\r\n for i, word in enumerate(text_list):\r\n token = tokenizer.tokenize(word)\r\n tokens.extend(token)\r\n label_1 = label_list[i]\r\n for m in range(len(token)):\r\n if m == 0:\r\n labels.append(label_1)\r\n else:\r\n labels.append(\"X\")\r\n # 序列截断\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels = labels[0:(max_seq_length - 2)]\r\n ntokens = []\r\n segment_ids = []\r\n label_ids = []\r\n ntokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n # append(\"O\") or append(\"[CLS]\") not sure!\r\n label_ids.append(convert_samples.tag_to_id[\"[CLS]\"])\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n segment_ids.append(0)\r\n label_ids.append(convert_samples.tag_to_id[labels[i]])\r\n ntokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n # append(\"O\") or append(\"[SEP]\") not sure!\r\n label_ids.append(convert_samples.tag_to_id[\"[SEP]\"])\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n\r\n # padding\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n # we don't concerned about it!\r\n label_ids.append(0)\r\n ntokens.append(\"**NULL**\")\r\n\r\n return input_ids, input_mask, segment_ids, label_ids\r\n\r\n\r\nif __name__ == \"__main__\":\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\n model = ModelTrain()\r\n model.train()\r\n","sub_path":"albert_crf_ner/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"344452847","text":"import tensorflow as tf\r\nfrom tensorflow.keras import layers, models\r\nimport gym\r\nimport random\r\nimport time\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom BreakoutBuffers import *\r\n\r\n# RL Assigment 3: DQN Learning; Part 2: Atari Breakout\r\n# April 2020\r\n# Abishek Ekaanth, Virgil Woerdings, Ruben Walen\r\n# BreakoutPlay: contains the neural code, learning code and main loop\r\n#\r\n# TODO:\r\n# discount factor DONE\r\n# kernel size, stride checking DONE\r\n# check parameters\r\n# ...\r\n#\r\n# ENHANCEMENTS:\r\n# trajectories buffer: instead of storing individual samples...\r\n#... store whole-game trajectories. Backpropagate (discounted) rewards...\r\n#... 
along the game trajectory when a positive reward is received\r\n# main/positive buffer split: always attempt to train network with a certain...\r\n#... proportion of positive reward samples kept separately in the buffer\r\n\r\nclass BreakoutNetwork:\r\n def __init__(self, frame_size, resize_factor, n_actions, loss_function, optimiser, load_weights=None, convolutional_layers=None):\r\n \"\"\" The DQN Network implementation (static architecture, see below).\r\n The architecture has a variable number of convolutional layers, which are then collected using a dense layer, which outputs to a dense action space layer.\r\n Params:\r\n frame_size (list/array/tuple of int): the size of the frame\r\n resize_factor (float): automatic scaling factor for the frame\r\n n_actions (int): the size of the discrete action space\r\n loss_function (tf loss function): a TensorFlow loss function for training\r\n optimiser (tf optimizer): a TensorFlow network optimizer\r\n load_weights (string): a filepath to a network weights file\r\n convolutional_layers (see below or None): convolutional layer structure or None for default\"\"\"\r\n self.original_frame_size = frame_size\r\n self.resize_factor = resize_factor\r\n self.reduced_frame_size = np.array([int(self.original_frame_size[0] * resize_factor),\r\n int(self.original_frame_size[1] * resize_factor),\r\n int(self.original_frame_size[2])])\r\n self.n_actions = n_actions\r\n\r\n # construct the network\r\n if convolutional_layers == None:\r\n self.network_params = [(32, (3, 3), (1, 1)), (64, (3, 3), (1, 1)), (64, (3, 3), (1, 1))] # convolutional feature dimensionality (output) and stride: every entry = 1 conv. layer\r\n else:\r\n self.network_params = convolutional_layers\r\n # [nfilters, (kernel_size_X, kernel_size_Y), (stride_X, stride_Y)]\r\n # previous tries: [32, (3, 3), (1, 1)]\r\n # [(32, (3, 3), (1, 1)), (64, (3, 3), (1, 1)), (64, (3, 3), (1, 1))]\r\n self.initialiser = tf.keras.initializers.RandomUniform(minval=-0.0002, maxval=0.0005, seed=None) # a weights initialiser: this prevents high initial Q-values\r\n\r\n model = models.Sequential()\r\n for i, entry in enumerate(self.network_params):\r\n model.add(layers.Conv2D(entry[0], entry[1], strides=entry[2], activation='linear',\r\n input_shape=self.reduced_frame_size, kernel_initializer=self.initialiser)) # convolutional layer\r\n model.add(layers.MaxPooling2D((2, 2))) # max pooling to lower image size\r\n \r\n model.add(layers.Flatten()) # flatten feature maps\r\n model.add(layers.Dense(self.network_params[-1][0],\r\n activation='relu', kernel_initializer=self.initialiser)) # first dense layer: connect to last conv. layer\r\n model.add(layers.Dense(n_actions, activation='linear', kernel_initializer=self.initialiser)) # second dense layer: output actions\r\n\r\n self.model = model\r\n self.model.compile(loss=loss_function, optimizer=optimiser)\r\n\r\n if load_weights != None: # load weights from a weights filepath\r\n self.model.load_weights(load_weights)\r\n\r\n def predictQVectorFromFrame(self, frames):\r\n \"\"\" Predict the Q-values of frames. 
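A single H x W x C frame is wrapped into a batch of one before prediction. 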
Automatically resizes images.\"\"\"\r\n        if len(frames.shape) == 3:\r\n            frames = np.reshape(frames, (1, frames.shape[0], frames.shape[1], frames.shape[2])) # single-image batch\r\n        resized_images = tf.image.resize(frames, self.reduced_frame_size[0:2]) # resize images first\r\n        return self.model.predict(resized_images)\r\n        #values = frame\r\n        #for layer in self.model._layers:\r\n        #    values = layer(values) # propagate through the layer\r\n        #return values # return the values in the output layer after propagating through all layers\r\n\r\n    def fit(self, input_frames, output_matrix, batch_size, epochs):\r\n        \"\"\" TensorFlow fitting, using output_matrix as targets and input_frames as inputs. This is called in DQNLearner.updateNetwork().\"\"\"\r\n        resized_images = tf.image.resize(input_frames, self.reduced_frame_size[0:2]) # resize images first\r\n        self.model.fit(resized_images, output_matrix, batch_size=batch_size, epochs=epochs)\r\n        return\r\n\r\nclass BreakoutDQNLearner:\r\n    def __init__(self, buffer_size, cycles_per_network_transfer, discount_factor, load_weights=None, game_seed=None, buffer_mode='simple', embellish_reward_factor=1,\r\n                 custom_game=None, convolutional_layers=None):\r\n        \"\"\"Our implementation of a DQN-based Q-learning algorithm. This class handles experience storing, game stepping, network updates, action strategy, and more.\r\n        Params:\r\n        buffer_size (int): the maximum size of the replay buffer\r\n        cycles_per_network_transfer (int): how many cycles before the prediction network weights are transferred to the target network\r\n        discount_factor (float, 0 to 1): the discount factor for the policy learner\r\n        load_weights (string): a filename to load network weights from, for the load/save system\r\n        game_seed (None or int): a seed for the Atari environment, if None: the seed is random each game\r\n        buffer_mode ('simple' OR 'posisplit' OR 'trajectory'): the type of buffer used, see BreakoutBuffers.py\r\n        embellish_reward_factor (float): a linear scaling factor for rewards sampled from actions. May make training the networks easier?\r\n        custom_game (None or game): a playable game with the right handles, such as Atari Breakout from atari-py gym. If None: uses Atari Breakout.\r\n        convolutional_layers (see BreakoutNetwork or None): convolutional layer structure for BreakoutNetwork or None for default\r\n        \"\"\"\r\n        \r\n        self.buffer_mode = buffer_mode\r\n        if buffer_mode == 'simple':\r\n            self.buffer = BreakoutExperienceBuffer(buffer_size)\r\n        elif buffer_mode == 'posisplit':\r\n            self.buffer = BreakoutExperiencePosisplitBuffer(buffer_size, buffer_size * 0.2)\r\n        elif buffer_mode == 'trajectory':\r\n            self.buffer = BreakoutExperienceTrajectoryBuffer(buffer_size, auto_backpropagation_discount=discount_factor) # ATTN: buffer_size is now the size in games, not in samples!\r\n        else:\r\n            raise Exception(\"@BreakoutDQNLearner.__init__: invalid buffer mode: \" + buffer_mode)\r\n        \r\n        self.n_updates_count = 0 # how many times the network(s) was updated\r\n        self.cycles_per_network_transfer = cycles_per_network_transfer # after how many update cycles we update the target network...\r\n        self.discount_factor = discount_factor # the Q-policy learner discount factor\r\n        self.embellish_reward_factor = embellish_reward_factor # this is a linear scaling factor for the rewards\r\n        #... 
with the prediction network weights\r\n\r\n if custom_game == None:\r\n self.game = gym.make('Breakout-v0')\r\n else:\r\n self.game = custom_game\r\n self.game_seed = game_seed\r\n self.game.seed(game_seed)\r\n self.current_frame = self.game.reset()\r\n self.current_frame, _, self.game_over, _ = self.game.step(random.choice(range(1, self.game.action_space.n)))\r\n self.game_over = False\r\n self.last_frame_time = 0\r\n self.action_space_size = self.game.action_space.n\r\n\r\n # target and prediction networks separated to reduce target instability (double DQN)\r\n #self.opt = tf.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) # stochastic gradient descent\r\n #self.opt = tf.optimizers.RMSprop(learning_rate=0.001, rho=0.9) # RMSprop\r\n self.opt = tf.optimizers.Adagrad(learning_rate=0.01) # adagrad\r\n self.resize_factor = 0.7\r\n self.target_network = BreakoutNetwork(self.current_frame.shape, self.resize_factor, self.action_space_size, \"mean_squared_error\",\r\n self.opt, load_weights=load_weights, convolutional_layers=convolutional_layers)\r\n self.prediction_network = BreakoutNetwork(self.current_frame.shape, self.resize_factor, self.action_space_size, \"mean_squared_error\",\r\n self.opt, load_weights=load_weights, convolutional_layers=convolutional_layers)\r\n\r\n self.buffer_indices = {'start_frame': 0, 'action': 1, 'reward': 2, 'game_over': 3, 'result_frame': 4}\r\n\r\n print(\">BreakoutDQNLearner: Q check (initial frame)\")\r\n print(self.target_network.predictQVectorFromFrame(self.current_frame))\r\n print(\"Actions:\", self.game.get_action_meanings())\r\n\r\n def getMostPrudentAction(self, strategy='epsilon-greedy', **kwargs):\r\n \"\"\" Use a strategy function to determine the most prudent action given the current frame (state) and environment\r\n strategy ('random' OR 'epsilon-greedy'): the prudence strategy for picking the action. May require one or more kwargs.\"\"\"\r\n Q_vector = self.prediction_network.predictQVectorFromFrame(self.current_frame)\r\n if strategy == 'epsilon-greedy':\r\n epsilon = kwargs['epsilon']\r\n action = self._selectEpsilonGreedy(Q_vector, epsilon=epsilon)\r\n elif strategy == 'random':\r\n action = self._selectRandom(Q_vector)\r\n else:\r\n raise Exception(\"@BreakoutDQNLearner.getMostPrudentAction: unknown strategy\")\r\n return action\r\n\r\n def takeActionAndStoreExperience(self, do_not_store=False, strategy='epsilon-greedy', **kwargs):\r\n \"\"\"Take an action according to the action strategy (see getMostPrudentAction()) and store the experience in the buffer\r\n (unless do_not_store is True)\"\"\"\r\n action = self.getMostPrudentAction(strategy, **kwargs)\r\n frame, reward, game_over, _ = self.game.step(action)\r\n\r\n reward = reward * self.embellish_reward_factor # we can embellish rewards linearly in this way\r\n tup = (self.current_frame, action, reward, game_over, frame)\r\n if not do_not_store:\r\n self.buffer.put(tup) # put new action tuple in the experience replay buffer\r\n self.current_frame = frame\r\n self.game_over = game_over\r\n if self.game_over:\r\n self.resetAndRandomNonZeroMove()\r\n return tup\r\n\r\n def updateNetwork(self, use_replay_buffer=True, nsamples_replay_buffer=1, train_batch_size='auto', epochs=1, experiences=None):\r\n \"\"\"One cycle of DQN network updating. 
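For each sampled experience the regression target on the taken action is r + discount_factor * max_a' Q_target(s', a') (just r when the sample ended the game), while every other action output keeps its currently predicted value and thus contributes ~zero loss. 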
Trains the prediction network, and may then transfer to the target network (see self.cycles_per_network_transfer))\r\n The core training loop uses a root-mean-squared error loss function that should be 0 for every action node in the DQN that is not the\r\n action node corresponding to the current sampled experience's action. The loss for that action node follows the Q-policy learning loss\r\n Params:\r\n use_replay_buffer (bool): whether to use the replay buffer - if False you must provide experiences yourself\r\n nsamples_replay_buffer (int): how many samples to draft from the replay buffer\r\n train_batch_size ('auto' or int): if not 'auto', you can set the training batch size to something not equal to the number of samples\r\n epochs (int): number of epochs to train\r\n experiences (None or list of experiences): a list of experiences if use_replay_buffer == False\"\"\"\r\n if train_batch_size == 'auto':\r\n train_batch_size = nsamples_replay_buffer\r\n if use_replay_buffer:\r\n experience_batch = self.buffer.sample(nsamples_replay_buffer) # throws exception: buffer content too small\r\n else:\r\n experience_batch = experiences\r\n\r\n # the core training loop of the algorithm starts here\r\n target_matrix = np.zeros((nsamples_replay_buffer, self.action_space_size)) # matrix of target Q values\r\n input_frames = [None for _ in range(nsamples_replay_buffer)] # the frames to use as inputs\r\n for i, exp in enumerate(experience_batch):\r\n target_matrix[i, :] = self.prediction_network.predictQVectorFromFrame(exp[self.buffer_indices['start_frame']]) # no loss for non-represented action\r\n if exp[self.buffer_indices['game_over']] == False: # not a game over state - add the max Q of the next frame to the action taken\r\n max_Q = np.max(self.target_network.predictQVectorFromFrame(exp[self.buffer_indices['result_frame']])) # from frame resulting from action\r\n target_matrix[i, exp[self.buffer_indices['action']]] = self.discount_factor * max_Q # replace by max_Q target\r\n target_matrix[i, exp[self.buffer_indices['action']]] += exp[self.buffer_indices['reward']] # update with reward associated with that action in that sample\r\n input_frames[i] = exp[self.buffer_indices['start_frame']]\r\n\r\n self.prediction_network.fit(np.array(input_frames), target_matrix, train_batch_size, epochs)\r\n self.n_updates_count += 1\r\n if self.n_updates_count % self.cycles_per_network_transfer == 0: # transfer prediction network to train network\r\n self.target_network.model.set_weights(self.prediction_network.model.get_weights())\r\n return\r\n\r\n def render(self, frame_rate_mills, disable=False):\r\n \"\"\"Wait for a maximum of frame_rate_mills milliseconds per render cycle, draw the game screen with most recent action\r\n disable (bool): useful for command-line runs\"\"\"\r\n if not disable:\r\n time_now = time.time()\r\n frame_wait = max(0, (frame_rate_mills - (time_now - self.last_frame_time)))\r\n time.sleep(frame_wait)\r\n self.last_frame_time = time_now\r\n self.game.render()\r\n return\r\n \r\n def _selectEpsilonGreedy(self, Q_vector, epsilon=0.8):\r\n \"\"\"Use epsilon-greedy strategy to select an action\r\n Q_vector contains the predicted Q-value (value function) for each action\r\n Returns an integer corresponding to the chosen action\"\"\"\r\n if random.random() < epsilon: # pick highest rewarding (exploit)\r\n max_values = np.where(Q_vector == np.amax(Q_vector))[0] # all maximum value indices; prevent determinism on first index (argmax)\r\n chosen_index = random.choice(max_values) # pick one of 
the max values at random\r\n            return chosen_index\r\n        else: # pick random for exploration\r\n            return random.randrange(Q_vector.shape[1])\r\n\r\n    def _selectRandom(self, Q_vector):\r\n        \"\"\"Select a random action\r\n        Q_vector contains the predicted Q-value (value function) for each action\r\n        Returns an integer corresponding to the chosen action\"\"\"\r\n        return random.randrange(Q_vector.shape[1])\r\n\r\n    def resetAndRandomNonZeroMove(self):\r\n        \"\"\"Reset the game, reseed and then do a random move to start the game again.\"\"\"\r\n        self.game.reset() # reset the game completely\r\n        self.game.seed(self.game_seed) # reseed\r\n        self.current_frame, _, self.game_over, _ = self.game.step(random.choice(range(1, self.game.action_space.n))) # prevent getting stuck in zero-moves\r\n        return\r\n\r\n# Main loop: \r\nif __name__ == \"__main__\":\r\n    # DQN Learning on Atari Breakout\r\n    BUFFER_SIZE = 500 # size of the replay buffer\r\n    CYCLES_FOR_TRANSFER = 6 # cycles to wait before transferring prediction weights to target network\r\n    N_ACTIONS_PER_PLAY_CYCLE = 25 # number of actions to sample in each master epoch\r\n    N_SAMPLES_PER_LEARN_CYCLE = 40 # number of samples to train with in master epoch training step\r\n    N_EPOCHS_PER_LEARN_CYCLE = 10 # number of epochs to train per master epoch\r\n    N_CYCLES_PERFORMANCE_EVAL = 0 # number of cycles for performance evaluation during each master epoch (slows down the algorithm)\r\n    N_EPOCHS_MASTER = 100\r\n    EPSILON = 0.8 # epsilon-greedy exploration parameter (during training)\r\n    DISCOUNT = 0.80 # discount factor during training\r\n    EMBELLISH_REWARD_FACTOR = 10 # linear reward scaling\r\n    FRAME_RATE = 0.02 # frame rate for rendering steps\r\n    DISABLE_RENDERING = False # whether to disable rendering the game\r\n    DISABLE_PLOTTING = False # disable some plot making (see end of this file)\r\n    EXPERIENCE_BUFFER_MODE = 'posisplit' # experience buffer type: 'simple', 'posisplit' or 'trajectory'\r\n\r\n    WEIGHT_LOAD_PATH = None # if none, do not load weights to DQNs, initialise randomly\r\n    STORE_WEIGHTS = True # whether to store the DQN weights after completing the run (stores target network last values)\r\n    WEIGHT_STORE_PATH = os.getcwd() + \"/weights\"\r\n    WEIGHT_STORE_NAMESTAMP = \"latest\" # if None: generate a time-based namestamp; if some string: can overwrite that file!\r\n\r\n    #np.random.seed(333)\r\n    #random.seed(333)\r\n    GAME_SEED = None # environment seed\r\n    \r\n    learner = BreakoutDQNLearner(BUFFER_SIZE, CYCLES_FOR_TRANSFER, DISCOUNT,\r\n                                 load_weights=WEIGHT_LOAD_PATH, game_seed=GAME_SEED, buffer_mode=EXPERIENCE_BUFFER_MODE,\r\n                                 embellish_reward_factor=EMBELLISH_REWARD_FACTOR)\r\n    print(\">__main__: Filling buffer (samples:\", BUFFER_SIZE, \"total)\")\r\n    for i in range(BUFFER_SIZE): # buffer filling\r\n        #print(\"Filling buffer: cycle\", i + 1)\r\n        learner.takeActionAndStoreExperience(epsilon=EPSILON, strategy='random')\r\n        #learner.render(FRAME_RATE, disable=DISABLE_RENDERING)\r\n    for i in range(N_EPOCHS_MASTER):\r\n        print(\"Master epoch\", i + 1)\r\n        for _ in range(N_ACTIONS_PER_PLAY_CYCLE):\r\n            learner.takeActionAndStoreExperience(epsilon=EPSILON)\r\n            #learner.render(FRAME_RATE, disable=DISABLE_RENDERING)\r\n        learner.updateNetwork(nsamples_replay_buffer=N_SAMPLES_PER_LEARN_CYCLE, epochs=N_EPOCHS_PER_LEARN_CYCLE)\r\n        total_score = 0\r\n        state = learner.game.clone_full_state()\r\n        for _ in range(N_CYCLES_PERFORMANCE_EVAL):\r\n            learner.resetAndRandomNonZeroMove()\r\n            tup = learner.takeActionAndStoreExperience(epsilon=0.95, do_not_store=True)\r\n            
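# near-greedy probe (epsilon=0.95) of the current policy; the reward is accumulated below\r\n            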
total_score += tup[learner.buffer_indices['reward']]\r\n        learner.game.restore_full_state(state)\r\n        print(\"Total score for master epoch:\", total_score)\r\n\r\n    if STORE_WEIGHTS:\r\n        print(\">__main__: storing weights\")\r\n        if not os.path.isdir(WEIGHT_STORE_PATH):\r\n            print(\">__main__: creating directory:\", WEIGHT_STORE_PATH)\r\n            os.mkdir(WEIGHT_STORE_PATH)\r\n        timest = time.localtime(time.time())\r\n        if WEIGHT_STORE_NAMESTAMP == None:\r\n            namestamp = \"breakout_weights_\" + str(timest.tm_mon) + str(timest.tm_mday) + str(timest.tm_hour) + str(timest.tm_min) + str(timest.tm_sec)\r\n        else:\r\n            namestamp = WEIGHT_STORE_NAMESTAMP\r\n        learner.target_network.model.save_weights((WEIGHT_STORE_PATH + \"/\" + namestamp))\r\n\r\n    # Test the AI in NUM_GAMES games\r\n    NUM_GAMES = 5\r\n    learner.resetAndRandomNonZeroMove()\r\n    total_score = 0\r\n    game_score = 0\r\n    games_completed = 0\r\n    max_Q_vector = []\r\n    actions_taken = []\r\n    complete = False\r\n    while not complete:\r\n        tup = learner.takeActionAndStoreExperience(epsilon=0.98, do_not_store=True)\r\n        if games_completed == 0: # first game only\r\n            Q_vector = learner.prediction_network.predictQVectorFromFrame(tup[learner.buffer_indices['start_frame']])\r\n            max_Q = np.max(Q_vector)\r\n            max_Q_vector.append(max_Q)\r\n            actions_taken.append(tup[learner.buffer_indices['action']])\r\n        #print(\"Action\", tup[learner.buffer_indices['action']])\r\n        learner.render(FRAME_RATE, disable=DISABLE_RENDERING)\r\n        total_score += tup[learner.buffer_indices['reward']]\r\n        game_score += tup[learner.buffer_indices['reward']]\r\n        if tup[learner.buffer_indices['game_over']] == True:\r\n            print(\"Game finished; score:\", game_score)\r\n            game_score = 0\r\n            games_completed += 1\r\n            if games_completed >= NUM_GAMES:\r\n                complete = True\r\n            else:\r\n                learner.resetAndRandomNonZeroMove()\r\n    print(\"Average score\", str((total_score / NUM_GAMES)))\r\n\r\n    if not DISABLE_PLOTTING:\r\n        fig, ax1 = plt.subplots()\r\n        ax1.plot(max_Q_vector)\r\n        ax1.set_title(\"Max Q-values per action frame (one game)\")\r\n        ax1.set_xlabel(\"Frame\")\r\n        ax1.set_ylabel(\"Q-value\")\r\n\r\n        fig, ax2 = plt.subplots()\r\n        ax2.hist(actions_taken, learner.action_space_size, align='mid')\r\n        ax2.set_title(\"Histogram of actions taken (one game)\")\r\n        ax2.set_xlabel(\"Action\")\r\n        ax2.set_ylabel(\"Frequency\")\r\n        ax2.set_xticks((np.arange(0.4, (0.4 + learner.action_space_size - 0.75), step=0.75)))\r\n        ax2.set_xticklabels(list(learner.game.get_action_meanings()))\r\n        plt.show()\r\n","sub_path":"A3/BreakoutPlay.py","file_name":"BreakoutPlay.py","file_ext":"py","file_size_in_byte":21615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"88621352","text":"#!/usr/bin/env python3\n\n#\n# This file is part of LiteX-Boards.\n#\n# Copyright (c) 2021 Gwenhael Goavec-Merou \n# SPDX-License-Identifier: BSD-2-Clause\n\nimport argparse\nimport os  # needed below for os.system() and os.path.join()\nimport subprocess\n\nfrom migen import *\n\nfrom litex_boards.platforms import digilent_arty_z7\nfrom litex.build import tools\nfrom litex.build.xilinx import common as xil_common\nfrom litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict\n\nfrom litex.soc.interconnect import axi\nfrom litex.soc.interconnect import wishbone\n\nfrom litex.soc.cores.clock import *\nfrom litex.soc.integration.soc_core import *\nfrom litex.soc.integration.soc import SoCRegion\nfrom litex.soc.integration.builder import *\nfrom litex.soc.cores.led import LedChaser\n\n# CRG 
----------------------------------------------------------------------------------------------\n\n\nclass _CRG(Module):\n def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):\n self.rst = Signal()\n self.clock_domains.cd_sys = ClockDomain()\n\n # # #\n\n if use_ps7_clk:\n self.comb += ClockSignal(\"sys\").eq(ClockSignal(\"ps7\"))\n self.comb += ResetSignal(\"sys\").eq(ResetSignal(\"ps7\") | self.rst)\n else:\n self.submodules.pll = pll = S7PLL(speedgrade=-1)\n self.comb += pll.reset.eq(self.rst)\n pll.register_clkin(platform.request(platform.default_clk_name), platform.default_clk_freq)\n pll.create_clkout(self.cd_sys, sys_clk_freq)\n # Ignore sys_clk to pll.clkin path created by SoC's rst.\n platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin)\n\n# BaseSoC ------------------------------------------------------------------------------------------\n\n\nclass BaseSoC(SoCCore):\n def __init__(self, variant=\"z7-20\", sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs):\n platform = digilent_arty_z7.Platform(variant)\n\n if kwargs.get(\"cpu_type\", None) == \"zynq7000\":\n kwargs['integrated_sram_size'] = 0\n kwargs['with_uart'] = False\n self.mem_map = {\n 'csr': 0x4000_0000, # Zynq GP0 default\n }\n\n # SoCCore ----------------------------------------------------------------------------------\n SoCCore.__init__(self, platform, sys_clk_freq,\n ident = \"LiteX SoC on Arty Z7\",\n ident_version = True,\n **kwargs)\n\n # Zynq7000 Integration ---------------------------------------------------------------------\n if kwargs.get(\"cpu_type\", None) == \"zynq7000\":\n preset_name = \"arty_z7_20.tcl\" if variant == \"z7-20\" else \"arty_z7_10.tcl\"\n\n os.system(\"wget http://kmf2.trabucayre.com/\" + preset_name)\n self.cpu.set_ps7(preset=preset_name)\n\n # Connect AXI GP0 to the SoC\n wb_gp0 = wishbone.Interface()\n self.submodules += axi.AXI2Wishbone(\n axi = self.cpu.add_axi_gp_master(),\n wishbone = wb_gp0,\n base_address = self.mem_map['csr'])\n self.add_wb_master(wb_gp0)\n\n use_ps7_clk = True\n else:\n use_ps7_clk = False\n\n # CRG --------------------------------------------------------------------------------------\n self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)\n\n # Leds -------------------------------------------------------------------------------------\n if with_led_chaser:\n self.submodules.leds = LedChaser(\n pads = platform.request_all(\"user_led\"),\n sys_clk_freq = sys_clk_freq)\n\n# Build --------------------------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description=\"LiteX SoC on Arty Z7\")\n parser.add_argument(\"--build\", action=\"store_true\", help=\"Build bitstream.\")\n parser.add_argument(\"--load\", action=\"store_true\", help=\"Load bitstream.\")\n parser.add_argument(\"--variant\", default=\"z7-20\", help=\"Board variant (z7-20 or z7-10).\")\n parser.add_argument(\"--sys-clk-freq\", default=125e6, help=\"System clock frequency.\")\n builder_args(parser)\n soc_core_args(parser)\n vivado_build_args(parser)\n parser.set_defaults(cpu_type=\"zynq7000\")\n args = parser.parse_args()\n\n soc = BaseSoC(\n variant = args.variant,\n sys_clk_freq=int(float(args.sys_clk_freq)),\n **soc_core_argdict(args)\n )\n builder = Builder(soc, **builder_argdict(args))\n print(builder.compile_software)\n builder.build(**vivado_build_argdict(args), run=args.build)\n\n if args.load:\n prog = soc.platform.create_programmer()\n 
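# download the freshly built bitstream into the FPGA\n        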
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + \".bit\"))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"litex_boards/targets/digilent_arty_z7.py","file_name":"digilent_arty_z7.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"594255694","text":"import findspark\r\nfindspark.init()\r\nfindspark.find()\r\nimport itertools\r\nimport pyspark\r\nimport sys\r\nimport time\r\nimport json\r\nfrom collections import defaultdict\r\nfrom operator import add\r\nfrom pyspark import SparkContext, SparkConf\r\nfrom pyspark import SparkContext\r\nfrom itertools import combinations\r\nimport random\r\n\r\ndef hash_value(j,i,Num_hash,signature_matrix,hashfunction,prime_num):\r\n for k in range(Num_hash):\r\n signature_matrix[k][j] = min(signature_matrix[k][j],(i*hashfunction[k][0]+hashfunction[k][1]) % prime_num)\r\n return signature_matrix\r\n\r\ndef sign1(j,dict_business,Num_hash,signature_matrix,hashfunction,prime_num):\r\n for i in dict_business[j]:\r\n sign1=hash_value(j,i,Num_hash,signature_matrix,hashfunction,prime_num)\r\n return signature_matrix\r\n\r\ndef Prime_check(x):\r\n k=[False for i in range(2,x) if x%i==0]\r\n if not k:\r\n return True\r\n else:\r\n return False\r\n \r\ndef Candidate(data, c, r):\r\n cand = []\r\n d_s = []\r\n dict1 = {}\r\n data = list(data)\r\n for i in range(c):\r\n s_list = []\r\n for j in range(r):\r\n s_list.append(data[j][i])\r\n d_s.append(data[j][i])\r\n s_list = tuple(s_list)\r\n if s_list not in dict1:\r\n dict1[s_list] = []\r\n dict1[s_list].append(i)\r\n for values in dict1.items():\r\n if len(values[1]) > 1:\r\n cand.extend(list(combinations(values[1], 2)))\r\n return iter(cand)\r\n\r\ndef pairCandidatesz(pairs):\r\n list_1=dict_user[pairs[0]]\r\n list_2=dict_user[pairs[1]]\r\n result = list()\r\n intersected=list(set(list_1) & set(list_2))\r\n rated_intersection=len(intersected)\r\n rated_union=len(set(list_1+list_2)) \r\n if rated_intersection >= 3 and rated_intersection / rated_union >= 0.01:\r\n user1=reversed_uid[pairs[0]]\r\n user2=reversed_uid[pairs[1]]\r\n List_ratings1=[]\r\n List_ratings2=[]\r\n for items in intersected:\r\n items=reversed_bid[items]\r\n List_ratings1.append(utility_dict[items,user1])\r\n List_ratings2.append(utility_dict[items,user2])\r\n mean1=sum(List_ratings1)/len(List_ratings1)\r\n mean2=sum(List_ratings2)/len(List_ratings2)\r\n List_ratings1=[values-mean1 for values in List_ratings1]\r\n List_ratings2=[values-mean2 for values in List_ratings2]\r\n result.append(((user1, user2), (List_ratings1,List_ratings2)))\r\n return result\r\n\r\ndef Pearson(ratings):\r\n check1=ratings[0]\r\n check2=ratings[1]\r\n numerator=sum([x*y for x,y in zip(check1,check2)])\r\n sum_den1=(sum([x**2 for x in check1]))**0.5\r\n sum_den2=(sum([x**2 for x in check2]))**0.5\r\n denominator=(sum_den1)*sum_den2\r\n \r\n try:\r\n if numerator / denominator > 0:\r\n return numerator / denominator\r\n except ZeroDivisionError:\r\n return None\r\n \r\n \r\ndef valid_pairs(baskets, support, numofwhole):\r\n sub_s = int(support*(len(baskets))/numofwhole)\r\n check_dict = {} \r\n for basket in baskets:\r\n list_basket=list(basket)\r\n list_basket.sort(key=lambda x:x[0])\r\n for value1, value2 in combinations(list_basket, 2): \r\n if tuple([value1[0],value2[0]]) in check_dict:\r\n check_dict[tuple([value1[0],value2[0]])]+=[(value1[1],value2[1])]\r\n else:\r\n check_dict[tuple([value1[0],value2[0]])]=[(value1[1],value2[1])]\r\n \r\n 
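# keep only item pairs whose number of co-ratings meets the partition-scaled support threshold sub_s\r\n    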
L=[]\r\n for keys in check_dict.keys():\r\n if len(check_dict[keys])>=sub_s:\r\n L.append([keys,check_dict[keys]])\r\n return L\r\n\r\ndef pearson_correlation(pairs):\r\n list_pairs=list(pairs)\r\n if len(list_pairs)==0:\r\n return 0\r\n item1_ratings=[pairs[0] for pairs in pairs]\r\n item2_ratings=[pairs[1] for pairs in pairs]\r\n mean_item1=sum(item1_ratings)/len(item1_ratings)\r\n mean_item2=sum(item2_ratings)/len(item2_ratings)\r\n check1=[item-mean_item1 for item in item1_ratings]\r\n check2=[item-mean_item2 for item in item2_ratings]\r\n numerator=sum([x*y for x,y in zip(check1,check2)])\r\n sum_den1=(sum([x*y for x,y in zip(check1,check1)]))**0.5\r\n sum_den2=(sum([x*y for x,y in zip(check2,check2)]))**0.5\r\n denominator=(sum_den1)*sum_den2\r\n if numerator==0 or denominator==0:\r\n return 0\r\n return numerator/denominator\r\n \r\n\r\n \r\nif __name__ == \"__main__\":\r\n if len(sys.argv)!=4:\r\n print(\"This function needs 3 input arguments \")\r\n sys.exit(1)\r\n input_file=sys.argv[1]\r\n outputfile=sys.argv[2]\r\n condition = sys.argv[3]\r\n \r\n conf = (\r\n SparkConf()\r\n .setAppName(\"your app name\")\r\n .set(\"spark.driver.memory\", \"4g\")\r\n .set(\"spark.executor.memory\", \"4g\"))\r\n sc = SparkContext(conf=conf)\r\n #sc = SparkContext('local[*]','test')\r\n \r\n time1=time.time()\r\n reviews = sc.textFile(input_file).persist()\r\n rdd=reviews.map(lambda x:json.loads(x))\r\n \r\n if condition=='item_based':\r\n \r\n reviews = sc.textFile(input_file).persist()\r\n rdd=reviews.map(lambda x:json.loads(x))\r\n ext_rdd=rdd.map(lambda x:(x['user_id'],(x['business_id'],x['stars'])))\r\n test_reviews = sc.textFile('test_review.json').persist()\r\n test_rdd=test_reviews.map(lambda x:json.loads(x))\r\n test_rdd=test_rdd.map(lambda x:(x['user_id'],x['business_id']))\r\n trainRDD_user = ext_rdd.groupByKey().mapValues(dict).collect()\r\n UserDict = dict(trainRDD_user)\r\n user_avg_rating=ext_rdd.map(lambda x:(x[0],x[1][1])).groupByKey().mapValues(list).map(lambda x:(x[0],sum(x[1])/len(x[1]))).collect()\r\n avgsDict = dict(user_avg_rating)\r\n overall_avg = ext_rdd.map(lambda row: row[1][1]).mean()\r\n count_user=ext_rdd.map(lambda x:x[0]).distinct().count()\r\n k=ext_rdd.groupByKey().map(lambda x:x[1]).mapPartitions(lambda x:valid_pairs(list(x),7,count_user))\r\n pearson_corr_values=k.reduceByKey(add).mapValues(pearson_correlation)\r\n final_rdd=pearson_corr_values.filter(lambda x:x[1]>0 and x[1]is not None)\r\n final=final_rdd.map(lambda x: {\"b1\": x[0][0], \"b2\": x[0][1], \"sim\": x[1]}) \r\n with open(outputfile, 'w') as fp:\r\n fp.writelines(json.dumps(t) + '\\n' for t in final.collect()) \r\n \r\n \r\n \r\n \r\n if condition=='user_based':\r\n check1=rdd.map(lambda x:(x['user_id'],x['business_id'],float(x['stars'])))\r\n businessid_unique = check1.map(lambda x: (x[1])).distinct().collect()\r\n businessid_count=len(businessid_unique) \r\n business_dict={}\r\n i=0\r\n for ids in businessid_unique:\r\n business_dict[ids]=i\r\n i+=1\r\n\r\n reversed_bid = {v : k for k, v in business_dict.items()}\r\n\r\n uid_dict = dict(check1.map(lambda x: (x[0])).distinct().zipWithIndex().collect())\r\n reversed_uid = {v : k for k, v in uid_dict.items()}\r\n\r\n utility_dict = dict(check1.map(lambda x: ((x[1], x[0]), x[2])).groupByKey().mapValues(lambda l: sum(l) / len(l)).collect())\r\n userid_unique = check1.map(lambda x: (x[0],x[1])).groupByKey().mapValues(set).collect()\r\n userid_count=len(userid_unique)\r\n dict_user = {}\r\n for i in range(userid_count):\r\n dict_user[i]=[]\r\n for u 
in userid_unique[i][1]:\r\n dict_user[i].append(business_dict[u])\r\n\r\n prime_num=userid_count\r\n while not Prime_check(prime_num):\r\n prime_num += 1\r\n Num_hash = 50\r\n hashfunction = []\r\n random.seed(10000)\r\n for i in range(Num_hash): \r\n hashfunction.append([random.randint(0, 10000), random.randint(0, 10000)])\r\n\r\n signature_matrix = [[businessid_count for col in range(userid_count)] for row in range(Num_hash)]\r\n\r\n row = 1\r\n b = Num_hash / 1\r\n for j in range(userid_count):\r\n signature_matrix=sign1(j,dict_user,Num_hash,signature_matrix,hashfunction,prime_num)\r\n\r\n \r\n sign_rdd = sc.parallelize(signature_matrix, b)\r\n candidates = sign_rdd.mapPartitions(lambda x: Candidate(x, userid_count, row)).map(lambda x: (x, 1))\r\n c=candidates.reduceByKey(lambda x, y: 1).map(lambda x: x[0])\r\n\r\n li=c.flatMap(pairCandidatesz).mapValues(Pearson).filter(lambda x:x[1] is not None).map(lambda x: {\"u1\" : x[0][0], \"u2\" : x[0][1], \"sim\" : (x[1])}) \\\r\n .filter(lambda x: x[\"sim\"])\r\n\r\n with open(outputfile, 'w') as fp:\r\n fp.writelines(json.dumps(t) + '\\n' for t in li.collect())\r\n\r\n Duration=time.time()-time1\r\n print('Duration:',Duration)","sub_path":"ItemAndUserModel(train).py","file_name":"ItemAndUserModel(train).py","file_ext":"py","file_size_in_byte":8596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103614504","text":"import nltk\n# nltk.download('punkt')\n\nwith open('data_HW3_Plato_Republic.txt') as myfile:\n myText = myfile.read().replace('\\n', ' ')\n myText = myText.encode('ascii', 'ignore')\n myText = myText.decode()\n\n###### (a) ######\nmyText_tokenized = nltk.word_tokenize(myText.lower())\nT = len(myText_tokenized)\nprint(\"-- length of texts (number of words T) : \", T)\n\n###### (b) ######\nmyUnigram = nltk.ngrams(myText_tokenized, 1)\nfdist_uni = nltk.FreqDist(myUnigram)\ncommon = [word for word in fdist_uni.most_common() if len(word[0][0])>=8]\nprint(common[0:5])\n\n###### (c) ######\nmyBigram = nltk.ngrams(myText_tokenized, 2)\nfdist_bi = nltk.FreqDist(myBigram)\ndef fcn(w1, w2):\n return fdist_bi[(w1, w2)]/fdist_uni[(w1,)]\n\n###### (d) ######\nperplexity = 1.0\nfor k in range(T-1):\n perplexity *= fcn(myText_tokenized[k], myText_tokenized[k+1])**(-1/(T-1))\nprint(perplexity)","sub_path":"Assignments/Homework3/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"491999615","text":"import kivy\r\nfrom kivy.app import App\r\nfrom kivy.config import Config\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.anchorlayout import AnchorLayout\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.uix.image import Image\r\nfrom kivy.core.window import Window\r\nfrom kivy.core.window import WindowBase\r\nfrom kivy.core.text import LabelBase\r\nfrom kivy.uix.popup import Popup\r\nimport random , glob\r\nimport json\r\n\r\n\r\n# LabelBase.register(name='bright_young_things',fn_regular='Bright Young Things.ttf')\r\npuncte=0\r\n\r\n\r\n# on definit un ScreenManager au debut ! 
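(i.e. a single module-level ScreenManager is created below and shared by every screen defined in this file)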
\r\nsm = ScreenManager()\r\n#Variable global qui n'esdt pas propre a une classe , donc je peux utiliser dans tout le code\r\n\r\n\r\n#Ecran d'acueuille \r\nclass MenuScreen(Screen):\r\n def build(self):\r\n self.name='Menu'\r\n self.add_widget(Image(source='Battle.jpeg', allow_stretch=True , keep_ratio= False))\r\n Menu_Layout=BoxLayout(padding=100,spacing=80,orientation='vertical')\r\n\r\n #Bouton Play \r\n self.Bouton_play = Button(text='SINGLEPLAYER')\r\n self.Bouton_play.font_size= Window.size[0]*0.05\r\n self.Bouton_play.background_color=[0,0,0,0.5]\r\n self.Bouton_play.bind(on_press=self._play)\r\n Menu_Layout.add_widget(self.Bouton_play)\r\n\r\n #Bouton Multiplayer\r\n self.bouton_multiplayer = Button(text=\"MULTIPLAYER\")\r\n self.bouton_multiplayer.font_size = Window.size[0]*0.05\r\n self.bouton_multiplayer.background_color = [0,0,0,0.5]\r\n self.bouton_multiplayer.bind(on_press = self._multiplayer)\r\n Menu_Layout.add_widget(self.bouton_multiplayer)\r\n\r\n #Bouton Quitter \r\n self.Bouton_quitter = Button(text='QUITTER LE JEU')\r\n self.Bouton_quitter.font_size= Window.size[0]*0.05\r\n self.Bouton_quitter.background_color=[0,0,0,0.5]\r\n self.Bouton_quitter.bind(on_press=self._Quitter)\r\n Menu_Layout.add_widget(self.Bouton_quitter)\r\n #Pour appeler box , on met pas return pour pas faire une boucle\r\n self.add_widget(Menu_Layout)\r\n #Definir un fonction pour le bouton PLAY\r\n def _play(self ,src):\r\n game = GridGameScreen()\r\n game.build()\r\n sm.add_widget(game)\r\n sm.current='Game'\r\n sm.transition.direction = \"left\"\r\n\r\n def _multiplayer(self,src):\r\n multijoueur = Two_players_Screen()\r\n multijoueur.build()\r\n sm.add_widget(multijoueur)\r\n sm.current = 'multiplayer'\r\n sm.transition.direction = \"left\"\r\n \r\n #Definir un fonction pour le bouton Quitter \r\n def _Quitter(self,src):\r\n # sm.current = \"ecran 1\" Pas besoin de mettre le sm.current() car ca quitte.\r\n Test_Jeu_BatailleApp().stop()\r\n\r\n\r\n\r\n#*****************************************************************CLASSE POUR 1 JOUEUR**********************************************************************\r\n\r\n #Classe pour le jeu\r\nclass GridGameScreen(Screen):\r\n def build(self):\r\n self.name = 'Game'\r\n self.add_widget(Image(source='fond du jeu.jpg', allow_stretch=True , keep_ratio= False))\r\n \r\n \r\n self.essai = 0 \r\n self.touche = 0\r\n self.touche_torpi =0\r\n self.touche_croizeur =0\r\n self.touche_sousmarin =0\r\n self.touche_porteavion =0\r\n####RANDOM\r\n # a= ['c:\\\\Users\\\\Nelu8770\\\\Desktop\\\\KIVY TEST\\\\cahier.txt','c:\\\\Users\\\\Nelu8770\\\\Desktop\\\\KIVY TEST\\\\cahier_2.txt',\r\n\r\n # 'c:\\\\Users\\\\Nelu8770\\\\Desktop\\\\KIVY TEST\\\\cahier_3.txt','c:\\\\Users\\\\Nelu8770\\\\Desktop\\\\KIVY TEST\\\\cahier_4.txt']\r\n\r\n pattern = \"C:\\\\Users\\\\pedro\\\\Desktop\\\\KIVY TEST\\\\Bateaux\\\\*.txt\" #etoile pour afficher pour ce qui est du text\r\n b=random.choice(glob.glob(pattern))\r\n # print(glob.glob(pattern))\r\n## On ouvre le fichier et on definit la position avec une liste vite\r\n with open (b,'rt') as f:\r\n self.pos_sous_marin = []\r\n self.pos_croiseur = []\r\n self.pos_porte_avion = []\r\n self.pos_torpilleur = []\r\n self.output = \"\"\r\n matrice = []\r\n## Declaration de noms pour les bato\r\n sous_marin= \"s\"\r\n croiseur=\"c\"\r\n porte_avion = \"p\"\r\n torpilleur = \"t\"\r\n## Parcours le fichier et supprime les espaces\r\n fichier = f.readlines()\r\n for line in fichier:\r\n effacer_espace = line.rstrip()\r\n 
matrice.append(list(effacer_espace))\r\n print(matrice)\r\n## Parcours la liste pour determiner la position des sous Marin\r\n for ligne in range(len(matrice)):\r\n for col in range (len(matrice[ligne])):\r\n if matrice [ligne][col] in sous_marin:\r\n position =([(ligne),(col)]) # determine la position des sous marin\r\n self.pos_sous_marin.append(position)\r\n print(\"positions des sous marins:\",self.pos_sous_marin)\r\n## Parcours la liste pour determiner la position des croiseurs\r\n for ligne in range(len(matrice)):\r\n for col in range (len(matrice[ligne])):\r\n if matrice [ligne][col] in croiseur:\r\n position =([(ligne),(col)])\r\n self.pos_croiseur.append(position)\r\n print(\"positions des croisseur:\",self.pos_croiseur)\r\n## Parcours la liste pour determiner la position des portes avions\r\n for ligne in range(len(matrice)):\r\n for col in range (len(matrice[ligne])):\r\n if matrice [ligne][col] in porte_avion:\r\n position =([(ligne),(col)])\r\n self.pos_porte_avion.append(position)\r\n print(\"positions des porte avions:\",self.pos_porte_avion)\r\n## Parcours la liste pour determiner la position des torpilleur\r\n for ligne in range(len(matrice)):\r\n for col in range (len(matrice[ligne])):\r\n if matrice [ligne][col] in torpilleur:\r\n position =([(ligne),(col)])\r\n self.pos_torpilleur.append(position)\r\n print(\"positions des torpilleur:\",self.pos_torpilleur)\r\n\r\n#********************************************** INTERFACE DU JEU **************************************************************\r\n\r\n self.title = 'Bataille'\r\n boite_message = BoxLayout(orientation= 'vertical',spacing=2)\r\n grid = GridLayout(rows=12 , cols = 11,padding=15,spacing= 3)\r\n grid.add_widget(Label(text='SINGLE \\nPLAYER'))\r\n\r\n#Grille pour afficher les chiffres\r\n for col in range(1,11):\r\n grid.add_widget(Label(text=str(col)))\r\n#Grille pour afficher les lettre de A-->J + Button \r\n Lettre = ['A','B','C','D','E','F','G','H','I','J']\r\n for A_to_J in range(len(Lettre)):\r\n grid.add_widget(Label(text=str(Lettre[A_to_J])))\r\n for col in range(10):\r\n a = [(A_to_J +1),(col+1)]\r\n btn = Button(text=\"\",id=str(a))\r\n # on assigne a chaque bouton la fonction _message\r\n btn.bind(on_press=self._message)\r\n grid.add_widget(btn)\r\n #Ecran separe pour afficher text 'touché , coulé , vous avez gagnez ,perdu etc...\r\n self.output= Label(font_size='30sp',size_hint_y=0.1,bold=True,color=(1,0,0,10))\r\n boite_message.add_widget(grid)\r\n boite_message.add_widget(self.output)\r\n \r\n \r\n self.add_widget(boite_message)\r\n\r\n#Creation de la fonction qui permet d'informer et cliquer sur chaque bato\r\n def _message(self, src):\r\n#Fonction qui afficher si on a touché le sous marin et on change le background en rouge\r\n for sousmarin in range(len(self.pos_sous_marin)):\r\n if src.id == str(self.pos_sous_marin[sousmarin]):\r\n src.disabled = True\r\n src.background_color = [1,0,0,10] #couleur rouge\r\n self.touche += 1\r\n self.touche_sousmarin +=1\r\n if self.touche_sousmarin == 3:\r\n print(\"YEAHHHH : sous marin coulé\") # pour afficher dans le teminal\r\n self.output.text =\" FELICITATION : un des deux sous marins a coulé\"\r\n print(self.touche , self.output)\r\n break\r\n elif self.touche_sousmarin == 6 : \r\n print(\"yeaahhh tous les sous marins touchés\")\r\n self.output.text = \" FELICITATION : les 2 Sous marins ont coulés\"\r\n else:\r\n self.output.text = 'Sous marin touché'\r\n\r\n\r\n\r\n#Fonction qui afficher si on a touché le croisseur et on change le background en rouge\r\n 
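# (added note) Same hit-detection pattern as the submarine block above: match the pressed button's id against each stored ship coordinate, turn the cell red and update the hit counters.\r\n 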
for croizeur in range(len(self.pos_croiseur)):\r\n if src.id == str(self.pos_croiseur[croizeur]):\r\n src.disabled = True\r\n src.background_color = [1,0,0,10] #couleur rouge\r\n self.touche += 1\r\n self.touche_croizeur +=1\r\n if self.touche_croizeur == 4:\r\n print(\"YEAHHHH : Croisseur coulé\")\r\n self.output.text = \" FELICITATION : le croisseur a coulé\"\r\n print(self.touche , self.output)\r\n break\r\n else:\r\n self.output.text = 'Croisseur touché '\r\n\r\n\r\n#Fonction qui afficher si on a touché le porte avion et on change le background en rouge\r\n for porteavion in range(len(self.pos_porte_avion)):\r\n if src.id == str(self.pos_porte_avion[porteavion]):\r\n src.disabled = True\r\n src.background_color = [1,0,0,10] #couleur rouge\r\n self.touche += 1\r\n self.touche_porteavion += 1\r\n if self.touche_porteavion == 5:\r\n print(\"YEAHHHH : Porte avion coulé\")\r\n self.output.text =\" FELICITATION : le porte-avion a coulé\"\r\n print(self.touche , self.output)\r\n break\r\n else:\r\n self.output.text = 'Porte avion touché '\r\n\r\n\r\n#Fonction qui afficher si on a touché le torpilleur et on change le background en rouge\r\n for torpieur in range(len(self.pos_torpilleur)):\r\n if src.id == str(self.pos_torpilleur[torpieur]):\r\n src.disabled = True\r\n src.background_color = [1,0,0,10] #couleur rouge\r\n self.touche_torpi += 1\r\n self.touche +=1\r\n if self.touche_torpi == 2:\r\n print(\"YEAHHHH : Torpilleur touché\")\r\n self.output.text =\" FELICITATION : Le torpilleur a coulé\"\r\n print(self.touche , self.output)\r\n break\r\n else:\r\n self.output.text= 'Torpilleur touché'\r\n\r\n\r\n#Si on touche un autre case que le bateau et on change le background en bleu \r\n if src.disabled == False :\r\n src.background_color = [0.2,0.7,1,10] # Couleur bleu\r\n src.disabled = True\r\n self.output.text = \"Failed : Aucun un bateau à été touché\"\r\n print(self.output)\r\n\r\n global puncte\r\n self.essai += 1\r\n print(\"Nombre d'essai :\" , self.essai)\r\n puncte = 100*(self.touche/self.essai)\r\n # a= round(puncte,2)\r\n # print(a) #pour tester dans le terminal\r\n print('Your score:'+str(puncte))\r\n print(\"\\n\")\r\n\r\n #Pour dire que la partie est termineé 17 bato\r\n if self.touche == 17:\r\n pseudo = pseudoScreen()\r\n pseudo.build()\r\n sm.add_widget(pseudo)\r\n sm.current = \"Pseudo\"\r\n print('La partie est terminée')\r\n\r\n\r\n\r\n\"\"\"\r\n2 JOUEURS\r\n\"\"\"\r\n\r\n#****************************************************CLASSE POUR 2 JOUEURS************************************************\r\n\r\nclass Two_players_Screen(Screen):\r\n def build(self):\r\n self.name = \"multiplayer\"\r\n self.title = \"2 PLAYERS\"\r\n self.add_widget(Image(source='image3.png',allow_stretch=True, keep_ratio=False))\r\n # self.add_widget(Image(source='fond du jeu.jpg', allow_stretch=True , keep_ratio= False))\r\n\r\n #compteurs pour chaque joueur\r\n self.essai_joueur1 = 0\r\n self.essai_joueur2 = 0\r\n self.touche_joueur1 = 0\r\n self.touche_joueur2 = 0\r\n self.touche_s1 = 0\r\n self.touche_s2 = 0\r\n self.touche_c1 = 0\r\n self.touche_c2 = 0\r\n self.touche_p1 = 0\r\n self.touche_p2 = 0\r\n self.touche_t1 = 0\r\n self.touche_t2 = 0\r\n \r\n### Charger la position des bateaux pour chaque joueur\r\n\r\n#chargement de la positions des bateaux pour le joueur 1\r\n\r\n pattern = \"C:\\\\Users\\\\pedro\\\\Desktop\\\\KIVY TEST\\\\Bateaux 2 joueurs\\\\Joueur 1\\\\*.txt\" #etoile pour afficher pour ce qui est du text\r\n b=random.choice(glob.glob(pattern))\r\n with open (b) as file:\r\n sous_marin 
= \"s\"\r\n croiseur = \"c\"\r\n porte_avions = \"p\"\r\n torpilleur = \"t\"\r\n\r\n self.pos_sous_marin1 = []\r\n self.pos_croiseur1 = []\r\n self.pos_porte_avions1 = []\r\n self.pos_torpilleur1 = []\r\n\r\n joueur1 = []\r\n\r\n fichier1 = file.readlines()\r\n for ligne1 in fichier1:\r\n ligne1_rs = ligne1.rstrip()\r\n joueur1.append(list(ligne1_rs))\r\n \r\n print (joueur1)\r\n\r\n #sous-marin\r\n for line in range (len(joueur1)):\r\n for col in range(len(joueur1[line])):\r\n if joueur1 [line][col] in sous_marin:\r\n position = ([(line),(col)])\r\n self.pos_sous_marin1.append (position)\r\n print(\"Position des sous-marins joueur 1:\",self.pos_sous_marin1)\r\n \r\n #croiseur\r\n for line in range (len(joueur1)):\r\n for col in range(len(joueur1[line])):\r\n if joueur1 [line][col] in croiseur:\r\n position = ([(line),(col)])\r\n self.pos_croiseur1.append (position)\r\n print(\"Position du croiseur joueur 1:\",self.pos_croiseur1)\r\n \r\n #porte-avions\r\n for line in range (len(joueur1)):\r\n for col in range(len(joueur1[line])):\r\n if joueur1 [line][col] in porte_avions:\r\n position = ([(line),(col)])\r\n self.pos_porte_avions1.append (position)\r\n print(\"Position du porte-avions joueur 1:\",self.pos_porte_avions1)\r\n\r\n #torpilleur\r\n for line in range (len(joueur1)):\r\n for col in range(len(joueur1[line])):\r\n if joueur1 [line][col] in torpilleur:\r\n position = ([(line),(col)])\r\n self.pos_torpilleur1.append (position)\r\n print(\"Position du torpilleur joueur 1:\",self.pos_torpilleur1)\r\n\r\n\r\n#chargement des positions des bateaux pour le joueur 2:\r\n\r\n pattern = \"C:\\\\Users\\\\pedro\\\\Desktop\\\\KIVY TEST\\\\Bateaux 2 joueurs\\\\Joueur 2\\\\*.txt\" #etoile pour afficher pour ce qui est du text\r\n b=random.choice(glob.glob(pattern))\r\n with open (b) as file:\r\n self.pos_sous_marin2 = []\r\n self.pos_croiseur2 = []\r\n self.pos_porte_avions2 = []\r\n self.pos_torpilleur2 = []\r\n\r\n joueur2 = []\r\n\r\n fichier2 = file.readlines()\r\n for row in fichier2:\r\n row_rs = row.rstrip()\r\n joueur2.append(list(row_rs))\r\n print(joueur2)\r\n\r\n #sous marins\r\n for rows in range (len(joueur2)):\r\n for cols in range (len(joueur2[rows])):\r\n if joueur2[rows][cols] in sous_marin:\r\n position = ([(rows),(cols)])\r\n self.pos_sous_marin2.append(position)\r\n print (\"Position des sous-marins joueur 2:\", self.pos_sous_marin2)\r\n\r\n #croiseur\r\n for rows in range (len(joueur2)):\r\n for cols in range (len(joueur2[rows])):\r\n if joueur2[rows][cols] in croiseur:\r\n position = ([(rows),(cols)])\r\n self.pos_croiseur2.append(position)\r\n print (\"Position du croisseur joueur 2:\", self.pos_croiseur2)\r\n\r\n #porte-avions\r\n for rows in range (len(joueur2)):\r\n for cols in range (len(joueur2[rows])):\r\n if joueur2[rows][cols] in porte_avions:\r\n position = ([(rows),(cols)])\r\n self.pos_porte_avions2.append(position)\r\n print (\"Position du porte_avions joueur 2:\", self.pos_porte_avions2)\r\n\r\n #torpilleur\r\n for rows in range (len(joueur2)):\r\n for cols in range (len(joueur2[rows])):\r\n if joueur2[rows][cols] in torpilleur:\r\n position = ([(rows),(cols)])\r\n self.pos_torpilleur2.append(position)\r\n print (\"Position du torpilleur joueur 2:\", self.pos_torpilleur2)\r\n\r\n\r\n###Grille pour 2 joueurs\r\n\r\n # self.add_widget(Image(source='image3.png',allow_stretch=True, keep_ratio=False))\r\n\r\n\r\n BOX= BoxLayout(orientation = \"vertical\")\r\n\r\n BOITE = BoxLayout(orientation=\"horizontal\")\r\n Lettres = ['A','B','C','D','E','F','G','H','I','J'] 
\r\n\r\n\r\n#grille joueur 1\r\n\r\n box1= BoxLayout(orientation=\"vertical\")\r\n grid_1= GridLayout(rows = 12,cols=11,padding=15,spacing=1)\r\n grid_1.add_widget(Label(text=\"Player\\n 1\",color=[0,0,0,5],font_size=\"15sp\"))\r\n\r\n for col in range(1,11):\r\n grid_1.add_widget(Label(text=str(col),font_size=\"20sp\",color = [0,0,0,5]))\r\n\r\n for A_to_J in range(len(Lettres)):\r\n grid_1.add_widget(Label(text=str(Lettres[A_to_J]),font_size=\"20sp\",color=[0,0,0,5]))\r\n\r\n for col in range (10):\r\n a1 = [(A_to_J + 1), (col+1)]\r\n btn_1 = Button(text = \"\",id=str(a1))\r\n btn_1.bind(on_press=self._pressed_1)\r\n grid_1.add_widget(btn_1)\r\n \r\n box1.add_widget(grid_1)\r\n self.output_1 = Label(size_hint=(1,0.1),font_size=\"20sp\")\r\n box1.add_widget(self.output_1)\r\n\r\n\r\n#grille joueur 2\r\n\r\n box2 = BoxLayout(orientation=\"vertical\")\r\n grid_2 = GridLayout(rows=12,cols=11,padding=15,spacing=1)\r\n grid_2.add_widget(Label(text=\"Player\\n 2\",color=[0,0,0,5],font_size=\"15sp\"))\r\n\r\n for cols in range (1,11):\r\n grid_2.add_widget(Label(text=str(cols),font_size=\"20sp\",color=[0,0,0,5]))\r\n\r\n for A_to_J in range (len(Lettres)):\r\n grid_2.add_widget(Label(text=str(Lettres[A_to_J]),font_size=\"20sp\",color=[0,0,0,5]))\r\n\r\n for cols in range (10):\r\n a2 = [(A_to_J + 1),(cols + 1)]\r\n btn_2 = Button(text = \"\",id=str(a2))\r\n btn_2.bind(on_press=self._pressed_2)\r\n grid_2.add_widget(btn_2)\r\n \r\n box2.add_widget(grid_2)\r\n self.output_2 = Label(size_hint=(1,0.1),font_size=\"20sp\")\r\n box2.add_widget(self.output_2)\r\n\r\n#ajoute les grille a l'ecran\r\n\r\n BOITE.add_widget(box1)\r\n BOITE.add_widget(box2)\r\n BOX.add_widget(BOITE)\r\n self.add_widget(BOX)\r\n\r\n\r\n#Informer des résultats de nos tirs\r\n\r\n#Informations grille 1\r\n\r\n def _pressed_1(self,source):\r\n print(id(source))\r\n self.essai_joueur1 +=1\r\n\r\n #sous-marin\r\n for sous_marin in range(len(self.pos_sous_marin1)):\r\n if source.id == str (self.pos_sous_marin1[sous_marin]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur1 +=1\r\n self.touche_s1 += 1\r\n\r\n while self.touche_s1 < 7:\r\n if self.touche_s1 == 3:\r\n self.output_1.text = \" FELICITATION : un des deux sous marins a coulé\"\r\n elif self.touche_s1 == 6:\r\n self.output_1.text = \" FELICITATION : les 2 Sous marins ont coulés\"\r\n else:\r\n self.output_1.text = \"Sous-marin touché\"\r\n break\r\n \r\n #croiseur\r\n for croiseur in range(len(self.pos_croiseur1)):\r\n if source.id == str (self.pos_croiseur1[croiseur]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur1 +=1\r\n self.touche_c1 +=1\r\n\r\n while self.touche_c1 < 5:\r\n if self.touche_c1 == 4:\r\n self.output_1.text = \" FELICITATION : le croisseur a coulé\"\r\n else:\r\n self.output_1.text = \"Croiseur touché\"\r\n break\r\n \r\n\r\n #porte-avions\r\n for porte_avions in range(len(self.pos_porte_avions1)):\r\n if source.id == str(self.pos_porte_avions1[porte_avions]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur1 +=1\r\n self.touche_p1 +=1\r\n\r\n while self.touche_p1 < 6:\r\n if self.touche_p1 == 5:\r\n self.output_1.text = \" FELICITATION : le porte-avions a coulé\"\r\n else:\r\n self.output_1.text = \"Porte-avions touché\"\r\n break\r\n\r\n #torpilleur\r\n for torpilleur in range (len(self.pos_torpilleur1)):\r\n if source.id == str(self.pos_torpilleur1[torpilleur]):\r\n source.disabled = True\r\n source.background_color = 
[1,0,0,10]\r\n self.touche_joueur1 +=1\r\n self.touche_t1 +=1\r\n\r\n while self.touche_t1 <3:\r\n if self.touche_t1 == 2:\r\n self.output_1.text = \" FELICITATION : Le torpilleur a coulé\"\r\n else:\r\n self.output_1.text = \"Torpilleur touché\"\r\n break\r\n\r\n \r\n #eau\r\n if source.disabled == False:\r\n source.background_color = [0,0,1,10]\r\n source.disabled = True\r\n self.output_1.text = \"Eau\"\r\n\r\n#calcule le score du joueur 1\r\n global puncte\r\n puncte = 100 * (self.touche_joueur1/self.essai_joueur1)\r\n print(puncte)\r\n\r\n#Joueur 1 gagne\r\n\r\n if self.touche_joueur1 == 17:\r\n pseudos = pseudoScreen()\r\n pseudos.build()\r\n sm.add_widget(pseudos)\r\n \r\n self.popup_1 = Popup()\r\n\r\n self.popup_1.title = \"PLAYER 1 WINS\"\r\n self.popup_1.title_align + \"center\"\r\n self.popup_1.title_size = \"100sp\"\r\n self.popup_1.title_color = [0,1,0,10]\r\n\r\n save_1 = Button(text=\"Save Score\",font_size=\"50sp\",color=[0,1,0,10])\r\n save_1.bind(on_press=self._save_1)\r\n self.popup_1.add_widget(save_1)\r\n\r\n self.popup_1.open()\r\n\r\n\r\n#Informations joueur 2\r\n\r\n def _pressed_2(self,source):\r\n\r\n self.essai_joueur2 +=1\r\n #sous-marin\r\n for sous_marin in range(len(self.pos_sous_marin2)):\r\n if source.id == str(self.pos_sous_marin2[sous_marin]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur2 +=1\r\n self.touche_s2 +=1\r\n\r\n while self.touche_s2 < 7:\r\n if self.touche_s2 == 3:\r\n self.output_2.text = \" FELICITATION : un des deux sous marins a coulé\"\r\n elif self.touche_s2 == 6:\r\n self.output_2.text = \" FELICITATION : les 2 Sous marins ont coulés\"\r\n else:\r\n self.output_2.text = \"Sous-marin touché\"\r\n break\r\n\r\n #croiseur\r\n for croiseur in range(len(self.pos_croiseur2)):\r\n if source.id == str (self.pos_croiseur2[croiseur]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur2 +=1\r\n self.touche_c2 +=1\r\n\r\n while self.touche_c2 < 5:\r\n if self.touche_c2 == 4:\r\n self.output_2.text = \" FELICITATION : le croisseur a coulé\"\r\n else:\r\n self.output_2.text = \"Croiseur touché\"\r\n break\r\n \r\n\r\n #porte-avions\r\n for porte_avions in range(len(self.pos_porte_avions2)):\r\n if source.id == str(self.pos_porte_avions2[porte_avions]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur2 +=1\r\n self.touche_p2 +=1\r\n\r\n while self.touche_p2 < 6:\r\n if self.touche_p2 == 5:\r\n self.output_2.text = \" FELICITATION : le porte-avions a coulé\"\r\n else:\r\n self.output_2.text = \"Porte-avions touché\"\r\n break\r\n\r\n #torpilleur\r\n for torpilleur in range (len(self.pos_torpilleur2)):\r\n if source.id == str(self.pos_torpilleur2[torpilleur]):\r\n source.disabled = True\r\n source.background_color = [1,0,0,10]\r\n self.touche_joueur2 +=1\r\n self.touche_t2 +=1\r\n\r\n while self.touche_t2 <3:\r\n if self.touche_t2 == 2:\r\n self.output_2.text = \" FELICITATION : Le torpilleur a coulé\"\r\n else:\r\n self.output_2.text = \"Torpilleur touché\"\r\n break\r\n \r\n #eau\r\n if source.disabled == False: #si on touche pas les batteaux, donc les btns sont clicable => on a touché l'eau\r\n source.background_color = [0,0,1,10]\r\n source.disabled = True\r\n self.output_2.text = \"Failed : Aucun un bateau à été touché\"\r\n\r\n#Score joueur 2\r\n global puncte\r\n puncte = 100* (self.touche_joueur2/self.essai_joueur2)\r\n print(puncte)\r\n\r\n#Joueur 2 gagne\r\n\r\n if self.touche_joueur2 == 17:\r\n pseudo = 
pseudoScreen()\r\n pseudo.build()\r\n sm.add_widget(pseudo)\r\n # sm.current = 'Pseudo'\r\n\r\n self.popup_2 = Popup()\r\n\r\n self.popup_2.title = \"PLAYER 2 WINS\"\r\n self.popup_2.title_align + \"center\"\r\n self.popup_2.title_size = \"100sp\"\r\n self.popup_2.title_color = [0,1,0,10]\r\n\r\n save_2 = Button(text=\"Save Score\",font_size=\"50sp\",color=[0,1,0,10])\r\n save_2.bind(on_press=self._save_2)\r\n self.popup_2.add_widget(save_2)\r\n\r\n self.popup_2.open()\r\n\r\n\r\n#fontions boutons popup\r\n def _save_1(self,source):\r\n sm.current = \"Pseudo\"\r\n self.popup_1.dismiss()\r\n \r\n def _save_2(self,source):\r\n sm.current = \"Pseudo\"\r\n self.popup_2.dismiss()\r\n\r\n\r\n \r\n\"\"\"\r\nDERNIER ECRAN\r\n\"\"\"\r\n \r\n\r\n#****************************************************CLASSE POUR LE SCORE*************************************************\r\n\r\nclass pseudoScreen(Screen):\r\n def build(self):\r\n self.name = 'Pseudo'\r\n self.title = 'PSEUDO'\r\n self.add_widget(Image(source='title_screen.jpg', allow_stretch=True , keep_ratio= False))\r\n\r\n self.Score_Layout=BoxLayout(padding=50,spacing=80,orientation='vertical')\r\n self.Sous_score_Layout2=BoxLayout(orientation='vertical') # pour le bouton SAVE + Lable\r\n Sous_score_Layout=BoxLayout(orientation='horizontal',size_hint=(1,0.8)) # pour le save et label et input\r\n\r\n \r\n # print(str(puncte) + 'yoyoyoyoyo')\r\n#[b][/b] + markup = True pour afficher en BOLD\r\n self.label_score=Label(text='[b]Your score:[/b]'+'\\n' + str(round(puncte,2)),color =(0,5,0,10),font_size='30sp',italic=True ,markup=True)\r\n # print('rrr'+str(puncte))\r\n\r\n self.Bouton_enregistrer=Button(text='SAVE', on_press=self._saveScore,font_size='40sp')\r\n self.Input=TextInput(multiline=False,font_size = '35sp')\r\n self.Input.bind(on_text_validate=self._saveScore)\r\n\r\n \r\n Sous_score_Layout.add_widget(self.label_score)\r\n Sous_score_Layout.add_widget(self.Input)\r\n Sous_score_Layout.add_widget(self.Bouton_enregistrer)\r\n \r\n \r\n# Pour afficher le pseudo dans le label \r\n self.display_score = Label(text='')\r\n self.Score_Layout.add_widget(Sous_score_Layout)\r\n self.Score_Layout.add_widget(self.display_score)\r\n\r\n self.Score_Layout.add_widget(self.Sous_score_Layout2)\r\n\r\n self.Bouton_Fin=Button(text='FINI !',color = (1,0,0.2,2))\r\n self.Bouton_Fin.font_size=Window.size[0]*0.05\r\n self.Bouton_Fin.background_color=[1,1,1,0.5]\r\n self.Bouton_Fin.bind(on_press=self._Quitter) \r\n self.Score_Layout.add_widget(self.Bouton_Fin) \r\n\r\n\r\n self.add_widget(self.Score_Layout)\r\n\r\n def _Quitter(self,src):\r\n # sm.current = \"ecran 1\" Pas besoin de mettre le sm.current() car ca quitte.\r\n Test_Jeu_BatailleApp().stop()\r\n\r\n\r\n def _saveScore(self , src):\r\n\r\n # score_maximum=0\r\n # index_maximum=0\r\n tous_les_scores=[]\r\n tous_les_noms=[]\r\n #ecriture dans fichier json\r\n try:\r\n with open('data.json')as json_file:\r\n data=json.load(json_file) #lecture du fichier json\r\n \r\n data['player_score'].append({\r\n 'name':self.Input.text, \r\n 'score':puncte \r\n })\r\n for dictionnaire in data['player_score']: # mettre a jour la clé player _score\r\n \r\n tous_les_scores.append(int(dictionnaire['score']))\r\n tous_les_noms.append(dictionnaire['name'])\r\n score_maximum=max(tous_les_scores)\r\n name_maximum=tous_les_scores.index(score_maximum)\r\n # print('***BEST SCORE***')\r\n # print('SCORE :' + str(score_maximum))\r\n # print('NAME :' + all_names[index_maximum])\r\n # print(\"\\n\")\r\n # print('***LAST SCORE***')\r\n # 
print('SCORE :' + str(all_scores[-1]))\r\n # print('NAME :' + all_names[-1]) \r\n\r\n \r\n\r\n\r\n with open('data.json','w') as outfile:\r\n json.dump(data,outfile,indent=4)\r\n\r\n except :\r\n data={}\r\n data['player_score']=[]\r\n data['player_score'].append({\r\n 'name':self.Input.text, \r\n 'score':puncte \r\n })\r\n with open('data.json','w') as outfile:\r\n json.dump(data,outfile,indent=4)\r\n\r\n\r\n#Pour afficher les pseudo dans le display\r\n self.display_score= Label(text=\" [u][b]Pseudo[/b][/u]:{} [b]Last Score[/b] :{} \\n [u][b]Pseudo[/b][/u] :{} [b]Best Score[/b]:{}\".format(tous_les_noms[-1],tous_les_scores[-1],tous_les_noms[name_maximum],score_maximum)\r\n ,font_size ='50sp',markup= True)\r\n\r\n self.Sous_score_Layout2.add_widget(self.display_score)\r\n\r\n # sm.current = 'Pseudo'\r\n\r\n\r\n\r\n\r\n\r\n### Lance le Jeu \r\nclass Test_Jeu_BatailleApp(App):\r\n def build(self):\r\n menu = MenuScreen()\r\n menu.build()\r\n sm.add_widget(menu)\r\n sm.current='Menu'\r\n return sm\r\n\r\nTest_Jeu_BatailleApp().run()","sub_path":"KIVY TEST/Bataille-navale-V.05.2.py","file_name":"Bataille-navale-V.05.2.py","file_ext":"py","file_size_in_byte":31988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"478882357","text":"import os\nimport yaml\nimport jsonschema\nimport argparse\n\nSCHEMA = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n \"yaml_schema.yaml\")\n\n\ndef validate_yaml(yaml_file: str):\n \"\"\"\n Validates the syntax of the yaml file.\n\n Arguments:\n yaml_file: path to yaml file to be validated\n\n Returns:\n jsonschema.validate\n\n Raises:\n\n \"\"\"\n # read in yaml_file\n with open(yaml_file, 'r') as f_in:\n yaml_contents = f_in.read()\n yaml_in = yaml.full_load(yaml_contents)\n\n _validate_yaml_from_dict(yaml_in)\n print('YAML file is valid ✅')\n\n\ndef _validate_yaml_from_dict(yaml_dict: dict):\n \"\"\"\n Validates the syntax of the yaml file, using a dict as input.\n\n Arguments:\n yaml_dict: yaml model as dict.\n\n Returns:\n jsonschema.validate\n\n Raises:\n\n \"\"\"\n # read in SCHEMA\n with open(SCHEMA, 'r') as f_in:\n yaml_contents = f_in.read()\n schema = yaml.full_load(yaml_contents)\n\n jsonschema.validate(instance=yaml_dict, schema=schema)\n\n\ndef main():\n \"\"\"\n Function called by the CLI.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Validates a yaml model '\n 'so that it can be used by yaml2bsml.')\n\n parser.add_argument('yaml_file', type=str,\n help='Directory of yaml file, that '\n 'should be validated.')\n\n args = parser.parse_args()\n\n print(f'Path to yaml file: {args.yaml_file}')\n print('Validating...')\n\n validate_yaml(args.yaml_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"yaml2sbml/yaml_validation.py","file_name":"yaml_validation.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"92463821","text":"# Crie um programa que avalie possibilidade de emprestimo pra um cliente em um banco.\n# Vai receber o valor da casa, o salário do comprador e em quantos anos ele vai pagar\n# Calcule o valor da prestação mensal, sabendo que ela não pode exceder 30% do slário do cliente.\n\nsalarioCliente = float(input('Insira o seu salário aqui: R$'))\ncasaValorTotal = float(input('Insira o valor da casa total aqui: R$'))\nanosDePrestação = float(input('Insira em quantos anos ele vai pagar: '))\n\ncalculoPrestação = casaValorTotal / (anosDePrestação * 12)\n\nif 
calculoPrestação <= (salarioCliente * 0.30):\n print('Você pode fazer o emprestimo!')\nelse:\n print('Você não pode fazer o emprestimo!')\n\n","sub_path":"AulasPythonCursoEmVideo/ex036.py","file_name":"ex036.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"545551378","text":"def compare(a, b):\r\n if a >= b:\r\n print(str(a) + ' Is bigger')\r\n else:\r\n print(str(b) + ' Is bigger')\r\n\r\n\r\nx = 34\r\ny = 95\r\ncompare(x, y)\r\n\r\n\r\ndef compare(d, f):\r\n if d >= f:\r\n return d\r\n else:\r\n return f\r\n\r\n\r\nx = 32\r\ny = 99\r\ncompare(x, y)\r\n","sub_path":"Practice/mandrikov/sravnenie.py","file_name":"sravnenie.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95678490","text":"from typing import Union\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import check\n\nfrom ..utils import custom_errors\nfrom main import HelpCenterBot\n\n\ndef authorized_channels_check(ctx: commands.Context) -> bool:\n target = ctx.channel.id\n if isinstance(ctx.channel, discord.Thread):\n target = ctx.channel.parent_id\n\n if target in ctx.bot.authorized_channels_id:\n return True\n\n raise custom_errors.NotAuthorizedChannels(ctx.bot.authorized_channels_id)\n\n\ndef authorized_channels():\n return check(authorized_channels_check)\n\n\ndef is_high_staff_check(bot: HelpCenterBot, user: Union[discord.Member, discord.User]) -> tuple[bool, list[int]]:\n bug_center: discord.Guild = bot.get_guild(bot.bug_center_id)\n\n member: discord.Member = user\n if isinstance(user, discord.User):\n member = bug_center.get_member(user.id)\n\n allowed_roles_ids: list[int] = [value for (key, value) in bot.staff_roles.items() if key in ('administrator', 'assistant', 'screening')]\n\n return discord.utils.find(lambda r: r.id in allowed_roles_ids, member.roles) or member.guild_permissions.administrator, allowed_roles_ids\n\n\ndef is_high_staff():\n async def inner(ctx: commands.Context):\n result, list_ids = is_high_staff_check(ctx.bot, ctx.author)\n if result:\n return True\n raise custom_errors.NotAuthorizedRoles(list_ids)\n\n return check(inner)\n","sub_path":"cogs/utils/checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"226766543","text":"from __future__ import division\n\nimport re\nimport sys\n\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\n# [END import_libraries]\n\nimport speech_recognition as sr\n\nfrom __configure.mubby_value import STT_FILE_NAME\n\n# Audio recording parameters\nRATE = 16000\nCHUNK = int(RATE / 10) # 100ms\n\n\nclass SpeechToText:\n def __init__(self):\n pass\n\n def speech_to_text(self, client_info, stt_api=None, socket_action=None):\n file_name = client_info['folder_path'] + STT_FILE_NAME\n stt_text = ''\n\n if stt_api:\n if stt_api == \"google\":\n stt_text = self.google_stt(file_name)\n elif stt_api == \"google_streaming\":\n stt_text = self.google_stt_streaming(socket_action)\n else:\n print(\"No such STT API - using plain Google STT instead\")\n stt_text = self.google_stt(file_name)\n else:\n stt_text = self.google_stt(file_name)\n\n return stt_text\n\n def google_stt(self, file_name):\n print(\"file_name >> {}\".format(file_name))\n\n with sr.AudioFile(file_name) as source:\n r = sr.Recognizer()\n 
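# record() reads the whole WAV file into a single AudioData buffer before recognition\n 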
audio = r.record(source)\n try:\n stt_text = r.recognize_google(audio, show_all=False, language='ko_KR')\n print(\"Success Google STT\")\n except Exception as e:\n print(\"Google STT failed >> {}\".format(e))\n stt_text = ''\n\n # print(\"self.output_stt >> {}\".format(output_stt))\n\n return stt_text\n\n @staticmethod\n def listen_print_loop(responses):\n \"\"\"\n Iterates through server responses and prints them.\n\n The responses passed is a generator that will block until a response\n is provided by the server.\n\n Each response may contain multiple results, and each result may contain\n multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we\n print only the transcription for the top alternative of the top result.\n\n In this case, responses are provided for interim results as well. If the\n response is an interim one, print a line feed at the end of it, to allow\n the next result to overwrite it, until the response is a final one. For the\n final one, print a newline to preserve the finalized transcription.\n \"\"\"\n text = ''\n num_chars_printed = 0\n for response in responses:\n if not response.results:\n continue\n\n # The `results` list is consecutive. For streaming, we only care about\n # the first result being considered, since once it's `is_final`, it\n # moves on to considering the next utterance.\n result = response.results[0]\n if not result.alternatives:\n continue\n\n # Display the transcription of the top alternative.\n transcript = result.alternatives[0].transcript\n\n # Display interim results, but with a carriage return at the end of the\n # line, so subsequent lines will overwrite them.\n #\n # If the previous result was longer than this one, we need to print\n # some extra spaces to overwrite the previous result\n overwrite_chars = ' ' * (num_chars_printed - len(transcript))\n\n if not result.is_final:\n sys.stdout.write(transcript + overwrite_chars + '\\r')\n sys.stdout.flush()\n\n num_chars_printed = len(transcript)\n\n else:\n # print(transcript + overwrite_chars)\n text = transcript + overwrite_chars\n\n # Exit recognition if any of the transcribed phrases could be\n # one of our keywords.\n if re.search(r'\\b(exit|quit)\\b', transcript, re.I):\n print('Exiting..')\n break\n\n num_chars_printed = 0\n\n return text\n\n def google_stt_streaming(self, socket_action):\n # See http://g.co/cloud/speech/docs/languages\n # for a list of supported languages.\n language_code = 'ko-KR' # a BCP-47 language tag\n\n # for content in comuni.get_data(client_record):\n # print(\"Type >> {}\".format(type(content)))\n\n client = speech.SpeechClient()\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=RATE,\n language_code=language_code)\n streaming_config = types.StreamingRecognitionConfig(\n config=config,\n interim_results=True)\n requests = (types.StreamingRecognizeRequest(audio_content=content)\n for content in socket_action.get_data())\n\n responses = client.streaming_recognize(streaming_config, requests)\n\n # Now, put the transcription responses to use.\n stt_text = self.listen_print_loop(responses)\n # Curious what happens if nothing is said at all - let's find out.\n return stt_text\n","sub_path":"__utils/stt_module.py","file_name":"stt_module.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"344053763","text":"from decimal import Decimal\nfrom unittest.mock import MagicMock\nimport sys\nimport datetime\n\nfrom PyQt5 import 
QtWidgets, QtGui, QtCore\n\nfrom database.dataquerier import DateF2\nfrom exceptions.exceptions import MissingDataException\nfrom rents.rentpage import EditRentDialog\nfrom tests.dataqueriermock import dataQuerierMock\nfrom tests.testcasebase import TestCaseBase\nfrom widgets.dialogservice import DialogService\n\n\nclass EditRentDialogTests(TestCaseBase):\n @classmethod\n def setUpClass(cls):\n cls.app = QtWidgets.QApplication(sys.argv)\n\n def setUp(self):\n super().setUp()\n self.rent = MagicMock()\n self.rent.info = {\"LastPaymentDate\": None,\n \"AgentName\": None,\n \"Rent\": Decimal(\"4.00\"),\n \"RentCode\": \"AXD10\",\n \"Tenure\": \"L\",\n \"Frequency\": 2,\n \"DeedType\": \"UNENF\",\n \"NFeeTotal\": Decimal(\"95.00\"),\n \"Notes\": None,\n \"MailTo\": \"A\",\n \"TenantName\": \"Mr & Mrs N P Bamford\",\n \"AcType\": \"N\",\n \"DeedInfo\": \"enforceable - DCOV required - interest 4% above base\",\n \"Agent\": None,\n \"ChargesBalance\": Decimal(\"1.00\"),\n \"Status\": \"A\",\n \"PriceBase\": Decimal(\"999999.998999999952502548694610595703125\"),\n \"Source\": \"JFL-MTD1X\",\n \"DateCode\": \"F2Jun24A\",\n \"ChargesTotal\": Decimal(\"1.00\"),\n \"Email\": None,\n \"ReducedRent\": None,\n \"LastRentDate\": datetime.date(2004, 6, 24),\n \"Arrears\": Decimal(\"0.00\"),\n \"AgentAddress\": None,\n \"Price2\": Decimal(\"999999.998999999952502548694610595703125\"),\n \"PeriodRent\": Decimal(\"2.00\"),\n \"LandlordNo\": 2,\n \"NFee\": 6,\n \"TitleGrade\": 2,\n \"LastRentPayment\": None,\n \"AdvArr\": \"R\"}\n\n self.oldRent = MagicMock()\n self.oldRent.info = self.rent.info.copy()\n\n self.rent.landlord = {\"ManagerDetails\": \"Tel: 01404 815918 email: hesmondo@btinternet.com\",\n \"Manager\": \"Richard Hesmondhalgh\",\n \"BankSortCode\": \"09-01-50\",\n \"Address\": \"Hawthorn Dene, School Lane, West Hill, Ottery St. Mary, Devon EX11 1UP\",\n \"ManagerNo\": 2,\n \"BankAccountNumber\": \"01513656\",\n \"BankAccountName\": \"R Hesmondhalgh & D Maloney\",\n \"ManagerAddress\": \"Hawthorn Dene, School Lane, West Hill, Ottery St. 
Mary, Devon EX11 1UP\",\n \"BankName\": \"Santander\",\n \"Name\": \"Richard Hesmondhalgh\"}\n\n self.dbHandler = MagicMock()\n self.dbHandler.bufferedExecuteOne = MagicMock(return_value=[\"\"])\n\n self.dataQuerier = dataQuerierMock()\n\n self.dialog = EditRentDialog(self.rent)\n self.dialog.accept = MagicMock()\n\n def test_dialog_creatable(self):\n pass\n\n def test_validateFields_nonsense_datecode_rejected(self):\n self.dialog.lineDateCode.setText(\"NotAValidDateCode\")\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_inconsistent_frequency_rejected(self):\n self.dialog.lineDateCode.setText(\"F2Jan01A\")\n self.setFrequency(1)\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_f1_inconsistent_datecode_and_lastRentDate_day_rejected(self):\n self.dialog.lineDateCode.setText(\"F1Jan01A\")\n self.setFrequency(1)\n self.dialog.lineLastRentDate.setText(\"02/01/2010\")\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_f1_inconsistent_datecode_and_lastRentDate_month_rejected(self):\n self.dialog.lineDateCode.setText(\"F1Jan01A\")\n self.setFrequency(1)\n self.dialog.lineLastRentDate.setText(\"01/02/2010\")\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_f1_non_A_letter_rejected(self):\n self.dialog.lineDateCode.setText(\"F1Jan01B\")\n self.setFrequency(1)\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_f2_datecode_doesnt_exist_rejected(self):\n self.dataQuerier.getDateF2 = MagicMock()\n self.dataQuerier.getDateF2.side_effect = MissingDataException(\"not found\")\n self.dialog.lineDateCode.setText(\"F2Jan01B\")\n self.setFrequency(2)\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_f2_lastRentDate_doesnt_match_dates_rejected(self):\n self.dataQuerier.getDateF2 = MagicMock()\n self.dataQuerier.getDateF2.dates = MagicMock(return_value=[datetime.date(2000, 1, 1), datetime.date(2000, 6, 1)])\n self.dialog.lineDateCode.setText(\"F2Jan01B\")\n self.setFrequency(2)\n self.dialog.lineLastRentDate.setText(\"01/02/2010\")\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_quasi_matches_f2_dates_accepted(self):\n self.dataQuerier.getDateF2 = MagicMock(return_value=DateF2([\"FQJan01A\", datetime.date(2000, 1, 1), datetime.date(2000, 6, 1)]))\n self.dialog.lineDateCode.setText(\"FQJan01A\")\n self.setFrequency(1)\n self.dialog.lineLastRentDate.setText(\"01/01/2010\")\n\n self.assertTrue(self.dialog.validateRentDetails())\n\n def test_validateFields_quasi_doesnt_match_f2_dates_rejected(self):\n self.dataQuerier.getDateF2 = MagicMock(return_value=DateF2([\"FQJan01A\", datetime.date(2000, 1, 1), datetime.date(2000, 7, 1)]))\n self.dialog.lineDateCode.setText(\"FQJan01A\")\n self.setFrequency(1)\n self.dialog.lineLastRentDate.setText(\"02/01/2010\")\n\n self.dialog.validateRentDetails()\n\n self.assertFalse(self.dialog.accept.called)\n\n def test_validateFields_f2_valid_edit_accepted(self):\n self.dataQuerier.getDateF2 = MagicMock(return_value=DateF2([\"F2Jan01A\", datetime.date(2000, 1, 1), datetime.date(2000, 7, 1)]))\n self.dialog.lineDateCode.setText(\"F2Jan01A\")\n self.setFrequency(2)\n self.dialog.lineLastRentDate.setText(\"01/01/2010\")\n\n 
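# arrange: the last-rent date matches one of the mocked F2 dates, so validation should succeed\r\n 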
self.assertTrue(self.dialog.validateRentDetails())\r\n\r\n    def test_validateFields_f4_valid_edit_accepted(self):\r\n        self.dataQuerier.getDateF4 = MagicMock(return_value=DateF2([\"F4Jan01A\", datetime.date(2000, 1, 1), datetime.date(2000, 4, 1), datetime.date(2000, 7, 1), datetime.date(2000, 10, 1)]))\r\n        self.dialog.lineDateCode.setText(\"F4Jan01A\")\r\n        self.setFrequency(4)\r\n        self.dialog.lineLastRentDate.setText(\"01/01/2010\")\r\n\r\n        self.assertTrue(self.dialog.validateRentDetails())\r\n\r\n    def test_validateFields_lastRentDate_changed_shows_warning(self):\r\n        self.dialog.lineLastRentDate.setText(\"01/01/2010\")\r\n\r\n        self.assertFalse(self.dialog.validateRentDetails())\r\n\r\n        self.assertTrue(DialogService.areYouSureDialog.called)\r\n\r\n    def test_validateFields_dateCode_changed_shows_warning(self):\r\n        self.dialog.lineDateCode.setText(\"F4Jan01A\")\r\n\r\n        self.assertFalse(self.dialog.validateRentDetails())\r\n\r\n        self.assertTrue(DialogService.areYouSureDialog.called)\r\n\r\n    def test_validateFields_frequency_changed_shows_warning(self):\r\n        self.setFrequency(4)\r\n\r\n        self.assertFalse(self.dialog.validateRentDetails())\r\n\r\n        self.assertTrue(DialogService.areYouSureDialog.called)\r\n\r\n    def setFrequency(self, frequency):\r\n        if frequency == 1:\r\n            self.dialog.comboFrequency.setCurrentIndex(0)\r\n        elif frequency == 2:\r\n            self.dialog.comboFrequency.setCurrentIndex(1)\r\n        elif frequency == 4:\r\n            self.dialog.comboFrequency.setCurrentIndex(2)\r\n        elif frequency == 12:\r\n            self.dialog.comboFrequency.setCurrentIndex(3)\r\n        elif frequency == 52:\r\n            self.dialog.comboFrequency.setCurrentIndex(4)\r\n","sub_path":"tests/editrentdialogtests.py","file_name":"editrentdialogtests.py","file_ext":"py","file_size_in_byte":8589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"510349348","text":"# -*- coding: UTF-8 -*-\n#https://www.cnblogs.com/geosnoob/p/11581111.html\nimport xlrd\n\ndata = xlrd.open_workbook(\"d:\\\\Book1.xlsx\")\n# get one worksheet\n#table = data.sheets()[0]\n#table = data.sheet_by_index(0)\ntable = data.sheet_by_name(u\"Sheet1\")\n\n\n# get the number of rows and columns\nnrows = table.nrows\nncols = table.ncols\nfor i in range(nrows):\n # get the values of an entire row or column\n #print(table.col_values(i))\n print(table.row_values(i))\n\n# get the value of a cell\n#cell = table.cell(2,3).value\n\n# use row/column indexes\ncell_A1 = table.row(0)[0].value\ncell_A2 = table.col(1)[0].value\n\n# simple write\nrow = 0 \ncol = 0\n# type: 0 empty, 1 string, 2 number, 3 date, 4 boolean, 5 error\nctype = 1\nvalue = \"testtest\"\nxf = 1 # extended formatting \ntable.put_cell(row, col, ctype, value, xf) # does not actually write back to the file\n","sub_path":"python/Learn/Study/xlrd/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"317297637","text":"import logging\nfrom urllib.parse import urljoin\n\nimport requests\n\nfrom .base import BaseEndpoint\n\nFILE_CHUNK_BYTES = 5242880\n\nlogger = logging.getLogger(__file__)\n\n\nclass FileEndpoint(BaseEndpoint):\n \"\"\"\n Endpoint for attaching files to Actions.\n https://actionstep.atlassian.net/wiki/spaces/API/pages/18251961/Action+Documents\n \"\"\"\n\n resource = \"actiondocuments\"\n\n def __init__(self, *args, **kwargs):\n self.file_upload = FileUploadEndpoint(*args, **kwargs)\n self.folders = FolderEndpoint(*args, **kwargs)\n super().__init__(*args, **kwargs)\n\n def upload(self, filename: str, file_bytes: bytes):\n \"\"\"\n Upload a file to Actionstep.\n {\n \"id\": \"qwsqswqsqw\",\n \"status\" : \"Uploaded\",\n }\n \"\"\"\n return self.file_upload.create(filename, file_bytes)\n\n def 
attach(self, filename: str, file_id: str, action_id: str, foldername=None):\n \"\"\"\n Attach a file to an Action\n \"\"\"\n links = {\"action\": action_id}\n if foldername:\n folder_data = self.folders.get(foldername, action_id)\n links[\"folder\"] = folder_data[\"id\"]\n\n data = {self.resource: [{\"name\": filename, \"file\": file_id, \"links\": links}]}\n resp_data = super().create(data)\n return resp_data[self.resource]\n\n\nclass FolderEndpoint(BaseEndpoint):\n \"\"\"\n Endpoint for Action folders.\n https://actionstep.atlassian.net/wiki/spaces/API/pages/21135480/Action+Folders\n \"\"\"\n\n resource = \"actionfolders\"\n\n def get(self, foldername: str, action_id: str):\n params = {\"name\": foldername, \"action\": action_id}\n resp_data = super().get(params)\n return resp_data[self.resource]\n\n\nclass FileUploadEndpoint(BaseEndpoint):\n \"\"\"\n Endpoint for uploading/downloading files.\n https://actionstep.atlassian.net/wiki/spaces/API/pages/21135509/Files\n https://actionstep.atlassian.net/wiki/spaces/API/pages/12025904/Requests#Requests-UploadingFiles\n \"\"\"\n\n resource = \"files\"\n\n def create(self, filename: str, file_bytes: bytes):\n chunk_size = FILE_CHUNK_BYTES\n byte_chunks = [\n file_bytes[i : i + chunk_size]\n for i in range(0, len(file_bytes), chunk_size)\n ]\n part_count = len(byte_chunks)\n file_id = None\n headers = {**self.headers}\n del headers[\"Content-Type\"]\n\n logger.info(\"Uploading %s to Actionstep\", filename)\n for idx, chunk_bytes in enumerate(byte_chunks):\n url = urljoin(self.url + \"/\", file_id) if file_id else self.url\n params = {\"part_count\": part_count, \"part_number\": idx + 1}\n files = {\"file\": (filename, chunk_bytes)}\n resp = requests.post(url, files=files, params=params, headers=headers)\n resp_data = self._handle_json_response(url, resp)\n file_data = resp_data[\"files\"]\n file_id = file_data[\"id\"]\n\n assert file_data[\"status\"] == \"Uploaded\", f\"File {file_id} not fully uploaded.\"\n return resp_data[\"files\"]\n","sub_path":"app/actionstep/api/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"545921067","text":"from pathlib import Path\n\nimport soundfile as sf\nimport numpy as np\nimport torch as th\nfrom transformers import Wav2Vec2Tokenizer, Wav2Vec2Model\nfrom torchaudio.transforms import Resample\nfrom tqdm import tqdm\n\nfrom ray.util import ActorPool\n\nclass Wav2VecTokenizer:\n def __init__(self):\n self.tokenizer = Wav2Vec2Tokenizer.from_pretrained(\n \"facebook/wav2vec2-base-960h\"\n )\n\n def tokenize(self, audio_path):\n audio_input, sampling_rate = sf.read(audio_path)\n resampler = Resample(sampling_rate)\n audio_input = resampler(th.tensor(audio_input))\n input_values = self.tokenizer(audio_input, return_tensors=\"pt\").input_values\n return input_values\n\n\nclass Wav2VecExtract:\n def __init__(self, device):\n self.device = device\n\n self.raw_model = Wav2Vec2Model.from_pretrained(\n \"facebook/wav2vec2-base-960h\"\n ).to(self.device)\n\n def predict(self, input_values, features_path):\n input_values = input_values.to(self.device)\n hidden_state = self.raw_model(input_values).last_hidden_state\n hidden_state = hidden_state.flatten()\n hidden_state = hidden_state.cpu().detach().numpy()\n np.save(features_path, hidden_state)\n\n\nif __name__ == \"__main__\":\n tokenizer = Wav2VecTokenizer()\n extractor = Wav2VecExtract(th.device(\"cuda\"))\n audio_dir = 
Path(\"/impressionv2_faces/audio/\")\n videos = list(audio_dir.glob(\"*.wav\"))\n\n input_values_list = []\n for video_path in tqdm(videos):\n video_name = video_path.stem\n feature_path = audio_dir / f\"{video_name}_wav2vec2.npy\"\n input_values = tokenizer.tokenize(video_path)\n input_values_list.append(input_values)\n\n for input_values in input_values_list:\n extractor.predict(video_path, feature_path)\n","sub_path":"wav2vec2etractmulti.py","file_name":"wav2vec2etractmulti.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"595292784","text":"import cv2\nimport numpy as np\nimport pylab as plt\nfrom glob import glob\nimport argparse\nimport os\nimport pickle as pkl\nimport KidneyClassifier as train\nimport math\nfrom starter_code.utils import load_case\nimport imageio\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nfrom sklearn import metrics\nfrom skimage import feature\n\ndef create_binary_pattern(img, o, c, b):\n\n fd, fi = feature.hog(img, orientations=o, pixels_per_cell=c,\n cells_per_block=b, visualize=True, multichannel=False)\n return fd\n\ndef create_features(img_gray):\n\n orien = 8\n cells = (20,30)\n block = (1,1)\n imageSize = img_gray.shape[0]*img_gray.shape[1]\n features=np.empty_like(img_gray,type(np.array([0]*8)))\n features.fill(np.array([0]*8))\n for k in range(0,len(img_gray)-cells[0]):\n for l in range(0,len(img_gray[0])-cells[1]):\n features[k,l] = create_binary_pattern(img_gray[k:k+cells[0],l:l+cells[1]], orien, cells,block)\n\n return features\n\ndef compute_prediction(img, model):\n\n border = 5 # (haralick neighbourhood - 1) / 2\n print('computing feature')\n features = np.array(create_features(img))\n feat = []\n for r in features:\n for c in r:\n feat.append(c)\n features = np.array(feat)\n print(features.shape)\n predictions = model.predict(features)\n inference_img = predictions.reshape(img.shape)\n\n return inference_img\n\ndef parrallel(im,i,seg,model):\n inference_img = compute_prediction(im[200:400,100:400], model)\n image = np.concatenate((inference_img,seg[i][200:400,100:400]),axis=1).astype(np.uint8)\n print(\"Fin del caso:\",i)\n jacSeg = np.copy(seg[i][200:400,100:400])\n jacVol = np.copy(inference_img)\n jacSeg[jacSeg==255]=1\n jacVol[jacVol==255]=1\n return (image,jacVol,jacSeg)\n\ndef infer_images(img,seg, model):\n\n print ('[INFO] Running inference on %s test images' %len(img))\n lista=[]\n for i, im in enumerate(img):\n lista.append((im,i,seg,model))\n newGifArray=[]\n with Pool(10) as p:\n # for el in lista:\n newGifArray = p.starmap(parrallel,lista)\n # newGifArray.append(parrallel(el[0],el[1],el[2],el[3]))\n print(len(newGifArray))\n # for i, im in enumerate(img):\n # parrallel(im,i,seg,model,newGifArray)\n jaccards =[]\n with imageio.get_writer(os.path.join('infKidney1.gif'), mode='I') as writer:\n for arr in newGifArray:\n writer.append_data(arr[0])\n jaccards.append(metrics.jaccard_score(arr[1].flatten(),arr[2].flatten(),average=\"weighted\"))\n print(np.average(jaccards))\n\n\ndef main():\n img, seg = load_case(\"123\")\n img = img.get_data()\n seg = seg.get_data()\n seg[seg==1]=255\n model = pkl.load(open( \"model.p\", \"rb\" ) )\n infer_images(img,seg,model)\n\nif __name__ == '__main__':\n main()\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469349089","text":"import 
os\n\n\nclass Filter:\n _BREAK_LINE_SYMBOL = int(len('\\n'))\n\n def __init__(self, origin_path: str, destination_path: str = '', qty_symbols: int = 8):\n self._origin = origin_path\n self._qty_symbols = qty_symbols\n sep = os.sep\n segments_origin_path = origin_path.split(sep)\n file_name_filtered = 'filtered_' + segments_origin_path.pop()\n if destination_path == '':\n self._destination = sep.join(segments_origin_path) + sep + file_name_filtered\n else:\n self._destination = destination_path + sep + file_name_filtered\n\n def run(self):\n self._run_specify_encode(encoding='utf-8')\n\n def _run_specify_encode(self, encoding: str):\n print('filtration started... \\n')\n with open(self._origin, encoding=encoding) as origin:\n with open(self._destination, 'w', encoding=encoding) as destination:\n old_qty = 0\n new_qty = 0\n try:\n for line in origin:\n old_qty = old_qty + 1\n if len(line) >= (self._qty_symbols + self._BREAK_LINE_SYMBOL):\n destination.writelines(line)\n new_qty = new_qty + 1\n print(f'\\nqty lines in source file == {old_qty}')\n print(f'qty lines in new file == {new_qty}')\n print(\"\\nI'm tired, but I did my job\\n\")\n except UnicodeDecodeError:\n print('\\\\ 0_0 /\\n\\ndanger word --> ' + str(line) + f'encoding error in {old_qty} line\\n')\n response_on_error = input('would you like to specify a different encoding? y/n \\n')\n if response_on_error.lower() == 'y':\n user_encoding = str(input(\"\\ntype your encoding (example: 'latin-1') => \"))\n user_encoding = user_encoding.replace(\"'\", '').replace('\"', '')\n self._run_specify_encode(user_encoding)\n else:\n exit('\\nBye :(')\n origin.close()\n destination.close()\n","sub_path":"fcore.py","file_name":"fcore.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"501205999","text":"import cv2\n\n\nclass MotionTrackerNaive:\n def __init__(self):\n # Init the video capture and take the first image\n self.cap = cv2.VideoCapture(0)\n _, self.img = self.cap.read()\n\n # Manually draw a bounding box in the img then press Enter\n # type(bbox) = , NOT a list\n # bbox = (x-coord of origin, y-coord or origin, width, height)\n self.bbox = cv2.selectROI(\"Tracking\", self.img, False)\n\n # Instantiate and initialize the tracker\n self.tracker = cv2.TrackerMOSSE_create()\n self.tracker.init(self.img, self.bbox)\n\n def draw_box(self):\n x, y, w, h = int(self.bbox[0]), int(self.bbox[1]), int(self.bbox[2]), int(self.bbox[3])\n print(x, y, w, h)\n cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 255), 3, 1)\n cv2.putText(self.img, \"Tracking\", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n\n def run(self):\n while True:\n timer = cv2.getTickCount()\n success, self.img = self.cap.read()\n\n success, self.bbox = self.tracker.update(self.img)\n\n if success:\n self.draw_box()\n else:\n cv2.putText(self.img, \"Lost\", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)\n cv2.putText(self.img, str(int(fps)), (75, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.imshow(\"Tracking\", self.img)\n\n if cv2.waitKey(1) & 0xff == ord('q'):\n self.clean_up()\n break\n\n def clean_up(self):\n self.cap.release()\n cv2.destroyAllWindows()\n","sub_path":"main/motion_tracking_naive.py","file_name":"motion_tracking_naive.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"646575715","text":"import unittest\n\nfrom pulsar import Future, new_event_loop\nfrom pulsar.apps.data import create_store\nfrom pulsar.apps.test import check_server\n\n\nOK = check_server('postgresql')\n\nif not OK:\n try:\n import pulsar.apps.greenio.pg\n except ImportError as e:\n MSG = str(e)\n else:\n MSG = 'Requires a running postgresql database'\nelse:\n MSG = ''\n\n\n@unittest.skipUnless(OK, MSG)\nclass PostgreSqlTest(object):\n\n @classmethod\n def setUpClass(cls):\n cls.created = []\n cls.store = create_store(cls.cfg.postgresql_server,\n database=cls.name('test'))\n assert cls.store.database == cls.name('test')\n return cls.createdb()\n\n @classmethod\n def tearDownClass(cls):\n for db in cls.created:\n yield cls.store.delete_database(db)\n\n @classmethod\n def createdb(cls, name=None):\n if name:\n name = cls.name(name)\n name = yield cls.store.create_database(name)\n cls.created.append(name)\n\n @classmethod\n def name(cls, name):\n cn = cls.__name__.lower()\n return '%s_%s_%s' % (cls.cfg.exc_id, cn, name)\n\n\nclass TestPostgreSqlStore(PostgreSqlTest, unittest.TestCase):\n\n def test_store(self):\n store = self.store\n self.assertEqual(store.name, 'postgresql')\n sql = store.sql_engine\n self.assertTrue(sql)\n\n def test_ping(self):\n result = self.store.ping()\n self.assertIsInstance(result, Future)\n result = yield result\n self.assertEqual(result, True)\n","sub_path":"tests/stores/postgresql.py","file_name":"postgresql.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"446104557","text":"'''\nBase cnn model.\n'''\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential, model_from_json\nfrom tensorflow.keras.layers import Dense,Dropout,Activation,Flatten,Conv2D,MaxPooling2D\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import shuffle\nfrom sklearn.metrics import roc_curve,roc_auc_score,classification_report\nfrom sklearn.model_selection import train_test_split\nimport random\nimport gc\n\n\n# Amount of test data\nTEST_PERCENT = 0.2\n\nX_load = np.load('X_data.npy')\ny_load = np.load('y_data.npy')\nbenign = []\nmalignant = []\n\nfor i in range(len(X_load)):\n if y_load[i] == 0:\n benign.append([X_load[i],y_load[i]])\n else:\n malignant.append([X_load[i],y_load[i]])\nprint(type(benign))\nprint(len(malignant))\n\n# Don't know if this actually does anything but im leaving it here for now\ngc.collect()\n\nX= []\ny= []\n\nfor i in range(100):\n malignant.append(benign[i])\nprint(type(malignant))\n\nfor feature,label in malignant:\n X.append(feature)\n y.append(label)\n\nprint(len(X))\nX= np.asarray(X)\ny= np.asarray(y)\nX= X/255.0\n\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=2)\n\nmodel = Sequential()\nmodel.add(Conv2D(32,(3,3),activation='relu', input_shape = (224,224,3),data_format='channels_last'))\nmodel.add(MaxPooling2D(pool_size=(2,2),strides=2))\n\nmodel.add(Conv2D(64,(3,3),activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(64,activation='relu'))\nmodel.add(Dropout(0.25))\n\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.fit(X_train,y_train,batch_size = 64,epochs=5,shuffle=True,validation_split=0.1)\n\nscores = model.evaluate(X_test,y_test)\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1] * 100))\n\n# Saves model along with 
weights\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n\nmodel.save_weights(\"model.h5\")\nprint(\"saved model\")\n\n\n# Just doing my own little test to make sure im not crazy\npredictions = model.predict(X_test)\ni = 0\nfor prediction in predictions:\n print(prediction)\n print(y_test[i])\n i += 1\n\nabove_threshold_indices = predictions > 0.5\nbelow_threshold_indices = predictions < 0.5\npredictions[above_threshold_indices] = 1\npredictions[below_threshold_indices] = 0\n\n# most of this block was taken from https://towardsdatascience.com/building-a-logistic-regression-in-python-step-by-step-becd4d56c9c8\nlogit_roc_auc = roc_auc_score(y_test, predictions)\nfpr, tpr, thresholds = roc_curve(y_test, predictions)\nplt.figure()\nplt.plot(fpr, tpr, label='Binary classification(area = %0.2f)' % logit_roc_auc)\nplt.plot([0, 1], [0, 1],'r--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic')\nplt.legend(loc=\"lower right\")\nplt.savefig('Log_ROC')\nplt.show()\n\nprint(classification_report(y_test,predictions))\n\n","sub_path":"cnnModel.py","file_name":"cnnModel.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"57226454","text":"from rest_framework.generics import (\n ListCreateAPIView,\n RetrieveUpdateDestroyAPIView,\n \n)\nfrom rest_framework.views import APIView\nfrom .models import *\nfrom .serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom django.contrib.auth.views import (\n LoginView,\n FormView,\n)\nfrom Website.models import Brand\nfrom .forms import (\n StudentRegisterForm,\n)\nfrom django.views.generic import (\n TemplateView,\n)\nfrom django.urls import reverse_lazy\nfrom Quiz.models import Report\n# Create your views here.\nclass StudentListCreate(ListCreateAPIView):\n\n queryset = Student.objects.all()\n serializer_class = StudentSerializer\n\nclass TokenToUser(APIView):\n\n def post(self , request):\n info = request.data\n \n token = Token.objects.get(key=info['token'])\n student = Student.objects.get(user_id = token.user)\n serializer = StudentSerializer(student)\n return Response(serializer.data , status=status.HTTP_200_OK)\n\n \nclass StudentFromToken(APIView):\n\n def post(self , request):\n token_rcv = request.data['token']\n token = Token.objects.get(key=token_rcv)\n user = User.objects.get(pk=token.user_id)\n student = Student.objects.get(user=user)\n serializer = StudentSerializer(student)\n\n return Response(serializer.data , status = status.HTTP_200_OK)\n\nclass StudentFromUsername(APIView):\n\n def post(self,request):\n username = request.data['username']\n student = Student.objects.get(user=User.objects.get(username=username))\n serializer = StudentSerializer(student)\n return Response(serializer.data , status = status.HTTP_200_OK)\n\n\n# web views\nclass WebLoginView(LoginView):\n template_name = 'Accounts/login.html'\n\n def get_context_data(self, **kwargs):\n context = super(WebLoginView, self).get_context_data(**kwargs)\n context['brand'] = Brand.objects.get(id=1)\n return context\n\n\nclass StudentRegisterView(FormView):\n form_class = StudentRegisterForm\n template_name = 'Accounts/register.html'\n success_url = reverse_lazy('Accounts:web-login')\n\n def get_context_data(self, **kwargs):\n context = super(StudentRegisterView, 
self).get_context_data(**kwargs)\n context['brand'] = Brand.objects.get(id=1)\n return context\n\n def form_valid(self, form):\n print(form.cleaned_data)\n user = User.objects.create(\n first_name = form.cleaned_data['first_name'],\n last_name = form.cleaned_data['last_name'],\n email = form.cleaned_data['email'],\n username = form.cleaned_data['username'],\n )\n user.set_password(form.cleaned_data['password'])\n user.is_active = False\n user.save()\n\n Student.objects.create(\n user=user,\n middle_name = form.cleaned_data['middle_name'],\n contact_number = form.cleaned_data['contact_number'],\n date_of_birth = form.cleaned_data['date_of_birth'],\n display_image_url = form.cleaned_data['display_image_url'],\n about = form.cleaned_data['about'],\n address = form.cleaned_data['address'],\n )\n Token.objects.create(user=user)\n return super(StudentRegisterView, self).form_valid(form)\n\n\nclass ProfileView(TemplateView):\n template_name = 'Accounts/profile.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProfileView, self).get_context_data(**kwargs)\n user = self.request.user\n if not(user.is_staff):\n context['student'] = Student.objects.get(user=user)\n context['brand'] = Brand.objects.get(id=1)\n return context\n\n\n\n","sub_path":"Accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"414979123","text":"# -*- coding: utf-8 -*-\n# snapshottest: v1 - https://goo.gl/zC4yUc\nfrom __future__ import unicode_literals\n\nfrom snapshottest import Snapshot\n\n\nsnapshots = Snapshot()\n\nsnapshots['test_add_missing_field[uvloop] 1'] = [\n {\n '_id': 'foo',\n 'missing': False\n },\n {\n '_id': 'bar',\n 'missing': True\n },\n {\n '_id': 'baz',\n 'missing': False\n }\n]\n","sub_path":"tests/caches/snapshots/snap_test_migrate.py","file_name":"snap_test_migrate.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"255328055","text":"def sadCycle(base, number):\n\tfrom collections import Counter\n\tcycle = [number]\n\tc = Counter(cycle)\n\twhile c[number] < 6:\n\t\tcomponentList = []\n\t\tstrnum = str(number)\n\t\tfor element in strnum:\n\t\t\tcomponentList.append(int(element))\n\t\tsquared_components = []\n\t\tfor component in componentList:\n\t\t\tsquared_components.append(component**base)\n\t\tsum_squares = sum(squared_components)\n\t\tcycle.append(sum_squares)\n\t\tc.update(cycle)\n\t\tnumber = sum_squares\n\tprint(cycle)\n","sub_path":"sadCycles.py","file_name":"sadCycles.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"294450450","text":"import os\nimport json\nimport gevent\nimport logging\nfrom flask import Flask, request, Response\nfrom flask import render_template, send_from_directory, url_for\nfrom flask import send_file, make_response, abort\nfrom flask.ext.restful import reqparse, abort, Api, Resource\nfrom flask.ext.basicauth import BasicAuth\n\n\nfrom wemo.signals import statechange\nfrom wemo.device.switch import Switch\nfrom wemo.device.insight import Insight\nfrom wemo.device.maker import Maker\nfrom wemo.environment import Environment, UnknownDevice\nfrom wemo.pluginmanager import PluginManager\nfrom socketio import socketio_manage\nfrom socketio.namespace import BaseNamespace\nfrom socketio.mixins import BroadcastMixin\n\nhere = lambda *x: 
os.path.join(os.path.dirname(__file__), *x)\n\n\napp = Flask(__name__)\napi = Api(app)\nlog = logging.getLogger(__name__)\n\nENV = None\n\npluginManager = PluginManager()\n\ndef initialize(bind=None, auth=None):\n global ENV\n pluginManager.start()\n if ENV is None:\n ENV = Environment(bind=bind)\n ENV.start()\n gevent.spawn(ENV.discover, 10)\n if auth is not None:\n elems = auth.split(':', 1)\n username = elems[0]\n password = elems[1]\n print(\"Protected server with basic auth username/password: \", username, password)\n app.config['BASIC_AUTH_USERNAME'] = username\n app.config['BASIC_AUTH_PASSWORD'] = password\n app.config['BASIC_AUTH_FORCE'] = True\n basic_auth = BasicAuth(app)\n\ndef get_device(name, should_abort=True):\n try:\n return ENV.get(name)\n except UnknownDevice:\n if not should_abort:\n raise\n abort(404, error='No device matching {}'.format(name))\n\n\n# First, the REST API\nclass EnvironmentResource(Resource):\n\n def get(self):\n result = {}\n for dev in ENV:\n result[dev.name] = serialize(dev)\n return result\n\n def post(self):\n seconds = (request.json or {}).get('seconds', (\n request.values or {}).get('seconds', 5))\n ENV.discover(int(seconds))\n return self.get()\n\n\nclass DeviceResource(Resource):\n\n def get(self, name):\n return serialize(get_device(name))\n\n def post(self, name):\n dev = get_device(name)\n if not isinstance(dev, Switch):\n abort(405, error='Only switches can have their state changed')\n action = (request.json or {}).get('state', (\n request.values or {}).get('state', 'toggle'))\n if action not in ('on', 'off', 'toggle', 'blink'):\n abort(400, error='{} is not a valid state'.format(action))\n if action == 'blink':\n delay = (request.json or {}).get('delay', (\n request.values or {}).get('delay', '1'))\n getattr(dev, action)(delay=int(delay))\n else:\n getattr(dev, action)()\n return serialize(dev)\n\n\napi.add_resource(EnvironmentResource, '/api/environment')\napi.add_resource(DeviceResource, '/api/device/')\n\n\nclass SocketNamespace(BaseNamespace):\n\n def update_state(self, sender, **kwargs):\n data = sender.serialise()\n data['state'] = kwargs.get('state', data['state'])\n self.emit(\"send:devicestate\", data)\n\n def on_statechange(self, data):\n ENV.get(data['name']).set_state(data['state'])\n\n def on_join(self, data):\n statechange.connect(self.update_state,\n dispatch_uid=id(self))\n for device in ENV:\n self.update_state(device)\n\n def __del__(self):\n statechange.disconnect(dispatch_uid=id(self))\n\n\n# Now for the WebSocket api\n@app.route(\"/socket.io/\")\ndef run_socketio(**kwargs):\n socketio_manage(request.environ, {'': SocketNamespace})\n return \"ok\"\n\n\n# routing for basic pages (pass routing onto the Angular app)\n@app.route('/')\ndef basic_pages(**kwargs):\n return make_response(open(here('templates/index.html')).read())\n\n\n# special file handlers and error handlers\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'img/favicon.ico')\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\napp.config.from_object('wemo.server.settings')\napp.url_map.strict_slashes = False\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"wemo/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"380758161","text":"from flask import Flask, render_template, request\nfrom werkzeug import 
secure_filename\nimport MySQLdb.cursors, os\nfrom datetime import datetime\n\napp = Flask(__name__)\nUPLOAD_FOLDER = 'static/images'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\ndb = MySQLdb.connect(host='localhost', db='trymysql', user='marc', passwd='123')\n\n@app.route(\"/\")\ndef home():\n    dict_cursor = db.cursor(MySQLdb.cursors.DictCursor)\n    # order by date, then time (the original ordered by datee2 twice)\n    dict_cursor.execute(\"select author, title, content, datee, imagee, id from blog ORDER BY datee DESC, datee2 DESC\")\n    posts = dict_cursor.fetchall()[:3]\n    return render_template('home.html', posts=posts)\n\n@app.route(\"/about\")\ndef about():\n    return render_template('about.html', title='About')\n\n@app.route(\"/create\", methods=['GET','POST'])\ndef createblog():\n    if request.method == 'POST':\n        now = datetime.now()\n        req = request.form\n        dict_cursor = db.cursor(MySQLdb.cursors.DictCursor)\n        author = req['author']\n        title = req['title']\n        content = req['content']\n        datee = now.strftime(\"%x\")\n        datee2 = now.strftime(\"%X\")\n        file = request.files['imagee']\n        image_string = ''  # default so the INSERT below cannot hit a NameError when no image is uploaded\n        if file:\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'],filename))\n            FileImage = file.filename\n            image_string = format(FileImage)\n        dict_cursor.execute('INSERT INTO blog (author, title, content, datee, datee2, imagee) VALUES (%s,%s,%s,%s,%s,%s)', (author, title, content, datee, datee2, image_string))\n        db.commit()\n        dict_cursor.close()\n        return \"\"\n    return render_template('blogdb.html', title='create a blog')\n\n@app.route(\"/readmore/<id>\", methods=['GET'])\ndef readmore(id):\n    dict_cursor = db.cursor(MySQLdb.cursors.DictCursor)\n    dict_cursor.execute(\"select * from blog where id=%s\", (id,))\n    blog = dict_cursor.fetchone()\n    return render_template('blogtemplate.html', title=\"Blog\", blog=blog)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"239717308","text":"from .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# ALLOWED_HOSTS entries are bare hostnames; a scheme such as 'http://' is not valid here\nALLOWED_HOSTS = ['pantallaunicabackend.emtelco.co']\n\nSTATIC_ROOT = \"/var/www/pythml/pantalla_unica_backend/static/\"\nMEDIA_ROOT = \"/var/www/pythml/pantalla_unica_backend/media/\"\n\nAPI_KEY = '32252012a4110c9f49de548706d46ec325aac6e2'\nDEFAULT_AUTH_HEADERS = {'apiKey': API_KEY}\n\nURL_XML_AVAYA = 'http://eros/General/WebPopUpAvayaAgent/Default.aspx?id={}'\n\n# REST services\nURL_WSMDM_COD_FAMILY = 'http://ws.une.com.co/wsMDM/GetByAccount/{}/1403'\nURL_WSMDM_ASSET = 'http://ws.une.com.co/wsMDM/GetByAsset/{}'\nURL_GUION_CCC_AFECTADO_SIEBEL = 'http://10.69.61.33:82/Servicio/AfectadoSiebel'\n# Custom message\nREST_MENSAJE_PERSONALIZADO = 'http://10.69.61.33:82/Servicio/ClienteCritico'\n\n# SOAP services\nSOAP_SIEBEL_CODFAMILIAR = 'http://unevm-pmap.epmtelco.com.co/wsProxyIVRSiebel8_1_version2/siebel8_1ConsultaCodFamiliar.php?wsdl'\nSOAP_WS_IVR_HOGARES = 'http://10.69.43.101:8088/WsIvrHogares.asmx?WSDL'\n\n\nWSGI_APPLICATION = 'pantalla_unica.wsgi_prod.application'\nCORS_ORIGIN_ALLOW_ALL = False\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\"\"\"\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.sqlite3',\n        'NAME': os.path.join(BASE_DIR, '../db.sqlite3'),\n    }\n}\n\"\"\"\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql',\n        'NAME': 'pantalla_unica',\n        'USER': 'postgres',\n        'PASSWORD': '123456',\n        
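# hard-coded credentials; in a real deployment these would typically come from environment variables or a secrets manager\n        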
'HOST': '127.0.0.1',\n 'PORT': '5432',\n }\n}\nINSTALLED_APPS += ['corsheaders']\n\nMIDDLEWARE += ['corsheaders.middleware.CorsMiddleware',]","sub_path":"pantalla_unica/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"565023805","text":"import z\nd1 = z.getp(\"prob_down\")\nd2 = z.getp(\"prob_down_5_10\")\nfrom sortedcontainers import SortedSet\n\nscores = SortedSet()\ndef doem():\n saveem = dict()\n stocks = z.getp(\"listofstocks\")\n for idx, astock in enumerate(stocks):\n if not idx % 100:\n print(\"idx: {}\".format( idx))\n try:\n score = d1[astock] + (1-d2[astock])\n except:\n continue\n scores.add((score, astock))\n z.setp(scores[-30:], \"probs_added_up\")\n\nif __name__ == '__main__':\n doem()\n","sub_path":"python/zen/probs_disc.py","file_name":"probs_disc.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"397459043","text":"import pickle\nimport numpy as np\nimport math\n\nclass map(object):\n def __init__(self,savepath = 'map/my'):\n self.pic2pos = pickle.load(open('reconstruct.p','rb'))\n self.precision = 100\n self.scaling = self.precision/1600\n self.f = np.zeros((self.precision,self.precision))\n self.c = np.zeros((self.precision,self.precision))\n self.maxc = np.zeros((self.precision,self.precision))\n self.i = np.zeros((self.precision,self.precision))\n self.midpoint = 1600/2\n self.pi = 3.1415926\n self.ang = self.pi/2.5\n self.stepsize = 1\n self.steps = 0\n self.savepath = savepath\n\n def getabsolutepoints(self,mask,val = True):\n points = []\n for i in range(0,84):\n pix = np.where(mask[42:,i]==val)[0]\n pix = pix + 42\n if len(pix) == 0:\n continue\n #print(pix,len(pix))\n pix = np.max(pix)\n pos = self.pic2pos[pix]\n #print(self.steps,pix,pos)\n y = pos\n theta = abs(i-42)/84*self.ang\n\n x = math.tan(theta) * y\n if i < 43:\n x = -x\n c = abs(i-44)\n c = c if (c>30 and pix < 50) else 0\n if pix > 0:\n points.append((x,y,c))\n return points\n def getabsoluteboundary(self,mask):\n points = []\n for i in range(0,84):\n pix = np.where(mask[42:,i]==True)[0]\n pix = pix + 42\n if len(pix) == 0:\n continue\n pix = np.max(pix)\n pos = self.pic2pos[pix]\n y = pos\n theta = abs(i-42)/84*self.ang\n\n x = math.tan(theta) * y\n if i < 43:\n x = -x\n c = abs(i-44)\n c = 0 if c < 20 else c\n points.append((x,y,c))\n return points\n\n def corrected(self,points,theta):\n for i in range(len(points)):\n x,y,c= points[i]\n nx = math.cos(theta)*x-math.sin(theta)*y\n ny = math.sin(theta)*x+math.cos(theta)*y\n points[i]=(nx,ny,c)\n return points\n\n def outbound(self,x,y):\n return x<0 or x >= self.precision or y<0 or y>=self.precision\n\n def world_coord_2_map_loc(self,coord):\n x,y = coord\n x,y = -y,x\n x,y = self.midpoint - y,self.midpoint + x\n x,y = int(x*self.scaling+0.5),int(y*self.scaling+0.5)\n return x,y\n\n def map_loc_2_world_coord(self,loc):\n x,y = loc\n x,y = x/self.scaling,y/self.scaling\n x,y = y - self.midpoint,self.midpoint - x\n x,y = y,-x\n return x,y\n\n def self_loc_2_map_loc(self,loc):\n x, y = loc\n x, y = self.midpoint - y, self.midpoint + x\n x, y = int(x * self.scaling+0.5), int(y * self.scaling+0.5)\n return x, y\n\n def set(self,f,x,y,v,c=-1):\n\n nx = self.midpoint - y\n ny = self.midpoint + x\n nx = int(nx*self.scaling+0.5)\n ny = int(ny*self.scaling+0.5)\n if self.outbound(nx,ny):\n return False\n if c == -1:\n f[nx,ny] = 
v\n else: # idea: if previosu max c is 20, and rn c is 25, then we want current c to have weight 25:20 in the composition\n # if previous is max c 20, total c 500, and rn c is 25, you want ratio of 20 * (sqrt(500/20)):25, not 100:25\n # total c is sum, max c is max\n if c < 1.11 and self.maxc[nx,ny] > 5:\n return\n f[nx, ny] = (f[nx, ny] * (self.maxc[nx,ny] * math.sqrt(self.c[nx,ny]/(self.maxc[nx,ny]+0.0001))) + v * c) / ((self.maxc[nx,ny] * math.sqrt(self.c[nx,ny]/(self.maxc[nx,ny]+0.0001))) + c + 0.000001)\n self.c[nx, ny] = self.c[nx, ny] + c\n self.maxc[nx, ny] = max(self.maxc[nx,ny],c)\n return True\n\n def raw_set(self,f,x,y,v,c=-1):\n\n\n if self.outbound(x,y):\n return False\n if c == -1:\n f[x,y] = v\n else:\n nx,ny = x,y\n if c < 1.11 and self.maxc[nx,ny] > 5:\n return\n\n f[nx, ny] = (f[nx, ny] * (self.maxc[nx, ny] * math.sqrt(self.c[nx, ny] / (self.maxc[nx, ny]+0.0001))) + v * c) / (\n (self.maxc[nx, ny] * math.sqrt(self.c[nx, ny] / (self.maxc[nx, ny]+0.0001))) + c + 0.000001)\n self.c[nx, ny] = self.c[nx, ny] + c\n self.maxc[nx, ny] = max(self.maxc[nx,ny], c)\n\n return True\n\n def _set(self,f,x,y,v,c=-1):\n for i in range(-1,2):\n for j in range(-1,2):\n self.set(f,x+i,y+j,v,c)\n return\n\n\n def get(self,f,x,y):\n return f[int(self.scaling*(self.midpoint-y)+0.5),int(self.scaling*(self.midpoint+x)+0.5)]\n\n def vec_uni(self,vec):\n x,y = vec\n if x == 0 and y == 0:\n #print('zero')\n return (0,0)\n len = math.sqrt(x*x+y*y)\n return (x/len,y/len)\n def updatemap(self,points,pos):\n for i in points:\n self.set(self.f,i[0]+pos[0],i[1]+pos[1],1,self.dist_2_conf(math.sqrt(i[0]*i[0]+i[1]*i[1])+i[2]*10))\n\n ori = self.self_loc_2_map_loc(pos)\n\n for i in points:\n tp = self.self_loc_2_map_loc((i[0]+pos[0],i[1]+pos[1]))\n vec = (tp[0]-ori[0],tp[1]-ori[1])\n uni_vec = self.vec_uni(vec)\n if uni_vec == (0,0):\n continue\n multiples = (abs(vec[0])+abs(vec[1])) / (abs(uni_vec[0])+abs(uni_vec[1]))\n tpdist = math.sqrt(i[0]*i[0]+i[1]*i[1])\n for j in range(int(multiples+1)):\n newpos = (int(ori[0]+j*uni_vec[0]+0.5),int(ori[1]+j*uni_vec[1]+0.5))\n if newpos != tp:\n self.raw_set(self.f,newpos[0],newpos[1],0,self.dist_2_conf(tpdist * j/ multiples+i[2]*10))\n\n\n\n\n def dist_2_conf(self,dist):\n dist = max(dist,1)\n #print('conf',100000/(dist*dist))\n return 100000/(dist*dist)\n def updateboundary(self,points,pos):\n #for i in points:\n # self.set(self.f,i[0]+pos[0],i[1]+pos[1],1,i[2])\n\n ori = self.self_loc_2_map_loc(pos)\n\n for i in points:\n tp = self.self_loc_2_map_loc((i[0] + pos[0], i[1] + pos[1]))\n vec = (tp[0] - ori[0], tp[1] - ori[1])\n uni_vec = self.vec_uni(vec)\n if uni_vec == (0,0):\n continue\n multiples = (abs(vec[0])+abs(vec[1])) / (abs(uni_vec[0])+abs(uni_vec[1]))\n tpdist = math.sqrt(i[0] * i[0] + i[1] * i[1])\n for j in range(1000):\n newpos = (int(ori[0] + j * uni_vec[0]+0.5), int(ori[1] + j * uni_vec[1]+0.5))\n if j < multiples:\n if newpos != tp:\n self.raw_set(self.f, newpos[0], newpos[1], 0,\n self.dist_2_conf(tpdist * j / multiples + i[2] * 10)) # original 1000 multiplier for conf\n else:\n kk = self.raw_set(self.f, newpos[0], newpos[1], 1,\n self.dist_2_conf(tpdist * (multiples - (j-multiples)) / multiples + i[2] * 10))\n if kk == False:\n break\n\n\n\n\n\n def update_obstacle(self,pos_,dir,mask):\n pos = (-pos_[1],pos_[0])\n points = self.getabsolutepoints(mask)\n if len(points) == 0:\n return\n points = self.corrected(points,dir)\n self.updatemap(points,pos)\n\n def update_self(self,pos_):\n pos = (-pos_[1], pos_[0])\n self.set(self.i,pos[0],pos[1],1)\n\n def 
update_obstacle_strong(self,pos_):\n        pos = (-pos_[1], pos_[0])\n        self.set(self.f,pos[0],pos[1],1,1000000)\n    def update_boundary(self,pos_,dir,mask):\n        pos = (-pos_[1], pos_[0])\n        points = self.getabsolutepoints(mask,val = 255)\n        # self.i[int(pos[1]),int(pos[0])] = 1\n        if len(points) == 0:\n            return\n        points = self.corrected(points, dir)\n        self.updateboundary(points, pos)\n\n\n","sub_path":"examples/hardsubmission/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"404268445","text":"def id_dfs(puzzle, tujuan, get_langkah):\n    import itertools # used to build the sequence of numbers\n\n    def dfs(rute, kedalaman):\n        if kedalaman == 0:\n            return\n        if rute[-1] == tujuan:\n            return rute\n        for geser in get_langkah(rute[-1]):\n            if geser not in rute:\n                next_rute = dfs(rute + [geser], kedalaman - 1)\n                if next_rute:\n                    return next_rute\n    # count the number of steps performed\n    for kedalaman in itertools.count():\n        rute = dfs([puzzle], kedalaman)\n        if rute:\n            return rute\n# set the limit of the solution\ndef num_matrix(rows, cols, steps=20):\n    import random\n# build the array using random\n    nums = list(range(1, rows * cols)) + [0]\n    tujuan = [ nums[i:i+rows] for i in range(0, len(nums), rows) ]\n\n    get_langkah = num_gesers(rows, cols)\n    puzzle = tujuan\n    for steps in range(steps):\n        puzzle = random.choice(get_langkah(puzzle))\n\n    return puzzle, tujuan\n\ndef num_gesers(rows, cols):\n    def get_langkah(subject):\n        gesers = []\n\n        zrow, zcol = next((r, c)\n            for r, l in enumerate(subject)\n            for c, v in enumerate(l) if v == 0)\n\n        def swap(row, col):\n            import copy\n            s = copy.deepcopy(subject)\n            s[zrow][zcol], s[row][col] = s[row][col], s[zrow][zcol]\n            return s\n\n        # up\n        if zrow > 0:\n            gesers.append(swap(zrow - 1, zcol))\n        # right\n        if zcol < cols - 1:\n            gesers.append(swap(zrow, zcol + 1))\n        # down\n        if zrow < rows - 1:\n            gesers.append(swap(zrow + 1, zcol))\n        # left\n        if zcol > 0:\n            gesers.append(swap(zrow, zcol - 1))\n\n        return gesers\n    return get_langkah\n\npuzzle, tujuan = num_matrix(4, 4)\nsolusi = id_dfs(puzzle, tujuan, num_gesers(4, 4))\n\nprint(solusi)\nprint(f'number of steps : {len(solusi)}')\n","sub_path":"ai/uts/iterative.py","file_name":"iterative.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"391534799","text":"#!/usr/bin/env python\nimport json\nfrom urlparse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nBASE_URL = \"https://food.jumia.com.ng/\"\n\n\ndef get_restaurants():\n    # TODO(tade): support sort and user_search query params\n    results = []\n    print(\"Getting restaurants in Lagos \\n\")\n    for page_no in range(1, 6):\n        url = urljoin(BASE_URL, \"restaurants/city/lagos?page={}\".format(page_no))\n        page = fetch(url)\n        # Create a BeautifulSoup object\n        soup = BeautifulSoup(page, 'html.parser')\n        vendor_selectors = soup.find_all('article', class_='vendor')\n        for vendor_tag in vendor_selectors:\n            cuisines = vendor_tag.find(\n                class_=\"vendor__cuisines\").getText(separator=u', ')\n            cuisines = [x.strip() for x in cuisines.split(',') if x.strip()]\n            name = vendor_tag.find(class_=\"vendor__name\").get_text()\n            vendor = {\n                'name': name.strip(),\n                'cuisines': cuisines,\n                'url': urljoin(BASE_URL, vendor_tag.find('a').get('href'))\n            }\n            details = get_vendor_details(vendor['url'])\n            vendor.update(details)\n            results.append(vendor)\n\n    print(\"DONE \\n\")\n    
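# each vendor dict now carries the listing fields plus the detail fields fetched above\n    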
return results\n\n\ndef get_vendor_details(url):\n \"Returns vendor menu and other info\"\n # TODO(tade): include info (delivery hours, delivery fee, tagline, address)\n print(\"Getting vendor: {}\".format(url))\n page = fetch(url)\n soup = BeautifulSoup(page, 'html.parser')\n menu_selectors = soup.find_all(class_='menu__category')\n try:\n delivery_fee = soup.find(class_='vendor-info__overview__list').find_all('dd')[0].get_text()\n except:\n delivery_fee = None\n\n try:\n street_address = soup.find(class_='vendor-info__address__content').find(attrs={'itemprop': 'streetAddress'}).get_text()\n except:\n street_address = None\n try:\n geo_latitude = soup.find(class_='vendor-info__address__map').find(attrs={'itemprop': 'latitude'}).get('content')\n geo_longitude = soup.find(class_='vendor-info__address__map').find(attrs={'itemprop': 'latitude'}).get('content')\n geo_address = {'latitude': geo_latitude, 'longitude': geo_longitude}\n except:\n geo_address = {'latitude': None, 'longitude': None}\n\n delivery_hours = get_delivery_hours(soup)\n menus = get_menus(menu_selectors)\n\n return {\n 'menus': menus,\n 'delivery_fee': delivery_fee.strip() if delivery_fee else delivery_fee,\n 'delivery_hours': delivery_hours,\n # 'tagline': tagline,\n 'address': street_address,\n 'geo_address': geo_address\n }\n\n\ndef get_delivery_hours(soup):\n schedules = soup.find_all(class_=\"schedules__item__time\")\n delivery_hours = []\n for day in schedules:\n meta = day.find_all('meta')\n _day = dict([(x.get('itemprop'), x.get('content')) for x in meta])\n delivery_hours.append(_day)\n\n return [x for x in delivery_hours if x]\n\n\ndef get_menus(menu_selectors):\n menus = []\n for menu_tag in menu_selectors:\n name = menu_tag.find(class_=\"menu__category__title\").get_text().strip()\n items_selectors = menu_tag.find_all(class_=\"menu-item\")\n description = menu_tag.find(class_='menu__category__content')\n description = description.get_text().strip() if description else \"\"\n menu = {'name': name.strip(), 'description': description}\n items = []\n for item_tag in items_selectors:\n title = item_tag.find(class_=\"menu-item__title\").get_text().strip()\n description = menu_tag.find(class_=\"menu-item__description\")\n description = description.get_text().strip() if description else \"\"\n # has_variants = 'has-variations' in [x.strip() for x in item_tag.get(\"class\")]\n variants = get_variants(item_tag)\n if len(variants) == 1:\n item = {\n 'title': title,\n 'description': description,\n 'amount': variants[0]['price'],\n 'options': variants[0]['options']\n }\n else:\n item = {\n 'title': title,\n 'description': description,\n 'amount': variants[0]['price'],\n 'variants': variants\n }\n\n items.append(item)\n menu['items'] = items\n menus.append(menu)\n\n return menus\n\n\ndef fetch(url):\n return requests.get(url, verify=False).content\n\n\ndef get_variants(item_tag):\n variants_container = item_tag.find(class_=\"menu-item__variations\")\n variant_tags = variants_container.find_all('article', class_=\"menu-item__variation\")\n variants = []\n for x in variant_tags:\n title = x.find(class_=\"menu-item__variation__title\").get_text().strip()\n price = x.find(class_=\"menu-item__variation__price\").get_text().strip()\n price = float(''.join(price[1:].split('.')[0].split(',')))\n has_options = False\n url = x.find('form').get('action')\n has_options = not('simple' in url)\n # options_page = fetch(urljoin(BASE_URL, url))\n # soup = BeautifulSoup(options_page, 'html.parser')\n # opt_groups = soup.find_all('div', 
class_='choices-toppings__elements__wrapper')\n # for group in opt_groups:\n # title = group.find(class_=\"choices-toppings__title\").get_text().strip()\n # subtitle = group.find(class_=\"choices-toppings__subtitle\").get_text().strip()\n # is_optional = bool(group.find(class_=\"choices-toppings-optional\"))\n # elems = group.find_all(\"li\", class_=\"choices-toppings__element\")\n # _elems = []\n # for e in elems:\n # elem_price = e.find(class_=\"choices-toppings__element__price\")\n # elem_label = e.find(class_=\"choices-toppings__element__label\")\n # _elems.append({'price': elem_price, 'label': elem_label})\n\n # options.append({'title': title, 'subtitle': subtitle, 'options': _elems})\n # add variant to list\n variants.append({'title': title, 'price': price, 'options': has_options})\n\n return variants\n\n\ndef test_get_restaurants():\n results = get_restaurants()\n assert(isinstance(results, list))\n assert(len(results) > 0)\n vendor = results[0]\n assert(isinstance(vendor, dict))\n assert(set(vendor) == set(['name', 'cuisines', 'url']))\n\n\ndef test_get_vendor_details(url):\n pass\n\n\nif __name__ == '__main__':\n\n restaurants = get_restaurants()\n with open('lagos-stores.json', 'w') as f:\n json.dump(restaurants, f)\n","sub_path":"bin/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"246553238","text":"from qrs_precise_detection import QrsPreciseDetector\nfrom copy import copy\nimport numpy as np\nimport skfuzzy as fuzz\nfrom skfuzzy import control as ctrl\n\n\n\nclass QrsDetector:\n def __init__(self, f, channel, meta_data_buffer, fuzzyThreshold):\n self.signalFrequency = f\n self.signal = [] # queue.Queue(maxsize=4*self.signalFrequency)\n self.differ = [0]\n self.differ2 = [0]\n self.integer = [0]\n self.PIDwindow = self.time(116)\n self.prc_detector = QrsPreciseDetector(self.signalFrequency, int(self.PIDwindow/2)+self.time(300))\n self.mdb = meta_data_buffer\n self.channel = channel\n self.threshold_send_sample = [] # if last_sample_num = <- [1] then send <-[0] - [1]\n self.progress = -1\n self.diff_range = 0\n self.truncated = self.time(200)\n self.threshold = 0\n self.fuzzyThreshold = fuzzyThreshold\n\n\n def add_sample(self, sample):\n self.signal.append(sample)\n # qrs_window[3] -= 1\n self.sample_analyze()\n\n def sample_analyze(self):\n self.progress += 1\n if len(self.signal) > self.time(2000):\n self.signal = self.signal[1:]\n for qrs_window in self.threshold_send_sample:\n qrs_window[0] -= 1\n qrs_window[1] -= 1\n\n if len(self.signal) > 2:\n self.differ.append((self.signal[-3] - self.signal[-1]) * 1)\n while len(self.differ) > self.time(2000) - self.PIDwindow/2:\n self.truncated += 1\n self.differ = self.differ[1:]\n\n self.integral()\n self.differential()\n self.precise_detection()\n\n if self.progress > self.time(2000):\n self.check_threshold()\n else:\n self.truncated = 0\n return\n\n def precise_detection(self):\n if self.threshold_send_sample:\n if self.threshold_send_sample[0][2] <= self.progress:\n metadata = self.prc_detector.analyze(self.signal[self.threshold_send_sample[0][0]:self.threshold_send_sample[0][1]], self.threshold_send_sample[0][3])\n if metadata:\n self.mdb.add_meta_data(self.channel, metadata)\n self.threshold_send_sample = self.threshold_send_sample[1:]\n\n def check_threshold(self):\n # FUZZY - siły sygnału długości o podwyższonej mocy\n fuzzy_signal = self.differ2[:-1]\n if self.diff_range == 0:\n 
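# recompute the adaptive threshold only while no candidate peak is active, so samples inside a detected QRS cannot inflate it\n            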
self.threshold = self.fuzzy_threshold(fuzzy_signal)\n if self.differ2[-1] >= self.threshold:\n self.diff_range += 1\n elif self.diff_range > 3: # minimal peak width\n diff_max = self.find_reverse_max(self.diff_range)\n self.threshold_send_sample.append([diff_max - self.time(300), diff_max + self.time(300), self.progress + self.time(400), self.truncated+diff_max])\n self.diff_range = 0\n\n def integral(self):\n if len(self.differ) > self.PIDwindow:\n while len(self.integer) < self.PIDwindow / 2:\n self.integer.append(self.integer[-1])\n self.integer.append(sum(power(self.differ[-self.PIDwindow:])))\n while len(self.integer) > self.time(2000) - self.PIDwindow / 2:\n self.integer = self.integer[1:]\n\n def differential(self):\n if len(self.integer) > self.PIDwindow:\n while len(self.differ2) < self.PIDwindow / 2:\n self.differ2.append(self.differ2[-1])\n self.differ2.append(sum(self.integer[-self.PIDwindow:]) / self.PIDwindow)\n while len(self.differ2) > self.time(2000) - self.PIDwindow:\n self.differ2 = self.differ2[1:]\n\n def find_reverse_max(self, max_range):\n return self.differ2.index(max(self.differ2[-1:-max_range:-1]))\n\n def time(self, time):\n return int(time * self.signalFrequency / 1000.0 + 0.5)\n\n def fuzzy_threshold(self, fuzzy_signal):\n return self.fuzzyThreshold.get_channel_threshold(self.channel, fuzzy_signal)\n\ndef power(list):\n return [x ** 2 for x in list]\n","sub_path":"qrs_detector.py","file_name":"qrs_detector.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143812662","text":"import csv\nfrom .node import Node\n\n\nclass Graph():\n def __init__(self, nodes_file, neighbours_file):\n self.nodes = self.load_nodes(nodes_file)\n self.load_neighbours(neighbours_file)\n\n def load_nodes(self, node_file):\n nodes = {}\n with open(node_file, 'r') as in_file:\n reader = csv.DictReader(in_file)\n\n for row in reader:\n nodes[row['id']] = Node(row['name'], row['id'])\n\n return nodes\n\n def load_neighbours(self, neighbour_file):\n with open(neighbour_file, 'r') as in_file:\n reader = csv.DictReader(in_file)\n\n for row in reader:\n neighbours = [neighbour.strip('[] ') for neighbour in row['neighbours'].split(',') if neighbour.strip('[] ') != \"\"]\n node_id = row['state']\n\n for neighbour in neighbours:\n neighbour = self.nodes[neighbour]\n self.nodes[node_id].add_neighbour(neighbour)\n\n def get_violations(self):\n violations = []\n\n for id, node in self.nodes.items():\n if not node.is_valid():\n violations.append(id)\n\n return violations\n\n def calculate_cost(self):\n pass\n","sub_path":"code/classes/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627672742","text":"\nfrom pyppl import PyPPL, Proc, Channel\n\n\npSort = Proc(desc = 'Sort files.')\npSort.input = {\"infile:file\": Channel.fromPattern(\"./data/*.txt\")}\npSort.output = \"outfile:file:{{i.infile | fn}}.sorted\"\npSort.forks = 5\npSort.script = \"\"\"\n sort -k1r {{i.infile}} > {{o.outfile}}\n\"\"\"\n\npAddPrefix = Proc(desc = 'Add line number to each line.')\npAddPrefix.depends = pSort\npAddPrefix.input = \"infile:file\" # automatically inferred from pSort.output\npAddPrefix.output = \"outfile:file:{{i.infile | fn}}.ln\"\npAddPrefix.forks = 5\npAddPrefix.script = \"\"\"\npaste -d. 
<(seq 1 $(wc -l {{i.infile}} | cut -f1 -d' ')) {{i.infile}} > {{o.outfile}}\n\"\"\"\n\npMergeFiles = Proc(desc = 'Merge files, each as a column.')\npMergeFiles.depends = pAddPrefix\n# [\"test1.ln\", \"test2.ln\", ..., \"test5.ln\"]\npMergeFiles.input = {\"infiles:files\": lambda ch: [ch.flatten()]}\npMergeFiles.output = \"outfile:file:mergedfile.txt\"\npMergeFiles.exdir = \"./export\"\npMergeFiles.script = \"\"\"\npaste {{i.infiles | asquote}} > {{o.outfile}}\n\"\"\"\n\nPyPPL().start(pSort).run()\n","sub_path":"tutorials/transformInputChannels/transformInputChannels.py","file_name":"transformInputChannels.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"64474039","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('br_addresses', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='address',\n options={'ordering': ['created'], 'verbose_name': 'Endere\\xe7o', 'verbose_name_plural': 'Endere\\xe7os'},\n ),\n migrations.AlterModelOptions(\n name='city',\n options={'ordering': ('state', 'name'), 'verbose_name': 'Cidade', 'verbose_name_plural': 'Cidades'},\n ),\n migrations.AlterField(\n model_name='address',\n name='city',\n field=models.ForeignKey(verbose_name='Cidade', to='br_addresses.City'),\n ),\n migrations.AlterField(\n model_name='address',\n name='complement',\n field=models.TextField(null=True, verbose_name='Complemento', blank=True),\n ),\n migrations.AlterField(\n model_name='address',\n name='kind_street',\n field=models.CharField(max_length=2, verbose_name='Tipo logradouro', choices=[(b'1', 'Aeroporto'), (b'2', 'Alameda'), (b'3', '\\xc1rea'), (b'4', 'Avenida'), (b'5', 'Campo'), (b'6', 'Chac\\xe1ra'), (b'7', 'Col\\xf4nia'), (b'8', 'Condom\\xednio'), (b'9', 'Conjunto'), (b'10', 'Distrito'), (b'11', 'Esplanada'), (b'12', 'Esta\\xe7\\xe3o'), (b'13', 'Estrada'), (b'14', 'Favela'), (b'15', 'fazenda'), (b'16', 'Feira'), (b'17', 'Jardim'), (b'18', 'Ladeira'), (b'19', 'Lago'), (b'20', 'Lagoa'), (b'21', 'Largo'), (b'22', 'Loteamento'), (b'23', 'Morro'), (b'24', 'N\\xfacleo'), (b'25', 'Parque'), (b'26', 'Passarela'), (b'27', 'P\\xe1tio'), (b'28', 'Pra\\xe7a'), (b'29', 'Quadra'), (b'30', 'Recanto'), (b'31', 'Resid\\xeancial'), (b'32', 'Rodovia'), (b'33', 'Rua'), (b'34', 'Setor'), (b'35', 'S\\xedtio'), (b'36', 'Travessa'), (b'37', 'Trecho'), (b'38', 'Trevo'), (b'39', 'Vale'), (b'40', 'Vereda'), (b'41', 'Estrada'), (b'42', 'Viaduto'), (b'43', 'Viela'), (b'44', 'Vila')]),\n ),\n migrations.AlterField(\n model_name='address',\n name='neighborhood',\n field=models.CharField(default='center', max_length=100, verbose_name='Bairro'),\n ),\n migrations.AlterField(\n model_name='address',\n name='number',\n field=models.IntegerField(default=1000, verbose_name='N\\xfamero'),\n ),\n migrations.AlterField(\n model_name='address',\n name='street',\n field=models.CharField(help_text='Rua ou avenida ou viela ou rodovia ... 
mais um endere\\xe7o', max_length=100, verbose_name='Rua'),\n ),\n migrations.AlterField(\n model_name='address',\n name='zip_code',\n field=models.CharField(max_length=9, verbose_name='Cep'),\n ),\n migrations.AlterField(\n model_name='city',\n name='state',\n field=models.CharField(max_length=2, verbose_name='Estado', choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amap\\xe1'), ('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Cear\\xe1'), ('DF', 'Distrito Federal'), ('ES', 'Esp\\xedrito Santo'), ('GO', 'Goi\\xe1s'), ('MA', 'Maranh\\xe3o'), ('MT', 'Mato Grosso'), ('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'), ('PA', 'Par\\xe1'), ('PB', 'Para\\xedba'), ('PR', 'Paran\\xe1'), ('PE', 'Pernambuco'), ('PI', 'Piau\\xed'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'), ('RO', 'Rond\\xf4nia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'), ('SP', 'S\\xe3o Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')]),\n ),\n ]\n","sub_path":"br_addresses/migrations/0002_auto_20141115_2058.py","file_name":"0002_auto_20141115_2058.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"194754114","text":"from fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\n\n\nclass Corporative_Detection(object):\n\n def is_not_corp_email(self, email):\n \"\"\" Detect if an email is not corporative\n\n Keyword arguments:\n email -- the first part on an email (before the @)\n\n \"\"\"\n\n email_parts = email.split(\"@\")\n\n if self.corp_list is not None:\n # search for corporative mail in list\n for _corp in self.corp_list:\n if _corp in email_parts:\n return False\n\n # searching for corporative mails of the type @.com\n _choice = process.extractOne(\n email_parts[0], [email_parts[1]], scorer=fuzz.ratio)\n\n if _choice[1] > 60:\n return False\n\n return (True if self.email_model.predict(\n [email_parts[0]])[0] == \"1\" else False)\n\n def __init__(self, email_model, corp_list=None):\n \"\"\" Initalization\n\n Keyword arguments:\n email_model -- a model that detects corporative emails\n\n \"\"\"\n self.email_model = email_model\n self.corp_list = corp_list\n","sub_path":"faro/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"374012556","text":"from more_itertools import unique_everseen\nfrom collections import Counter\nfrom lxml import etree\nimport pandas as pd\nimport numpy as np\nfrom iteration import *\nimport os\n\ndef save_yearly_data(years, dirin, dirout):\n\n\tall_data = []\n\tfor y in years:\n\t\tpaths = []\n\t\trootdir = dirin+str(y)+'/'\n\t\tprint(rootdir)\n\t\tfor subdir_month, dirs, files in os.walk(rootdir):\n\t\t\tpaths.append(subdir_month)\n\n\n\t\tyear = min(paths, key=len)\n\t\tpaths.remove(year)\n\t\tmonthly_data = []\n\t\tlist_keys = []\n\n\t\tfor path in paths:\n\t\t\tif path != year:\n\t\t\t\tprint(path)\n\t\t\t\tmonthly = get_all_monthly_data(path)\n\t\t\t\tmonthly_data.append(monthly)\n\t\t\t\tlist_keys.append(path[-13:-6])\n\t\t# df_Master = pd.concat(monthly_data, keys=list_keys)\n\n\t\tdf_Master = pd.concat(monthly_data)\n\t\tdf_Master.to_pickle(dirout+\"/df\"+str(y)+\".pkl\")\n\t\tprint(\"Yearly data was written in\" ,dirout,\"df\",str(y),\".pkl\")\n\t\tall_data.append(df_Master)\n\treturn all_data\n\n\n\ndef get_all_monthly_data(path):\n\n\ttime = path[-13:-6]\n \n\tfiles = iterate_folder(path)\n\tbanks={}\n\tdf_list= 
[]\n\tkeys_list = []\n\tfor key in files.keys():\n\t\tfor file in files[key]:\n\t\t\txml = file\n\t\t\twith open (xml) as fobj:\n\t\t\t\txml = fobj.read()\n\t\t\troot = etree.fromstring(xml)\n\n\t\t\tbank_name, bank = get_bank_data(root)\n\t\t\t# print(bank_name)\n\t\t\tbanks[bank_name]=bank\n\tfor k, v in banks.iteritems():\n\t\tdf_list.append(v)\n\t\tkeys_list.append(k)\n\tmonth = pd.concat(df_list)\n\n\t# month = pd.concat(df_list,keys=keys_list)\n\n\tmonth['time'] = time\n\tprint('all monthly data for all banks at time', time, 'was extracted')\t\n\treturn month \n\n\ndef get_bank_data(root):\n\n\tkeys = []\n\tdf_list = []\n\tfor i in range(19):\n\t\tidentifier = i+1\n\t\tROWS = get_table_description(root)\n\t\tfor dic in ROWS:\n\t\t\tif dic['TableNumber']== str(identifier):\n\t\t\t\ttablenumber = dic['TableNumber'] \n\t\tif isinstance(tablenumber, basestring):\n\t\t\tdf = get_table_data(tablenumber,root)\n\t\t\tprint(tablenumber), str(df['InstitutionDescription'].values[0])\n\t\t\tdf_list.append(df)\n\t\t\tkeys.append(tablenumber)\n\t\t\t\t# df.to_csv('/Users/Tina/Dropbox/phd/'+ str(identifier) + '.csv')\t\t\n\t\telse:\n\t\t\tpass\n\t\t\t\t \n\t\t# df.to_pickle(\"/Users/Tina/Dropbox/phd/df.pkl\") \n\tif len(df_list)>1:\n\t\t# df_Master = pd.concat(df_list, keys=keys)\n\t\tdf_Master = pd.concat(df_list)\n\t\tbank_key = str(df_Master['InstitutionDescription'].values[0])\n\t\treturn str(bank_key), df_Master # df_Master.to_csv('/Users/Tina/Dropbox/phd/alltables.csv') \n\n\ndef get_table_description(root):\n\n\ttable_description = []\n\n\tfor table in root.getchildren():\n\t\tfor elem in table.getchildren():\n\t\t\tfor key,val in elem.attrib.iteritems():\n\t\t\t\ttable_descrip = {}\n\t\t\t\tif key==\"TableDescription\" or key==\"TableNumber\":\n\t\t\t\t\ttable_descrip[key]=val\n\t\t\t\t\ttable_description.append(table_descrip)\n\n\tROWS = []\n\trows = list(zip(table_description[::2], table_description[1::2]))\n\n\tfor i in rows:\n\t\td = {}\n\t\ta, b = i\n\t\td.update(a)\n\t\td.update(b)\n\t\tROWS.append(d)\n\n\treturn ROWS\n\n\n\ndef get_table_data(tablenumber, root):\n\trows2=[]\n\trows3 = []\n\titems = []\n\n\tvalues = []\n\twhich_bank = []\n \n\tfor t in root.iter():\n\t\tif t.tag=='SARBForms':\n\t\t\twhich_bank.append(t.attrib)\n\n\tfor t in root.getchildren():\n\t\tfor elem in t.getchildren():\n\t\t\tfor key,val in elem.attrib.iteritems():\n\t\t\t\tif val == tablenumber:\n\t\t\t\t# if val==table[0][key] and len(table[0][key])<=2:\n\t\t\t\t \n\t\t\t\t\tfor item in elem.getchildren():\n\t\t\t\t\t\tfor key2, value2 in item.attrib.iteritems():\n\t\t\t\t\t\t\t# print key2, value2 \n\t\t\t\t\t\t\tif key2=='ColumnDescription':\n\t\t\t\t\t\t\t\tall2 = {}\n\t\t\t\t\t\t\t\tall2[key2]=value2\n\t\t\t\t\t\t\t\trows2.append(all2)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tfor key2, value2 in item.attrib.iteritems():\n\t\t\t\t\t\t\t# print key2, value2 \n\t\t\t\t\t\t\tif key2=='ColumnCode':\n\t\t\t\t\t\t\t\tcol3 = {}\n\t\t\t\t\t\t\t\tcol3[key2]=value2\n\t\t\t\t\t\t\t\trows3.append(col3)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\tfor i, ii in zip(rows2, rows3):\n\t\t\t\t\t\t\tfor k,v in ii.iteritems():\n\t\t\t\t\t\t\t\ti[k] = v\n\n\t\t\t\t\t\tfor value in item.getchildren():\n\t\t\t\t\t\t\tfor i in item.attrib:\n\t\t\t\t\t\t\t\tif i == 'ItemNumber':\n\t\t\t\t\t\t\t\t\titemnumber = item.attrib[i]\n\t\t\t\t\t\t\t\t\tx={i:item.attrib[i]}\n\t\t\t\t\t\t\t\t\ty=value.attrib, x\n\t\t\t\t\t\t\t\t\tvalues.append(y)\n\t\t\t\t\t\t\t\t\tbreak\t \n\t\t\t\t\t\ttempd = {}\n\t\t\t\t\t\tfor l, item in 
item.attrib.iteritems():\n\t\t\t\t\t \t\tif l==\"ItemNumber\" or l==\"ItemDescription\":\n\t\t\t\t\t \t\t\ttempd[l] = item\n\t\t\t\t\t \t\t\titems.append(tempd)\n\t\t\t\t\tbreak\n\n\tall_val = []\t\t\n\tfor i in values:\n\t\td = {}\n\t\ta, b = i\n\t\td[a.keys()[0]]=a[a.keys()[0]]\n\t\td[a.keys()[1]]=a[a.keys()[1]]\n\t\td.update(b)\n\t\tall_val.append(d)\n \t\n\n\ttotal = []\n\tcount =0\n\tfor i in items:\n\t \ti2 = dict(i)\n\n\t \tfor ro in rows2:\n\t \t\tt = ro, i2\n\t \t\ttotal.append(t)\n\t \t\tcount+=1\n\t\n\n\ttotal = list(unique_everseen(total))\n\n\tTOT = []\n\tfor i in total:\n\t\td = {}\n\t\ta, b = i\n\t\td.update(a)\n\t\td.update(b)\n\t\tTOT.append(d)\n\tfor i in TOT:\n\t\tfor ii in all_val:\n\t\t\tif i['ItemNumber'] ==ii['ItemNumber']:\n\t\t\t\tif \"000\"+i['ColumnCode']==ii['ColumnNumber']:\n\t\t\t\t\ti['Value']= ii['Value']\n\t\t\t\t\tbreak\n\n\n\ttemp=pd.DataFrame(TOT)\n\n\ttemp['TableNumber']=tablenumber\n\n\tfor i in which_bank:\n\t\tfor key in i.keys():\n\t\t\ttemp[key]=i[key]\n\n\treturn temp \n\n\n\ndef get_path(root):\n\tfrom lxml import etree\n\tparser = etree.XMLParser(recover=True)\n\ttree = etree.ElementTree(root)\n\n\tfor tag in root.iter():\n\t path = tree.getpath(tag)\n\t print(path)\n\t # print root.xpath(\"SARBForms/SARBForm/Table[1]/ColumnHeader[1]\")\n\ndef get_individual_table(id, root, output_path):\n\tdf = get_table_data(str(id),root)\n\t# df.to_csv(output_path+ str(id) + '.csv')\n\t# print 'output generated in', output_path\n\treturn df\t \n \n","sub_path":"ba900/extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":5360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"576501164","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import BernoulliNB\nimport string\nimport unicodedata\nimport requests\nfrom bs4 import BeautifulSoup\nfrom sklearn.cluster import KMeans\nimport nltk\nimport scipy as scs\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\nfrom nltk.util import ngrams\nfrom nltk import pos_tag\nfrom nltk import RegexpParser\nimport pickle\n\n\"\"\"\nInput: pd dataframe of raw features with no 'acct_type'.\nCan be applied to training AND testing data.\n\"\"\"\n\n\ndef delivery_method_categorize(data):\n data['delivery_method_0'] = data['delivery_method'] == 0\n data['delivery_method_1'] = data['delivery_method'] == 1\n data['delivery_method_3'] = data['delivery_method'] == 3\n return data\n\n\ndef payout_type_categorize(data):\n data['payout_type_check'] = data['payout_type'] == 'CHECK'\n data['payout_type_ach'] = data['payout_type'] == 'ACH'\n return data\n\n\ndef currency_categorize(data):\n data['usd'] = data['currency'] == 'USD'\n data['gbp'] = data['currency'] == 'GBP'\n data['cad'] = data['currency'] == 'CAD'\n data['aud'] = data['currency'] == 'AUD'\n data['eur'] = data['currency'] == 'EUR'\n data['nzd'] = data['currency'] == 'NZD'\n return data\n\n\ndef user_type_categorize(data):\n data['user_type_1'] = data['user_type'] == 1\n data['user_type_2'] = data['user_type'] == 2\n data['user_type_3'] = data['user_type'] == 3\n data['user_type_4'] = data['user_type'] == 4\n data['user_type_5'] = data['user_type'] == 5\n return data\n\n\ndef email_categorize(data):\n \"\"\"\n Define a \"rare_email\" domain as one that occurs 
one or zero times within the\n training data.\n \"\"\"\n emails = pd.DataFrame(data['email_domain'].value_counts() <= 1)\n emails['rare_email'] = emails['email_domain']\n common_emails = emails.index[emails['rare_email'] == False]\n data['rare_email'] = [domain not in common_emails for domain in data['email_domain']]\n return data\n\n\ndef event_data(data):\n \"\"\"\n Calculate event duration from event end and start timestamps.\n \"\"\"\n data['event_duration'] = data['event_end'] - data['event_start']\n return data\n\n\ndef listed_categorize(data):\n \"\"\"\n Categorizes the 'listed' column in the pandas dataframe.\n\n INPUT:\n - data: pandas dataframe with 'listed' column as 'y' or 'n'\n\n OUTPUT:\n - data: pandas dataframe with 'listed' column replaced with booleans\n \"\"\"\n data['listed'] = data['listed'] == 'y'\n return data\n\n\ndef country_data(data):\n \"\"\"\n Takes a pandas dataframe and does some undetermined stuff with the countries\n\n INPUT:\n - data: pandas dataframe to get country data from and add engineered\n columns to.\n\n OUTPUT:\n - data: pandas dataframe with engineered country features added.\n \"\"\"\n data['venue_country_change'] = (data['venue_country'] != data['country'])\n data['is_us'] = data['country'] == 'US'\n data['is_gb'] = data['country'] == 'GB'\n data['is_ca'] = data['country'] == 'CA'\n return data\n\n\ndef final_columns(data):\n wanted_columns = ['delivery_method_0', 'delivery_method_1', 'delivery_method_3',\n 'payout_type_check', 'payout_type_ach', 'usd', 'gbp', 'cad',\n 'aud', 'eur', 'nzd', 'user_type_1', 'user_type_2', 'user_type_3',\n 'user_type_4', 'user_type_5', 'rare_email', 'event_duration',\n 'listed', 'venue_country_change', 'is_us', 'is_gb', 'is_ca',\n 'body_length', 'channels', 'fb_published',\n 'has_analytics', 'has_logo', 'listed', 'name_length', 'show_map',\n 'user_age', 'description']\n data = data[wanted_columns]\n return data\n\n\ndef clean_data(data):\n \"\"\"\n Cleans the entire data set.\n\n INPUT:\n - data: dataframe\n \"\"\"\n clean_data = delivery_method_categorize(data)\n clean_data = country_data(data)\n clean_data = listed_categorize(data)\n clean_data = event_data(data)\n clean_data = email_categorize(data)\n clean_data = user_type_categorize(data)\n clean_data = currency_categorize(data)\n clean_data = payout_type_categorize(data)\n\n clean_data = final_columns(data)\n\n return clean_data\n","sub_path":"data_cleanup.py","file_name":"data_cleanup.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"237190495","text":"from __future__ import absolute_import\n\nimport os\nimport torch\nimport numpy as np\nimport numpy.random as npr\nfrom collections import deque\nfrom time import time\nfrom copy import deepcopy\n\nfrom vizdoom import DoomGame, Mode, ScreenFormat, ScreenResolution, GameVariable\nfrom DOOM.FrameProcessing import processImage, gameStateToTensor\nfrom DOOM.Params import doomParams\nfrom RAINBOW.ModelMemory import PrioritizedReplayMemory, LinearSchedule, GameState, blank_trans\nfrom RAINBOW.DQN import DQNRecurrent\nresultsPath = os.path.join(os.getcwd(), 'Results')\n\ndef initGameWithParams(configFilePath):\n game = DoomGame()\n game.load_config(configFilePath)\n game.set_window_visible(False)\n game.set_mode(Mode.PLAYER)\n game.set_screen_format(ScreenFormat.CRCGCB)\n game.set_screen_resolution(ScreenResolution.RES_400X225)\n game.init()\n return game\n\n\nclass DoomGameEnv(object):\n def __init__(self, 
params=doomParams):\n self.params = params\n self.frameskip = params.frameskip\n self.game = initGameWithParams(params.scenarioPath)\n self.numChannels, self.imageHeight, self.imageWidth = params.inputShape\n self.actions = self.game.get_available_buttons()\n self.numActions = len(self.actions)\n self.actions = np.identity(self.numActions, dtype=np.int32).tolist() # One-hot encoding of actions\n self.gameVariables = params.gameVariables\n self.numGameVariables = len(self.gameVariables)\n self.reset()\n\n # Return last history states\n def step(self, action):\n reward = 0\n is_done = False\n reward += self.game.make_action(action, self.frameskip)\n is_done = self.game.is_episode_finished() or self.game.is_player_dead()\n newState = self.game.get_state()\n if not is_done:\n processedState = gameStateToTensor(newState)\n self.stateBuffer.append(processedState)\n else:\n pass\n return list(self.stateBuffer), reward, is_done\n\n def reset(self):\n self.game.new_episode()\n # Start with a queue of all blank frames\n self.stateBuffer = deque([], maxlen=self.params.recurrenceHistory+self.params.numRecurrentUpdates)\n newState = self.game.get_state()\n processedState = gameStateToTensor(newState)\n for i in range(self.params.recurrenceHistory+self.params.numRecurrentUpdates):\n self.stateBuffer.append(processedState)\n return list(self.stateBuffer)\n\n def getEpisodeReward(self):\n return self.game.get_total_reward()\n\n\ndef updateTargetNet(policyNet, targetNet):\n targetNet.module.load_state_dict(policyNet.module.state_dict())\n\n# Perform the update step on my policy network\ndef optimizeNet(policyNet, targetNet, memory, optimizer, params):\n indices, states, actions, returns, nextStates, isDones, weights = memory.sample(params.batchSize)\n loss = policyNet.f_train(states, actions, returns, isDones, targetNet) # batchSize*numRecurrentUpdates\n if params.prioritizedReplay:\n loss = (loss.transpose(0,1) * weights).transpose(0,1) # Multiply by priority weights\n optimizer.zero_grad()\n loss.mean().backward()\n optimizer.step()\n\n # If using prioritized replay, update priorities\n if params.prioritizedReplay:\n memory.updatePriorities(indices, loss.detach().cpu().numpy()[:, -1])\n\n# Evaluate on episodes\ndef evalEpisode(env, policyNet, numEpisodes=10):\n policyNet.module.eval()\n episodeReturns = np.zeros(numEpisodes)\n episodeLengths = np.zeros(numEpisodes)\n with torch.no_grad():\n for episode in range(numEpisodes):\n episodeFrameCounter = 0\n isDone = False\n currentState = env.reset()\n while not isDone:\n action = policyNet.next_action(currentState)\n newState, reward, isDone = env.step(oneHotList(action, env.numActions))\n episodeFrameCounter+=1\n episodeReturns[episode] = env.getEpisodeReward()\n episodeLengths[episode] = episodeFrameCounter\n\n policyNet.module.train()\n return np.mean(episodeReturns), np.mean(episodeLengths)\n\n\n\ndef train(env, params):\n # Policy net\n policyNet = DQNRecurrent(params)\n policyNet.module.train()\n if params.double:\n # Target net\n targetNet = DQNRecurrent(params)\n updateTargetNet(policyNet, targetNet)\n targetNet.module.train()\n for parameter in targetNet.module.parameters():\n parameter.requires_grad = False\n else:\n targetNet = None\n\n memory = PrioritizedReplayMemory(params) # Create a memory buffer\n\n # Anneal beta over time\n if params.prioritizedReplay:\n beta = LinearSchedule(params.priorityBetaSteps, params.priorityBetaEnd, params.priorityBetaStart)\n else:\n beta = None\n\n # Anneal epsilon over time\n if not params.noisyLinear:\n epsilon 
= LinearSchedule(params.epsSteps, params.endEps, params.startEps) # If using noisy linear layer, this doesn't apply\n else:\n epsilon = None\n\n # Optimize with Adam\n optimFunction = torch.optim.Adam(policyNet.module.parameters(), lr=params.learningRate)\n\n frameCounter = 0\n framesBeforeTraining = params.framesBeforeTraining # Don't start training or annealing before this\n trainingFrameCounter = 0\n episodeRewards = list()\n evalEpisodeRewards = list() # For running in eval mode\n episodeLengths = list()\n evalEpisodeLengths = list() # For running in eval mode\n episodeTimes = list()\n episodeCounter = 1\n framesBetweenSaves = 200000\n\n\n while frameCounter < params.numFrames:\n print('Starting episode ' + str(episodeCounter))\n episodeFrameCounter = 0\n isDone = False\n currState = env.reset()\n startTime = time()\n while not isDone:\n\n # Periodically save\n if frameCounter % framesBetweenSaves == 0:\n try:\n saveResults(str(9), episodeRewards, episodeLengths, episodeTimes, evalEpisodeRewards, evalEpisodeLengths, policyNet)\n except:\n pass\n\n # Periodically test in eval mode\n if frameCounter % params.framesBetweenEvaluations == 0:\n print('Evaluating')\n evalReward, evalLength = evalEpisode(env, policyNet)\n evalEpisodeRewards.append(evalReward)\n evalEpisodeLengths.append(evalLength)\n # Start a new episode afterward\n episodeFrameCounter = 0\n isDone = False\n currState = env.reset()\n startTime = time()\n\n # If using noisy linear, reset noise on training frequency\n if params.noisyLinear and frameCounter % params.trainingFrequency == 0:\n policyNet.resetNoise()\n\n # Take action based on current state or on eps\n if epsilon is not None and npr.rand() < epsilon.value(trainingFrameCounter):\n action = npr.randint(0, env.numActions)\n else:\n with torch.no_grad():\n action = policyNet.next_action(currState)\n\n newState, reward, isDone = env.step(oneHotList(action, env.numActions))\n episodeFrameCounter += 1\n frameCounter += 1\n memory.append(currState[-1], action, reward, isDone) # Add last frame of game state\n currState = deepcopy(newState) # Change to next state\n # Populate with random experiences first\n if frameCounter > framesBeforeTraining:\n # If it's time to train\n if frameCounter % params.trainingFrequency == 0:\n optimizeNet(policyNet, targetNet, memory, optimFunction, params)\n\n # If it's time to update target net\n if params.double and frameCounter % params.targetUpdateFrequency == 0:\n updateTargetNet(policyNet, targetNet)\n\n # Anneal priority beta if needed\n if params.prioritizedReplay:\n memory.priority_weight = beta.value(trainingFrameCounter)\n\n trainingFrameCounter += 1\n\n episodeRewards.append(env.getEpisodeReward())\n episodeLengths.append(episodeFrameCounter)\n episodeTimes.append(time()-startTime)\n episodeCounter += 1\n\n return episodeRewards, episodeLengths, episodeTimes, evalEpisodeRewards, evalEpisodeLengths, policyNet\n\n\n\ndef oneHotList(action, numActions):\n oneHot = list(np.zeros(numActions, dtype=np.int32))\n oneHot[action] = 1\n return oneHot\n\ndef saveResults(indexStr, rewards, lengths, times, evalRewards, evalLengths, model):\n np.savez(os.path.join(resultsPath, indexStr+'results.npz'), rewards=rewards, lengths=lengths, times=times, evalRewards=evalRewards, evalLengths=evalLengths)\n torch.save(model.module.state_dict(), os.path.join(resultsPath, 'model.pth'))\n\n\n\nif __name__ == \"__main__\":\n bareParams = doomParams(0, 0, 0, 0, 1)\n noPriorityParams = doomParams(0)\n noNoisyParams = doomParams(1, 0)\n noDuelingParams = 
doomParams(1, 1, 0)\n noDoubleParams = doomParams(1, 1, 1, 0)\n noMultiParams = doomParams(1, 1, 1, 1, 1)\n rainbowParams = doomParams()\n\n paramList = [bareParams, rainbowParams, noPriorityParams, noNoisyParams, noDuelingParams, noDoubleParams, noMultiParams]\n\n for index, paramSet in enumerate(paramList):\n torch.cuda.empty_cache() # make sure GPU memory is empty\n gameEnv = DoomGameEnv(paramSet)\n rewards, lengths, times, evalRewards, evalLengths, model = train(gameEnv, paramSet)\n saveResults(str(index), rewards, lengths, times, evalRewards, evalLengths, model)\n\n","sub_path":"MyCode/doomMain.py","file_name":"doomMain.py","file_ext":"py","file_size_in_byte":9702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"77766695","text":"#!/usr/bin/env python\n\"\"\"Test the ais compatibility layer.\"\"\"\n\nimport json\nimport os\nimport re\nimport subprocess\nimport unittest\n\nimport ais.compatibility.gpsd\nimport ais.stream\nimport six\n\n\nknown_bad = set((\n 'addressed',\n 'app_id',\n 'data',\n 'eta', # TODO(schwehr): Fix this.\n 'radio',\n 'regional',\n 'reserved',\n 'structured',\n))\nprecision = 5.0\n\nknown_problems = {\n 2: set(('turn', 'status_text')),\n 9: set(['speed']),\n 15: set(['mmsi2']),\n 17: set(('lat', 'lon')),\n 20: set((\n 'increment3', 'number3', 'offset3', 'timeout3',\n 'increment4', 'number4', 'offset4', 'timeout4',\n )),\n 27: set(['status']),\n}\n\n\ndef HaveGpsdecode():\n \"\"\"Return true if the gpsdecode binary is on the path or false if not.\"\"\"\n try:\n subprocess.check_call(['gpsdecode', '-V'])\n return True\n except OSError:\n return False\n\n\ndef TextToNumber(s):\n try:\n return float(s)\n except (TypeError, ValueError):\n return s\n\n\ndef IsNumber(value):\n if isinstance(value, float):\n return True\n if isinstance(value, six.integer_types):\n return True\n return False\n\n\ndef DictDiff(a, b):\n def Compare(x, y):\n if x == y:\n return True\n x = TextToNumber(x)\n y = TextToNumber(y)\n if isinstance(x, six.string_types) and isinstance(y, six.string_types):\n # Collapse strings to just lower case a-z to avoid simple mismatches.\n new_x = re.sub(r'[^a-z]', r'', six.text_type(x).lower())\n new_y = re.sub(r'[^a-z]', r'', six.text_type(y).lower())\n if new_x == new_y:\n return True\n if IsNumber(x) and IsNumber(y):\n if abs(float(x) - float(y)) < precision:\n return True\n return False\n\n # TODO(redhog): Use sets and make this easier to follow.\n return {\n 'removed': {key: a[key] for key in a\n if key not in b and key not in known_bad},\n 'changed': {key: (a[key], b[key]) for key in a\n if key in b\n and key not in known_bad\n and not Compare(a[key], b[key])},\n 'added': {key: b[key] for key in b\n if key not in a and key not in known_bad}\n }\n\n\nclass GPSDCompatibility(unittest.TestCase):\n\n def setUp(self):\n self.dir = os.path.split(__file__)[0]\n self.nmea = os.path.join(self.dir, 'typeexamples.nmea')\n self.json = os.path.join(self.dir, 'typeexamples.gpsdecode.json')\n\n subprocess.check_call('gpsdecode < %s > %s' % (self.nmea, self.json),\n shell=True)\n\n def tearDown(self):\n os.unlink(self.json)\n\n @unittest.skipIf(not HaveGpsdecode(), 'gpsdecode not in the path')\n def testAll(self):\n def Gpsd():\n with open(self.json) as f:\n for msg in f:\n yield json.loads(msg)\n\n def Libais():\n with open(os.path.join(self.dir, 'typeexamples.nmea')) as f:\n for msg in ais.stream.decode(f):\n yield ais.compatibility.gpsd.mangle(msg)\n\n g = iter(Gpsd())\n a = iter(Libais())\n\n try:\n while 
True:\n gmsg = six.advance_iterator(g)\n amsg = six.advance_iterator(a)\n while amsg['type'] != gmsg['type']:\n amsg = six.advance_iterator(a)\n\n if gmsg['type'] in known_problems:\n for key in known_problems[gmsg['type']]:\n if key in gmsg: del gmsg[key]\n if key in amsg: del amsg[key]\n\n diff = DictDiff(gmsg, amsg)\n self.assertFalse(diff['changed'])\n self.assertFalse(\n diff['removed'],\n 'Removed not empty: %s\\n %s\\n %s' % (\n diff['removed'],\n amsg,\n gmsg))\n\n except StopIteration:\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_compatibility_gpsd.py","file_name":"test_compatibility_gpsd.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"435999852","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jul 31 08:31:37 2018\r\n\r\n@author: Duncan\r\n\r\nMCMC Implementation Using Metropolis Hastings algorithm, for IFN alpha and IFN\r\nbeta model. This code is not generalizable at the moment. I intend to eventually\r\ndo this, but for now it is pretty much only useful to me.\r\n\"\"\"\r\nimport os\r\nscript_dir = os.path.dirname(__file__)\r\nresults_dir = os.path.join(script_dir, 'MCMC_Results/')\r\nchain_results_dir = results_dir+'Chain_Results/'\r\n\r\n\r\nif not os.path.isdir(results_dir):\r\n os.makedirs(results_dir)\r\nif not os.path.isdir(chain_results_dir):\r\n os.makedirs(chain_results_dir)\r\n \r\nimport Experimental_Data as ED\r\n# Global data import since this script will be used exclusively on IFN data \r\nIFN_exps = [ED.data.loc[(ED.data.loc[:,'Dose (pM)']==10) & (ED.data.loc[:,'Interferon']==\"Alpha\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==10) & (ED.data.loc[:,'Interferon']==\"Beta\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==90) & (ED.data.loc[:,'Interferon']==\"Alpha\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==90) & (ED.data.loc[:,'Interferon']==\"Beta\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==600) & (ED.data.loc[:,'Interferon']==\"Alpha\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==600) & (ED.data.loc[:,'Interferon']==\"Beta\"),['0','5','15','30','60']].values[0]]\r\n\r\nIFN_sigmas =[ED.data.loc[(ED.data.loc[:,'Dose (pM)']==10) & (ED.data.loc[:,'Interferon']==\"Alpha_std\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==10) & (ED.data.loc[:,'Interferon']==\"Beta_std\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==90) & (ED.data.loc[:,'Interferon']==\"Alpha_std\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==90) & (ED.data.loc[:,'Interferon']==\"Beta_std\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==600) & (ED.data.loc[:,'Interferon']==\"Alpha_std\"),['0','5','15','30','60']].values[0],\r\n ED.data.loc[(ED.data.loc[:,'Dose (pM)']==600) & (ED.data.loc[:,'Interferon']==\"Beta_std\"),['0','5','15','30','60']].values[0]]\r\n\r\n\r\nimport numpy as np\r\nfrom scipy.optimize import minimize\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns; \r\nsns.set(color_codes=True)\r\nsns.set_style(\"darkgrid\")\r\nimport pandas as pd\r\n\r\nfrom pysb.export import export\r\nfrom multiprocessing import Process, Queue, JoinableQueue, cpu_count\r\nimport itertools\r\nimport time\r\n\r\n# 
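=============================================================================\r\n# Added sketch (not part of the original pipeline): the bare Metropolis-\r\n# Hastings accept/reject rule that get_acceptance_rate() and mh() below both\r\n# implement inline. Scores here are -log(probability), so lower is better;\r\n# asymmetry_factor corrects for the asymmetric lognormal proposals.\r\n# =============================================================================\r\ndef mh_accept_sketch(old_score, new_score, asymmetry_factor=1.0):\r\n # always accept an improvement; otherwise accept with probability\r\n # exp(-(new_score-old_score)) scaled by the proposal asymmetry correction\r\n if new_score < old_score:\r\n return True\r\n return np.random.rand() < np.exp(-(new_score-old_score))*asymmetry_factor\r\n\r\n# 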
=============================================================================\r\n# Takes an initial condition and generates nChains randomized initial conditions\r\n# from prior distributions. This is not generalized, it is designed\r\n# specifically for IFN alpha and IFN beta model.\r\n# Inputs:\r\n# theta_0 (list) = the initial parameter vector, defining priors to draw from\r\n# priors_dict={'variable_name':[minval,maxval,logmean,logstd]}\r\n# nChains (int) = the number of unique points to start from (ie. number of chains)\r\n# Returns:\r\n# theta_list (list) = list of parameter vectors \r\n# =============================================================================\r\ndef disperse_chains(theta_0, priors_dict, nChains):\r\n theta_list=[]\r\n for j in range(nChains):\r\n new_theta=[]\r\n for parameter in theta_0:\r\n if parameter[0] not in priors_dict.keys():#Can't disperse variables without priors\r\n new_theta.append(parameter)\r\n elif priors_dict[parameter[0]][2]!=None:\r\n new_theta.append([parameter[0],\r\n np.random.lognormal(mean=priors_dict[parameter[0]][2], \r\n sigma=priors_dict[parameter[0]][3]),\r\n parameter[2],parameter[3]])\r\n else:\r\n new_theta.append([parameter[0],\r\n np.random.uniform(low=priors_dict[parameter[0]][0], \r\n high=priors_dict[parameter[0]][1]),\r\n parameter[2],parameter[3]]) \r\n theta_list.append(new_theta)\r\n return theta_list\r\n\r\n\r\n# =============================================================================\r\n# get_prior_logp() scores the variables against the prior distribution given \r\n# variables = [['name',value],['name',value]....]\r\n# priors_dict={'variable_name':[minval,maxval,logmean,logstd]}\r\n# =============================================================================\r\ndef get_prior_logp(variables, priors_dict):\r\n # Check bounds on parameters\r\n for variable in variables:\r\n # Make an exception for variables not in priors_dict (usually kd3, k_d3)\r\n name=variable[0]\r\n val=variable[1]\r\n if name in priors_dict.keys(): \r\n if val < priors_dict[name][0] or val > priors_dict[name][1]:\r\n return 1E8\r\n # Otherwise calculate log probability of parameter values\r\n logp = 0\r\n for i in range(len(variables)):\r\n name=variables[i][0] \r\n val=variables[i][1] \r\n if name in priors_dict.keys() and priors_dict[name][2]!=None:\r\n logp += ((np.log(val)-priors_dict[name][2])/priors_dict[name][3])**2 \r\n return logp\r\n \r\n# =============================================================================\r\n# logp_helper() is a target function for scipy optimization of scale factor gamma\r\n# =============================================================================\r\ndef logp_helper(gamma, alpha_parameters, beta_parameters, I_index_Alpha, I_index_Beta):\r\n # import models\r\n import ODE_system_alpha\r\n alpha_mod = ODE_system_alpha.Model()\r\n import ODE_system_beta\r\n beta_mod = ODE_system_beta.Model()\r\n # set constants\r\n NA = 6.022E23\r\n volEC = 1E-5 \r\n t=[0,5*60,15*60,30*60,60*60]\r\n all_sims=[] \r\n # run simulations under experimental conditions\r\n alpha_parameters[I_index_Alpha] = NA*volEC*10E-12\r\n (_, sim) = alpha_mod.simulate(t, param_values=alpha_parameters)\r\n all_sims.append(sim['TotalpSTAT'])\r\n beta_parameters[I_index_Beta] = NA*volEC*10E-12\r\n (_, sim) = beta_mod.simulate(t, param_values=beta_parameters)\r\n all_sims.append(sim['TotalpSTAT'])\r\n \r\n alpha_parameters[I_index_Alpha] = NA*volEC*90E-12\r\n (_, sim) = alpha_mod.simulate(t, param_values=alpha_parameters)\r\n all_sims.append(sim['TotalpSTAT'])\r\n 
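# beta response at the 90 pM dose; the order of all_sims must stay aligned with IFN_exps above\r\n 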
beta_parameters[I_index_Beta] = NA*volEC*90E-12\r\n (_, sim) = beta_mod.simulate(t, param_values=beta_parameters)\r\n all_sims.append(sim['TotalpSTAT'])\r\n\r\n alpha_parameters[I_index_Alpha] = NA*volEC*600E-12\r\n (_, sim) = alpha_mod.simulate(t, param_values=alpha_parameters)\r\n all_sims.append(sim['TotalpSTAT'])\r\n beta_parameters[I_index_Beta] = NA*volEC*600E-12\r\n (_, sim) = beta_mod.simulate(t, param_values=beta_parameters)\r\n all_sims.append(sim['TotalpSTAT'])\r\n logp = 0\r\n for i in range(len(all_sims)):\r\n logp += np.sum(np.square(np.divide(np.subtract(np.multiply(gamma,all_sims[i]),IFN_exps[i]),IFN_sigmas[i])))\r\n return logp\r\n# =============================================================================\r\n# get_likelihood_logp() is designed specifically for IFN alpha and IFN beta model \r\n# least-squares likelihood with data used in paper\r\n# =============================================================================\r\ndef get_likelihood_logp(fit_list):\r\n # import models\r\n import ODE_system_alpha\r\n alpha_mod = ODE_system_alpha.Model()\r\n import ODE_system_beta\r\n beta_mod = ODE_system_beta.Model()\r\n # Build parameter lists\r\n if 'kd4' in [el[0] for el in fit_list]:\r\n kd4Index=[el[0] for el in fit_list].index('kd4')\r\n q1 = 3.321155762205247e-14/1\r\n q2 = 4.98173364330787e-13/0.015\r\n q4 = 3.623188E-4/fit_list[kd4Index][1]\r\n q3 = q2*q4/q1\r\n kd3 = 3.623188E-4/q3 \r\n fit_list.insert(kd4Index+1,['kd3',kd3]) \r\n if 'k_d4' in [el[0] for el in fit_list]:\r\n k_d4Index = [el[0] for el in fit_list].index('k_d4')\r\n q_1 = 4.98E-14/0.03\r\n q_2 = 8.30e-13/0.002\r\n q_4 = 3.623188e-4/fit_list[k_d4Index][1]\r\n q_3 = q_2*q_4/q_1\r\n k_d3 = 3.623188e-4/q_3\r\n fit_list.insert(k_d4Index+1,['k_d3',k_d3]) \r\n alpha_parameters=[]\r\n beta_parameters=[]\r\n for p in alpha_mod.parameters:\r\n isInList=False\r\n for y in fit_list:\r\n if p[0]==y[0]:\r\n alpha_parameters.append(y[1])\r\n isInList=True\r\n break\r\n if isInList==False:\r\n alpha_parameters.append(p.value)\r\n for p in beta_mod.parameters:\r\n isInList=False\r\n for y in fit_list:\r\n if p[0]==y[0]:\r\n beta_parameters.append(y[1])\r\n isInList=True\r\n break\r\n if isInList==False:\r\n beta_parameters.append(p.value)\r\n I_index_Alpha = [el[0] for el in alpha_mod.parameters].index('I')\r\n I_index_Beta = [el[0] for el in beta_mod.parameters].index('I')\r\n # optimize choice of scale factor gamma:\r\n # set initial guess for gamma at 40 to ensure we get the largest local minimum in the optimization\r\n opt = minimize(logp_helper,[40],args=(alpha_parameters, beta_parameters, I_index_Alpha, I_index_Beta))\r\n gamma = opt['x'].item()\r\n logp = opt['fun']\r\n return [logp, gamma]\r\n \r\n# =============================================================================\r\n# variables (list) = list of variables in the form [['name',value],...]\r\n# priors_dict (dict) = dictionary defining priors for variables \r\n# beta, rho (floats) = values of beta and rho used in simulation\r\n# Returns -log(probability of model)\r\n# =============================================================================\r\ndef score_model(variables, priors_dict, beta, rho, debugging=False):\r\n meanR=2E3\r\n delR=0\r\n remove_meanR=False\r\n remove_delR=False\r\n for el in variables:\r\n if el[0]=='meanR':\r\n meanR=el[1]\r\n meanRIndex=[el[0] for el in variables].index('meanR')\r\n remove_meanR=True \r\n elif el[0]=='delR':\r\n delR=el[1]\r\n delRIndex=[el[0] for el in variables].index('delR')\r\n remove_delR=True\r\n 
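# recover the individual receptor counts R1 and R2 from the (meanR, delR) parametrization\r\n 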
R1=meanR-delR/2\r\n R2=meanR+delR/2\r\n if remove_meanR==True and remove_delR==True:\r\n del variables[min(meanRIndex,delRIndex)]\r\n del variables[max(meanRIndex,delRIndex)-1]\r\n variables.insert(meanRIndex,['R1',R1]) \r\n variables.insert(meanRIndex,['R2',R2])\r\n elif remove_meanR==True:\r\n del variables[meanRIndex]\r\n variables.insert(meanRIndex,['R2',R2])\r\n variables.insert(meanRIndex,['R1',R1])\r\n elif remove_delR==True:\r\n del variables[delRIndex]\r\n variables.insert(delRIndex,['R2',R2])\r\n variables.insert(delRIndex,['R1',R1])\r\n \r\n [lk,gamma] = get_likelihood_logp(variables)\r\n pr = get_prior_logp(variables, priors_dict)\r\n if debugging==True:\r\n print(str(lk)+\", \"+str(pr))\r\n return [(lk/rho+pr)/beta, gamma]\r\n\r\n# =============================================================================\r\n# J() is the jumping distribution\r\n# Inputs:\r\n# theta (list) = elements of the form ['name', current value, std dev for jumps,'distribution']\r\n# Jumping distributions:\r\n# log - used for reaction rates; lognormal distributed jump lengths\r\n# linear - used for concentrations/species counts; normally distributed jump lengths \r\n# uniform - used for data scale factor, for example; restricted uniform distributed jump lengths\r\n# Returns:\r\n# new_theta (list) = same form as theta but with new values stored in second position of subarray \r\n# =============================================================================\r\ndef J(theta):\r\n new_theta=[]\r\n for parameter in theta:\r\n if parameter[3]=='log': # lognormal random walk\r\n new_theta.append([parameter[0],\r\n np.random.lognormal(mean=np.log(parameter[1]), sigma=parameter[2]),\r\n parameter[2],parameter[3]])\r\n elif parameter[3]=='linear': # normal random walk (restricted by prior to avoid unphysical values)\r\n new_theta.append([parameter[0],\r\n np.random.normal(loc=parameter[1], scale=parameter[2]),\r\n parameter[2],parameter[3]])\r\n elif parameter[3]=='uniform': # restricted uniform distributed random walk\r\n new_theta.append([parameter[0],\r\n np.random.uniform(low=parameter[2], high=min(parameter[1]*1.4,parameter[4])),\r\n parameter[2],parameter[3],parameter[4]])\r\n return new_theta\r\n\r\n# =============================================================================\r\n# get_acceptance_rate() performs 100 samples to estimate the acceptance rate with \r\n# current hyperparameters. Returns the acceptance rate as a percentage (eg. 
24) \r\n# and the theta with variances that were good enough \r\n# =============================================================================\r\ndef get_acceptance_rate(theta, priors_dict, beta, rho):\r\n old_theta=theta\r\n [old_score, old_gamma] = score_model([[old_theta[j][0],old_theta[j][1]] for j in range(len(old_theta))], priors_dict, beta, rho)\r\n asymmetric_indices = [el[0] for el in enumerate(old_theta) if el[1][3]=='log']\r\n acceptance = 0 \r\n for i in range(100):\r\n proposal = J(old_theta)\r\n [new_score, new_gamma] = score_model([[proposal[j][0],proposal[j][1]] for j in range(len(proposal))], priors_dict, beta, rho)\r\n asymmetry_factor = 1 # log normal proposal distributions are asymmetric\r\n for j in asymmetric_indices:\r\n asymmetry_factor *= proposal[j][1]/old_theta[j][1]\r\n if new_score < old_score or np.random.rand() < np.exp(-(new_score-old_score))*asymmetry_factor:\r\n # if rand() < probability of proposed/probability of old\r\n old_theta=proposal\r\n old_score = new_score\r\n old_gamma = new_gamma\r\n acceptance += 1\r\n return (acceptance, old_theta, old_gamma) # = acceptance/100*100\r\n\r\n# =============================================================================\r\n# hyperparameter_fitting() attempts to alter the input temperature\r\n# to achieve a good acceptance rate during simulation\r\n# Inputs:\r\n# theta_0 and beta - see MCMC() documentation\r\n# max_attempts (int) = the max number of attempts to get a good acceptance rate\r\n# =============================================================================\r\ndef hyperparameter_fitting(theta_0, priors_dict, beta, rho, max_attempts):\r\n print(\"Choosing optimal temperature\")\r\n theta = [el for el in theta_0]\r\n if max_attempts==0:\r\n return (theta_0, beta)\r\n # Try to find variances that give a good acceptance rate\r\n for attempt in range(max_attempts):\r\n print(\"Attempt {}\".format(attempt+1))\r\n acceptance, new_theta, new_gamma = get_acceptance_rate(theta, priors_dict, beta, rho)\r\n \r\n if acceptance >= 15 and acceptance <= 50:\r\n print(\"Acceptance rate was {}%\".format(acceptance))\r\n print(\"New temperature will be: \"+str(beta))\r\n return (new_theta, beta)\r\n else:\r\n if acceptance < 15:\r\n print(\"Acceptance rate was too low: {}%\".format(acceptance))\r\n beta = 2*beta\r\n if acceptance > 50:\r\n print(\"Acceptance rate was too high: {}%\".format(acceptance))\r\n beta = 0.75*beta\r\n raise RuntimeError(\" Failed to optimize hyperparameters.\\n\\\r\n Please initialise with different variances or temperatures,\\n\\\r\n or check uniform prior ranges, and try again.\")\r\n \r\n# =============================================================================\r\n# plot_parameter_distributions() creates a kde plot for each parameter\r\n# Inputs:\r\n# df (DataFrame or list of DataFrames) = the chain or chains that were simulated\r\n# title (string) = default is 'parameter_distributions.pdf'\r\n# save (Boolean) = whether or not to save the plot (default is True)\r\n# =============================================================================\r\ndef plot_parameter_distributions(df, title='parameter_distributions.pdf', save=True):\r\n # different plot aesthetics if multiple chains vs one chain\r\n if type(df)==list:\r\n k = len(df[0].columns) # total number subplots\r\n n = 2 # number of chart columns\r\n m = (k - 1) // n + 1 # number of chart rows\r\n fig, axes = plt.subplots(m, n, figsize=(n * 5, m * 3))\r\n if k % 2 == 1: # avoids extra empty subplot\r\n 
axes[-1][n-1].set_axis_off()\r\n palette = itertools.cycle(sns.color_palette(\"GnBu_d\", 10)) # make chain colours nice\r\n for j in range(len(df)):\r\n for i, (name, col) in enumerate(df[j].iteritems()):\r\n color_code = next(palette)\r\n r, c = i // n, i % n\r\n ax = axes[r, c] # get axis object\r\n # determine whether or not to plot on log axis\r\n try:\r\n if abs(int(np.log10(np.max(col)))-int(np.log10(np.min(col)))) >= 4:\r\n ax.set(xscale='log', yscale='linear')\r\n except (ValueError, OverflowError):\r\n print('Some parameters were negative-valued or infinite')\r\n # Plot histogram with kde for chain\r\n sns.distplot(col, ax=ax, hist=False, kde=True, \r\n color = color_code, \r\n kde_kws={'linewidth': 4})\r\n fig.tight_layout() \r\n if save==True:\r\n if title=='':\r\n plt.savefig(results_dir+'parameter_distributions.pdf')\r\n else:\r\n plt.savefig(results_dir+title+'.pdf')\r\n return [fig, axes]\r\n\r\n else:\r\n k = len(df.columns) # total number subplots\r\n n = 2 # number of chart columns\r\n m = (k - 1) // n + 1 # number of chart rows\r\n fig, axes = plt.subplots(m, n, figsize=(n * 5, m * 3))\r\n if k % 2 == 1: # avoids extra empty subplot\r\n axes[-1][n-1].set_axis_off()\r\n for i, (name, col) in enumerate(df.iteritems()):\r\n r, c = i // n, i % n\r\n ax = axes[r, c] # get axis object\r\n # determine whether or not to plot on log axis\r\n if abs(int(np.log10(np.max(col)))-int(np.log10(np.min(col)))) >= 4:\r\n ax.set(xscale='log', yscale='linear')\r\n # Plot histogram with kde for chain\r\n sns.distplot(col, ax=ax, hist=True, kde=True, \r\n color = 'darkblue', \r\n hist_kws={'edgecolor':'black'},\r\n kde_kws={'linewidth': 4})\r\n fig.tight_layout() \r\n if save==True:\r\n if title=='':\r\n plt.savefig(results_dir+'parameter_distributions.pdf')\r\n else:\r\n plt.savefig(results_dir+title+'.pdf')\r\n return (fig, axes)\r\n# =============================================================================\r\n# Computes the Gelman-Rubin statistic for each parameter to test for convergence\r\n# Inputs: chain_record (list of Dataframes) = list of Markov chains\r\n# Outputs: Gelman-Rubin_Statistics.csv file summarizing the test results\r\n# Returns: stats_list (list) = summary of test results, [['variable name', Rhat], ...] 
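\r\n#\r\n# Added note: sketch of the computation for m chains with n samples each:\r\n# W = mean within-chain variance, B = n*Var(chain means),\r\n# Var = (1 - 1/n)*W + B/n and Rhat = sqrt(Var/W); Rhat near 1 indicates convergence.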
\r\n# =============================================================================\r\ndef gelman_rubin_convergence(chain_record):\r\n numChains = len(chain_record)\r\n column_names = list(chain_record[0].columns.values)\r\n stats_list = []\r\n n = min([chain_record[i].shape[0] for i in range(len(chain_record))])\r\n for variable in column_names:\r\n chain_mean = [chain_record[i][variable].mean() for i in range(numChains)]\r\n W = np.mean([(chain_record[i][variable].std())**2 for i in range(numChains)])\r\n B = chain_record[0].shape[0]*np.std(chain_mean, ddof=1)**2\r\n Var = (1-1/n)*W+B/n\r\n Rhat = np.sqrt(Var/W)\r\n stats_list.append([variable, Rhat])\r\n df = pd.DataFrame.from_records(stats_list, columns=['variable','GR Statistic'])\r\n df.to_csv(results_dir+'Gelman-Rubin_Statistics.csv')\r\n return stats_list\r\n\r\n# =============================================================================\r\n# get_parameter_distributions() takes a chain or list of chains and performs\r\n# data formatting such as burn in, thinning, kde plots for each parameter,\r\n# and statistical summaries\r\n# Inputs:\r\n# pooled_results (list) = list of Markov chains from MCMC\r\n# burn_rate (float in [0,1)) = the fraction of each chain to discard as burn in\r\n# down_sample (int) = the stride length for using draws from the chain\r\n# Outputs:\r\n# kde plot of each parameter's posterior distribution, saved as parameter_distributions.pdf\r\n# posterior_samples.csv = a file containing all the independent posterior samples\r\n# Returns:\r\n# the combined chains' posterior samples\r\n# =============================================================================\r\ndef get_parameter_distributions(pooled_results, burn_rate, down_sample):\r\n sns.palplot(sns.color_palette(\"GnBu_d\"))\r\n chain_record=[] # for plotting chain histograms\r\n total_record=[] # to save all results to allow reanalysis\r\n chain_Lengths=[] # list of ints, each a chain's length; used in reanalysis\r\n GR_record = [] # record of chains after burn-in but before thinning, used in GR diagnostic\r\n for i in pooled_results:\r\n total_record+=i\r\n chain_Lengths.append(len(i))\r\n complete_samples = pd.DataFrame([[el[1] for el in r] for r in total_record],\r\n columns=[l[0] for l in total_record[0]])\r\n complete_samples.to_csv(results_dir+\"complete_samples.csv\")\r\n with open(results_dir+'chain_lengths.txt', 'w') as f:\r\n f.write(str(chain_Lengths)[1:-1])\r\n first=True\r\n for chain in range(len(pooled_results)): #iterate over the chains\r\n model_record = pooled_results[chain] #current chain of interest\r\n # Build dataframe\r\n # Account for burn in and down sampling\r\n sample_record = model_record[int(len(model_record)*burn_rate):-1:down_sample]\r\n GR_list = model_record[int(len(model_record)*burn_rate):-1]\r\n\r\n GR_record.append(pd.DataFrame([[el[1] for el in r] for r in GR_list],\r\n columns=[l[0] for l in GR_list[0]]))\r\n if first==True:\r\n combined_samples = pd.DataFrame([[el[1] for el in r] for r in sample_record],\r\n columns=[l[0] for l in sample_record[0]])\r\n first=False\r\n else: # otherwise add to existing DataFrame\r\n combined_samples = combined_samples.append(pd.DataFrame([[el[1] for el in r] for r in sample_record],\r\n columns=[l[0] for l in sample_record[0]]))\r\n if len(pooled_results)==1: #If there was only one chain, pass DataFrame to plot function\r\n # Plot parameter distributions\r\n plot_parameter_distributions(pd.DataFrame([[el[1] for el in r] for r in sample_record],\r\n columns=[l[0] for l in sample_record[0]]))\r\n else: 
# add DataFrame to list of prepared chains to plot later\r\n chain_record.append(pd.DataFrame([[el[1] for el in r] for r in sample_record],\r\n columns=[l[0] for l in sample_record[0]]))\r\n # Plot multiple chains on same axes if there were multiple chains\r\n if len(pooled_results)>1:\r\n plot_parameter_distributions(chain_record)\r\n gelman_rubin_convergence(GR_record) #Pretty sure I should use full chains for GR-diagnostic\r\n \r\n # Save combined chains dataframe\r\n combined_samples.to_csv(results_dir+\"posterior_samples.csv\") \r\n print(\"After thinning and burn-in, you sampled {} times from the posterior distribution\".format(len(combined_samples))) \r\n # Return the downsampled data frame\r\n return combined_samples\r\n\r\n# =============================================================================\r\n# plot_parameter_autocorrelations() plots the autocorrelation of each parameter\r\n# from a given chain, to check that downsampling was sufficient to create\r\n# independent samples from the posterior\r\n# Inputs:\r\n# df (DataFrame) = the downsampled draws from posterior, typically the object \r\n# returned from get_parameter_distributions()\r\n# Outputs:\r\n# chain_autocorrelation.pdf (file) = the plots for the input chain, saved as a pdf\r\n# Returns: nothing \r\n# =============================================================================\r\ndef plot_parameter_autocorrelations(df):\r\n k = len(df.columns) # total number subplots\r\n n = 2 # number of chart columns\r\n m = (k - 1) // n + 1 # number of chart rows\r\n fig, axes = plt.subplots(m, n, figsize=(n * 5, m * 3))\r\n if k % 2 == 1: # avoids extra empty subplot\r\n axes[-1][n-1].set_axis_off()\r\n for i, (name, col) in enumerate(df.iteritems()):\r\n r, c = i // n, i % n\r\n ax = axes[r, c] # get axis object\r\n ax.set_title(name)\r\n corrLen=30\r\n if len(col)<30: corrLen=len(col)-1\r\n ax.acorr(col, maxlags=corrLen)\r\n ax.set_xlabel('Lag')\r\n ax.set_ylabel('Autocorrelation')\r\n fig.tight_layout() \r\n plt.savefig(results_dir+'chain_autocorrelation.pdf')\r\n \r\n# =============================================================================\r\n# get_summary_statistics() computes the summary statistics for each parameter \r\n# from a chain, and writes this summary to a csv file titled parameter_summary.csv\r\n# Inputs:\r\n# df (DataFrame) = the downsampled draws from posterior, typically the object \r\n# returned from get_parameter_distributions()\r\n# Returns: nothing\r\n# =============================================================================\r\ndef get_summary_statistics(df):\r\n headers = ['name', 'mean', 'std dev', '2.5%', '25%', '75%', '97.5%']\r\n summary=[]\r\n for (name, col) in df.iteritems():\r\n summary.append([name, np.mean(col), np.std(col), np.percentile(col, 2.5), np.percentile(col, 25), np.percentile(col, 75), np.percentile(col, 97.5)])\r\n summary_df = pd.DataFrame.from_records(summary,columns=headers)\r\n summary_df.to_csv(results_dir+\"parameter_summary.csv\")\r\n\r\n\r\n# =============================================================================\r\n# check_proposals() allows the user to get a sense of what the random walk\r\n# for each parameter looks like with the given values\r\n# Inputs:\r\n# theta (list) = the input parameter vector\r\n# n (int) = the number of steps to take in the sample random walk\r\n# =============================================================================\r\ndef check_proposals(theta, n):\r\n walk=[theta]\r\n for j in range(n):\r\n 
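# extend the walk by one proposal drawn from the jumping distribution J\r\n 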
walk.append(J(walk[j]))\r\n rearrange = [[walk[i][j][1] for i in range(len(walk))] for j in range(len(theta))]\r\n \r\n k = len(theta) # total number subplots\r\n n = 2 # number of chart columns\r\n m = (k - 1) // n + 1 # number of chart rows\r\n fig, axes = plt.subplots(m, n, figsize=(n * 5, m * 3))\r\n if k % 2 == 1: # avoids extra empty subplot\r\n axes[-1][n-1].set_axis_off()\r\n for ind, w in enumerate(rearrange):\r\n r, c = ind // n, ind % n\r\n ax = axes[r, c] # get axis object\r\n if theta[r*n+c][3]=='log':\r\n ax.set(xscale='linear',yscale='log')\r\n ax.plot(range(len(w)), w) \r\n ax.set_title(theta[r*n+c][0])\r\n plt.savefig(results_dir+'typical_priors_rw.pdf')\r\n\r\n# =============================================================================\r\n# Sanity checks for MCMC() \r\n# =============================================================================\r\ndef mcmcChecks(n, theta_0, beta, rho, chains, burn_rate, down_sample, max_attempts):\r\n if burn_rate>1:\r\n raise ValueError('Burn rate should be in the range [0,1)')\r\n if down_sample > n:\r\n raise ValueError('Cannot thin more than there are samples')\r\n if type(theta_0[0][0])!=str or type(theta_0[0][3])!=str:\r\n raise ValueError('Order of inputs for theta_0 seems incorrect')\r\n if type(chains) != int:\r\n raise ValueError('Type for input \"chains\" must be int')\r\n if max_attempts < 0:\r\n raise ValueError('max_attempts must be positive')\r\n lenPost = int(np.floor([chains*(n+1)*(1-burn_rate)/down_sample])[0])\r\n print(\"It's estimated this simulation will produce {} posterior samples.\".format(lenPost))\r\n return True\r\n\r\n# =============================================================================\r\n# MCMC() takes an IFN model and fits it using Markov Chain Monte Carlo\r\n# Inputs: \r\n# n (int) = number of samples to collect per chain\r\n# theta_0 (list) = the initial guesses and jumping distribution definitions for each parameter to fit\r\n# Order of theta_0 is [kpa, kSOCSon, kd4, k_d4, R1, R2] \r\n# eg. 
[['kpa',1E-6,0.2,'log'],['R2',2E3,250,'linear'],['kSOCSon',4,2,'uniform',40]]\r\n# priors_dict (dict) = dictionary defining priors for variables\r\n# priors_dict={'variable_name':[minval,maxval,logmean,logstd]} \r\n# beta (float) = effectively temperature, this factor controls the \r\n# tolerance of the probabilistic parameter search\r\n# rho (float) = the scale factor for Bayesian cost, to make priors more important\r\n# chains (int) = number of unique Markov chains to simulate \r\n# Optional Inputs: \r\n# burn_rate (float) = initial fraction of samples to discard as 'burn in'\r\n# default is to discard the first 10% \r\n# down_sample (int) = step size for down sampling to reduce autocorrelation\r\n# default is 1 (no down sampling unless user specifies) \r\n# max_attempts (int) = the number of attempts to try and choose hyperparameters\r\n# default is 6 \r\n# pflag (Boolean) = plot a typical random walk for each parameter after hyperparameter selection \r\n# =============================================================================\r\ndef mh(ID, jobs, result, countQ):\r\n while True:\r\n mGet = jobs.get()\r\n if mGet is None:\r\n break\r\n hyper_theta, beta, rho, n, priors_dict = mGet\r\n model_record=[hyper_theta]\r\n asymmetric_indices = [el[0] for el in enumerate(hyper_theta) if el[1][3]=='log']\r\n [old_score, old_gamma] = score_model([[model_record[0][j][0],model_record[0][j][1]] for j in range(len(model_record[0]))], priors_dict, beta, rho)\r\n gamma_list=[old_gamma]\r\n old_index = 0\r\n acceptance = 0\r\n attempts = 0\r\n # Metropolis-Hastings algorithm\r\n progress_bar = n/10\r\n while acceptance < n: # sample until n proposals have been accepted\r\n attempts += 1\r\n if acceptance > progress_bar: # periodic progress report\r\n print(\"{:.1f}% done\".format(acceptance/n*100))\r\n print(\"Chain {} acceptance rate = {:.1f}%\".format(ID, acceptance/attempts*100))\r\n with open(results_dir+'progress.txt','a') as f:\r\n f.write(\"Chain {} is {:.1f}% done, currently averaging {:.1f}% acceptance.\\n\".format(ID, acceptance/n*100,acceptance/attempts*100))\r\n with open(chain_results_dir+str(ID)+'chain.txt','w') as g:\r\n temp_record = []\r\n for i in range(len(model_record)):\r\n temp_record.append(model_record[i]+[['gamma',gamma_list[i]]])\r\n g.write(str(temp_record))\r\n progress_bar += n/10 \r\n proposal = J(model_record[old_index])\r\n [new_score, new_gamma] = score_model([[proposal[j][0],proposal[j][1]] for j in range(len(proposal))], priors_dict, beta, rho)\r\n asymmetry_factor = 1 # log normal proposal distributions are asymmetric\r\n for j in asymmetric_indices:\r\n asymmetry_factor *= proposal[j][1]/model_record[old_index][j][1]\r\n if new_score < old_score or np.random.rand() < np.exp(-(new_score-old_score))*asymmetry_factor:\r\n model_record.append(proposal)\r\n gamma_list.append(new_gamma) \r\n old_score = new_score\r\n old_index += 1\r\n acceptance += 1\r\n for i in range(len(model_record)):\r\n model_record[i].append(['gamma',gamma_list[i]])\r\n result.put(model_record)\r\n countQ.put([ID,acceptance/attempts*100])\r\n\r\ndef MCMC(n, theta_0, priors_dict, beta, rho, chains, burn_rate=0.1, down_sample=1, max_attempts=6, \r\n pflag=True, cpu=None, randomize=True):\r\n # Check input parameters\r\n mcmcChecks(n, theta_0, beta, rho, chains, burn_rate, down_sample, max_attempts)\r\n print(\"Performing MCMC Analysis\")\r\n # Selecting optimal temperature\r\n hyper_theta, beta = hyperparameter_fitting(theta_0, priors_dict, beta, rho, max_attempts)\r\n if pflag==True:\r\n check_proposals(hyper_theta, 50)\r\n # Overdisperse chains\r\n if randomize==True:\r\n print(\"Dispersing chains\")\r\n if 
chains > 1:\r\n chains_list = disperse_chains(hyper_theta, priors_dict, chains)\r\n else:\r\n chains_list = [hyper_theta]\r\n else:\r\n chains_list = [hyper_theta for i in range(chains)]\r\n # Sample using MCMC \r\n print(\"Sampling from posterior distribution\") \r\n if chains >= cpu_count():\r\n NUMBER_OF_PROCESSES = cpu_count()-1\r\n else:\r\n NUMBER_OF_PROCESSES = chains\r\n if cpu != None: NUMBER_OF_PROCESSES = cpu # Manual override of core number selection\r\n print(\"Using {} threads\".format(NUMBER_OF_PROCESSES))\r\n with open(results_dir+'progress.txt','w') as f: # clear previous progress report\r\n f.write('')\r\n jobs = Queue() # put jobs on queue\r\n result = JoinableQueue()\r\n countQ = JoinableQueue()\r\n for m in range(chains):\r\n jobs.put([chains_list[m],beta,rho,n,priors_dict])\r\n [Process(target=mh, args=(i, jobs, result, countQ)).start()\r\n for i in range(NUMBER_OF_PROCESSES)]\r\n # pull in the results from each thread\r\n pool_results=[]\r\n chain_attempts=[]\r\n for m in range(chains):\r\n r = result.get()\r\n pool_results.append(r)\r\n result.task_done()\r\n a = countQ.get()\r\n chain_attempts.append(a)\r\n # tell the workers there are no more jobs\r\n for w in range(NUMBER_OF_PROCESSES):\r\n jobs.put(None)\r\n # close all extra threads\r\n result.join()\r\n jobs.close()\r\n result.close()\r\n countQ.close()\r\n\r\n # Perform data analysis\r\n average_acceptance = np.mean([el[1] for el in chain_attempts])\r\n print(\"Average acceptance rate was {:.1f}%\".format(average_acceptance))\r\n samples = get_parameter_distributions(pool_results, burn_rate, down_sample)\r\n plot_parameter_autocorrelations(samples.drop('gamma',axis=1))\r\n get_summary_statistics(samples.drop('gamma',axis=1))\r\n with open(results_dir+'simulation_summary.txt','w') as f:\r\n f.write('Temperature used was {}\\n'.format(beta))\r\n f.write('Number of chains = {}\\n'.format(chains))\r\n f.write(\"Average acceptance rate was {:.1f}%\\n\".format(average_acceptance))\r\n f.write(\"Initial conditions were\\n\")\r\n for i in chains_list:\r\n f.write(str(i))\r\n f.write(\"\\n\")\r\n\r\n\r\n\r\ndef main():\r\n plt.close('all')\r\n modelfiles = [\"ifnmodels.IFN_detailed_model_alpha_ppCompatible\",\"ifnmodels.IFN_detailed_model_beta_ppCompatible\"]\r\n# Write modelfiles\r\n print(\"Importing models\")\r\n alpha_model = __import__(modelfiles[0],fromlist=['ifnmodels'])\r\n py_output = export(alpha_model.model, 'python')\r\n with open('ODE_system_alpha.py','w') as f:\r\n f.write(py_output)\r\n beta_model = __import__(modelfiles[1],fromlist=['ifnmodels'])\r\n py_output = export(beta_model.model, 'python')\r\n with open('ODE_system_beta.py','w') as f:\r\n f.write(py_output)\r\n \r\n p0=[['kpa',1,0.1,'log'],['kSTATbinding',1E-6,0.4,'log'],\r\n ['kSOCSon',1.70E-6,0.1,'log'],['kd4',0.87,0.2,'log'],\r\n ['k_d4',0.86,0.5,'log'],['delR',-1878,500,'linear'],['meanR',2000,300,'linear'],\r\n ['kloc',1.23E-3,0.1,'log'],['kSOCSmRNA',1E-3,0.1,'log'],['mRNAdeg',5E-4,0.1,'log'],\r\n ['mRNAtrans',1E-3,0.1,'log'],['kSOCS',5E-3,0.1,'log']]\r\n \r\n our_priors_dict={'R1':[100,12000,None,None],'R2':[100,12000,None,None],\r\n 'kpa':[1.5E-8,10,np.log(1),4],'kSOCSon':[1.5E-11,0.07,np.log(1E-6),4],\r\n 'k_d4':[4E-5,0.9,np.log(0.006),1.8],'kd4':[0.002,44,np.log(0.3),1.8],\r\n 'kSTATbinding':[1E-11,1,np.log(1E-6),4],'kloc':[1E-5,10,np.log(1.25E-3),4],\r\n 'kSOCSmRNA':[1E-7,10,np.log(1E-3),4],'mRNAdeg':[5E-8,10,np.log(5E-4),4],\r\n 'mRNAtrans':[1E-7,10,np.log(1E-3),4],'kSOCS':[5E-7,10,np.log(5E-3),4]}\r\n \r\n # (n, theta_0, beta, rho, 
chains, burn_rate=0.1, down_sample=1, max_attempts=6,\r\n # pflag=True, cpu=None, randomize=True)\r\n MCMC(1000, p0, our_priors_dict, 2, 1, 5, burn_rate=0.2, down_sample=40, max_attempts=6)\r\n\r\n \r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"Old_Scripts/MCMC_detailed_model.py","file_name":"MCMC_detailed_model.py","file_ext":"py","file_size_in_byte":37723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"534680512","text":"# https://leetcode.com/problems/unique-paths/\r\n\r\nfrom scipy.special import binom\r\n\r\n\r\nm = 7\r\nn = 3\r\n# expected = 28\r\n\r\n# n = 7-1\r\n# k = 3\r\n# print(binom(n+k-1, n))\r\n\r\n\r\ndp = [[0] * m for _ in range(n)]\r\n\r\nfor i in range(n):\r\n dp[i][0] = 1\r\n\r\nfor j in range(m):\r\n dp[0][j] = 1\r\n\r\n\r\nfor i in range(1, n):\r\n for j in range(1, m):\r\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\r\n\r\n\r\nprint(dp)\r\n","sub_path":"062UniquePaths.py","file_name":"062UniquePaths.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"257937239","text":"from ir_measures import measures\nfrom .base import Measure, ParamInfo\n\n\nclass _AP(measures.Measure):\n \"\"\"\n The [Mean] Average Precision ([M]AP). The average precision of a single query is the mean\n of the precision scores at each relevant item returned in a search results list.\n \n AP is typically used for adhoc ranking tasks where getting as many relevant items as possible is important. It is commonly referred to as MAP,\n by taking the mean of AP over the query set.\n \n \"\"\"\n __name__ = 'AP'\n NAME = __name__\n SUPPORTED_PARAMS = {\n 'cutoff': measures.ParamInfo(dtype=int, required=False, desc='ranking cutoff threshold'),\n 'rel': measures.ParamInfo(dtype=int, default=1, desc='minimum relevance score to be considered relevant (inclusive)')\n }\n\n\nAP = _AP()\nMAP = AP\nmeasures.register(AP, ['MAP'])\n","sub_path":"ir_measures/measures/ap.py","file_name":"ap.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"485635670","text":"import sys, datetime\n\ntry:\n\tuser = sys.argv[1]\nexcept:\n\tuser = input(\"Kaltura user: \")\nelse:\n\tuser = sys.argv[1]\n \nfrom KalturaClient import *\nfrom KalturaClient.Plugins.Core import *\n\nsys.path.append('../')\nimport private\n\nconfig = KalturaConfiguration()\nconfig.serviceUrl = \"https://api.kaltura.nordu.net/\"\nclient = KalturaClient(config)\nks = client.session.start(\n\tprivate.secret,\n\tprivate.adminuser_id,\n\tKalturaSessionType.ADMIN,\n\tprivate.partner_id)\nclient.setKs(ks)\n\n# Get list\nfilter = KalturaMediaEntryFilter()\nfilter.orderBy = \"-createdAt\" # Newest first\n#filter.orderBy = \"+createdAt\" # Oldest first\nfilter.userIdEqual = user\npager = KalturaFilterPager()\n#pager.pageSize = 500\npager.pageIndex = 1\n\nentrylist = client.media.list(filter, pager)\ntotalcount = entrylist.totalCount\n\nwith open('list-media-by-user.txt', 'w') as f:\n text =\"entry.id\\tcreatedAt\\tentry.name\\tentry.downloadUrl\"\n #print(text)\n f.write(text+\"\\n\")\n\n# Loop\nwith open('list-media-by-user.txt', 'a') as f:\n\tnid = 1\n\twhile nid < totalcount :\n\t\tentrylist = client.media.list(filter, pager)\n\t\tfor entry in entrylist.objects:\n\t\t\tcreatedat = datetime.datetime.fromtimestamp(int(entry.createdAt)).strftime('%Y-%m-%d %H:%M:%S')\n\t\t\ttext = 
entry.id+\"\\t\"+createdat+\"\\t\"+entry.name+\"\\t\"+entry.downloadUrl\n\t\t\t#print (text)\n\t\t\tf.write(text+\"\\n\")\n\t\t\tnid = nid + 1\n\t\tpager.pageIndex = pager.pageIndex + 1\n","sub_path":"list-media-by-user/list-media-by-user.py","file_name":"list-media-by-user.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"227484377","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 22:19:58 2018\n\n@author: Yacine Mahdid at BIAPT LAB\n@contact: yacine.mahdid@mail.mcgill.ca\n@website: yacinemahdid.com\n\nFor questions about this script please post on the subreddit: CodeForResearch,\nor send an email @contact.\n\"\"\"\nimport tkinter as tk\nimport pandas\nfrom tkinter import filedialog\nfrom tkinter import simpledialog\n\nroot = tk.Tk()\nroot.withdraw()\n\nprint(\"Program Launched...\")\n\nprint(\"Please select the first excel file:\")\nfile_path_1 = filedialog.askopenfilename(title = \"Select first excel file\")\nprint(\"You have chosen this file: \" + file_path_1)\n\nprint(\"Please select the second excel file:\")\nfile_path_2 = filedialog.askopenfilename(title = \"Select second excel file\")\nprint(\"You have chosen this file: \" + file_path_2)\n\nprint(\"Loading CSV Files...\")\n\nprint('CSV File 1...')\ncsv1_df = pandas.read_excel(file_path_1)\n\nprint('CSV File 2...')\ncsv2_df = pandas.read_excel(file_path_2)\n\nlabel_to_check = simpledialog.askstring('Duplicate Removal', 'Please state the column name to compare')\nprint('Removing Duplicates...')\n\nunique_csv_df = csv1_df.copy(deep=True)\ndup_csv_df = csv1_df.copy(deep=True)\nfor index1, row1 in csv1_df.iterrows():\n isDuplicate = False;\n for index2, row2 in csv2_df.iterrows():\n #Here we remove the rows that have similar values in the given column\n if(row1[label_to_check] == row2[label_to_check]):\n isDuplicate = True;\n unique_csv_df = unique_csv_df.drop(index1)\n break;\n #Here we remove the rows that have dissimilar values in the given column\n if(isDuplicate == False):\n dup_csv_df = dup_csv_df.drop(index1)\n \nprint('Selecting CSV Name...') \nunique_name = filedialog.asksaveasfile(mode='w',title = \"Select unique element filename\", defaultextension=\".csv\")\n\nprint('Saving CSV...')\nunique_csv_df.to_csv(unique_name , sep=',', encoding='utf-8',index=False)\nunique_name.close()\n\nprint('Selecting CSV Name...') \ndup_name = filedialog.asksaveasfile(mode='w',title = \"Select duplicate element filename\", defaultextension=\".csv\")\nprint('Saving CSV...')\ndup_csv_df.to_csv(dup_name, sep=',', encoding='utf-8',index=False)\ndup_name.close()\n\nprint(\"Program Terminated!\")","sub_path":"separate_duplicate_CSV.py","file_name":"separate_duplicate_CSV.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"205989660","text":"from django.conf.urls import patterns, include, url\nfrom explore import views\n\nurlpatterns = patterns('explore.views',\n\turl(r'^(?P\\w+)/dashboard/', include('dashboard.urls', namespace='dashboard')),\n\turl(r'^(?P\\w+)/member/(?P\\w+)/', 'show_member', name='show_member'),\n\turl(r'^(?P\\w+)/story/edit', 'family_story_edit', name='family_story_edit'),\n\turl(r'^(?P\\w+)/story', 'family_story', name='family_story'),\n\turl(r'^(?P\\w+)', 'explore_home', name='explore_home'),\n\t\n\t#url(r'^add-member', 'add_member', name='add_member'),\n\t#url(r'^subscribe', 'subscribe', 
name='subscribe'),\n)","sub_path":"explore/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"189201036","text":"'''\nCreate simple annotations for WIDERFace dataset\nPlease download WIDERFace dataset and extract in following structure\n+ data\n___+ WIDER2015\n______+ wider_face_split\n______+ WIDER_test\n______+ WIDER_train\n______+ WIDER_val\n'''\n\n\nOUTPUT_FILE = \"widerface.txt\"\noutF = open(OUTPUT_FILE, \"w\")\n\nfor sub_dataset in [\"train\", \"val\"]:\n anno_file = \"./data/WIDER2015/wider_face_split/wider_face_{}_bbx_gt.txt\".format(sub_dataset)\n\n # Open WIDER annotation file and process\n with open(anno_file) as fp:\n\n img_file = fp.readline()[:-1]\n while img_file:\n img_path = \"./data/WIDER2015/WIDER_{}/images/{}\".format(sub_dataset, img_file)\n n_objects = int(fp.readline())\n for i in range(n_objects):\n object_info = fp.readline().split()\n x1 = int(object_info[0])\n y1 = int(object_info[1])\n width = int(object_info[2])\n height = int(object_info[3])\n x2 = x1 + width\n y2 = y1 + height\n\n # Write line to output file\n outF.write(\"{},{},{},{},{},face\".format(img_path, x1, y1, x2, y2))\n outF.write(\"\\n\")\n\n # Read a new image file\n img_file = fp.readline()[:-1]\n\n\n# Close output file\noutF.close()\n","sub_path":"create_simple_anno_wider.py","file_name":"create_simple_anno_wider.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"273218970","text":"__author__ = 'shmakovs'\n\ndef GetSpacerStartStop(Start, Stop, AliStart, AliStop, SpacerLength):\n if Start < Stop:\n RealStart = Start - (AliStart - 1)\n RealStop = Stop + (SpacerLength - AliStop)\n else:\n RealStart = Start + AliStart - 1\n RealStop = Stop - (SpacerLength - AliStop)\n\n return minmax(RealStart, RealStop)\n\ndef minmax(Int1, Int2):#Str1, Str2):\n # Int1 = int(Str1)\n # Int2 = int(Str2)\n\n if Int2 > Int1:\n return Int1, Int2\n else:\n return Int2, Int1\n\n\ndef GetContigToTaxIDDict():\n ContigToTaxCategory = dict()\n\n TaxTabFileName = \"//panfs/pan1/prokdata/db_tmp/all1603.tax.tab\"\n ContigsFileName = \"//panfs/pan1/prokdata/db_tmp/all1603.pp.txt\"\n\n OrganismNameToTaxID = dict()\n\n with open(TaxTabFileName, \"r\") as TaxTabFile:\n for Line in TaxTabFile:\n if Line[0] == \"#\":\n continue\n LineValues = Line[:-1].split(\"\\t\")\n OrganismNameToTaxID[LineValues[0]] = LineValues[3].split(\";\")[3]\n\n with open(ContigsFileName, \"r\") as ContigsFile:\n for Line in ContigsFile:\n if Line[0] == \"#\":\n continue\n LineValues = Line[:-1].split(\"\\t\")\n ContigToTaxCategory[LineValues[1]] = OrganismNameToTaxID[LineValues[0]]\n\n return ContigToTaxCategory\n\ndef GetCRISPRID(SpacerID):\n LineValues = SpacerID.split(\"_\")\n return LineValues[0] + \"_\" + LineValues[1] + \"_\" + LineValues[2]\n\n\n\nContigToTaxCategory = GetContigToTaxIDDict()\n\nCRISPRTypesFileName = \"/panfs/pan1/prokdata/CRISPRicity/SpacerAnalysis/AllIteration/TmpFolder/CRISPR_info_known_071117.tsv\"\n\nClearTaxCategoryDict = dict()\nfor Line in open(CRISPRTypesFileName, \"r\"):\n LineValues = Line[:-1].split(\"\\t\")\n ClearTaxCategoryDict[LineValues[1] + \"_\" + LineValues[2] + \"_\" + LineValues[3]] = LineValues[11]\n\nClearCRISPRTypesDict = dict()\nfor Line in open(CRISPRTypesFileName, \"r\"):\n LineValues = Line[:-1].split(\"\\t\")\n ClearCRISPRTypesDict[LineValues[1] + \"_\" + LineValues[2] + \"_\" + 
LineValues[3]] = LineValues[9]\n\nSpacerLengthsDict = dict()\nfor Line in open(\"SpacersLengths.tsv\"):\n LineValues = Line[:-1].split(\"\\t\")\n SpacerLengthsDict[LineValues[0]] = int(LineValues[1])\n\nUniqueSpacerHitsFileName = \"/panfs/pan1/prokdata/CRISPRicity/SpacerAnalysis/AllIteration/TmpFolder/BlastResults/All1603_Spacers.hits_new_good_uniq\"\nSpacerToCoordDict = dict()\nfor Line in open(UniqueSpacerHitsFileName):\n LineValues = Line[:-1].split(\"\\t\")\n SpacerToCoordDict[LineValues[0]] = [LineValues[1], int(LineValues[3]), int(LineValues[4]), int(LineValues[8]), int(LineValues[9])]\n\nSpacersToGIFileName = \"/panfs/pan1/prokdata/CRISPRicity/SpacerAnalysis/AllIteration/TmpFolder/All1603_good_Islands/ORFHits_new.tsv\"\nSpacersToGIDict = dict()\nfor Line in open(SpacersToGIFileName):\n LineValues = Line[:-1].split(\"\\t\")\n SpacersToGIDict[LineValues[1]] = LineValues[0]\n\n\nCount = 0\nSpacerHitsIslandsFileName = \"/panfs/pan1/prokdata/CRISPRicity/SpacerAnalysis/AllIteration/TmpFolder/All1603_good_Islands/Islands.ann_CRISPR\"\nfor Line in open(SpacerHitsIslandsFileName):\n LineValues = Line[:-1].split(\"\\t\")\n\n if LineValues[0] in SpacersToGIDict:\n SpacerID = SpacersToGIDict[LineValues[0]]\n ProteinLength = int(LineValues[9]) * 3\n\n SpacerStart, SpacerStop = GetSpacerStartStop(SpacerToCoordDict[SpacerID][1], SpacerToCoordDict[SpacerID][2],\n SpacerToCoordDict[SpacerID][3], SpacerToCoordDict[SpacerID][4],\n SpacerLengthsDict[SpacerID])\n Coords = LineValues[1].split(\"..\")\n ORFStart = int(Coords[0])\n ORFStop = int(Coords[1])\n\n if LineValues[2] == \"+\":\n Offset = SpacerStart - ORFStart\n else:\n Offset = ORFStop - SpacerStop\n\n if Offset >= 0:\n CRISPRID = GetCRISPRID(SpacerID)\n if CRISPRID not in ClearCRISPRTypesDict:\n CRISPRType = \"Unidentified\"\n else:\n CRISPRType = ClearCRISPRTypesDict[CRISPRID]\n\n TaxCategory = ContigToTaxCategory[SpacerID.split(\"_\")[0]]\n print(SpacerID + \"\\t\" + str(Offset % 3) + \"\\t\" + TaxCategory + \"\\t\" + CRISPRType + \"\\t\" + str(Offset) + \"\\t\" + str(ProteinLength) + \"\\t\" + LineValues[2])\n\n#print(Count)","sub_path":"onetimers/SpacerAnalysis/CodonBias/GetSpacersStartDisctributionInCodons.py","file_name":"GetSpacersStartDisctributionInCodons.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"129676544","text":"import numpy as np\n\n\ndef region_to_bbox(region, center=True):\n\n n = len(region)\n assert n == 4 or n == 8, 'Groundtruth region format is invalid, should have 4 or 8 entries.'\n\n return rectangle(region, center) if n == 4 else polygon(region, center)\n\n\n# Assuming bounding boxes are saved with 0-indexing\ndef rectangle(region, center):\n\n if center:\n top_left_corner_x, top_left_corner_y, width, height = region\n center_x = top_left_corner_x + width / 2\n center_y = top_left_corner_y + height / 2\n\n return center_x, center_y, width, height\n\n return region\n\n\ndef polygon(region, center):\n\n center_x = np.mean(region[::2])\n center_y = np.mean(region[1::2])\n\n x1 = np.min(region[::2])\n x2 = np.max(region[::2])\n y1 = np.min(region[1::2])\n y2 = np.max(region[1::2])\n\n a1 = np.linalg.norm(region[0:2] - region[2:4]) * np.linalg.norm(region[2:4] - region[4:6])\n a2 = (x2 - x1) * (y2 - y1)\n s = np.sqrt(a1 / a2)\n width = s * (x2 - x1) + 1\n height = s * (y2 - y1) + 1\n\n if center:\n return center_x, center_y, width, height\n\n return center_x - width / 2, center_y - height / 2, width, 
height\n","sub_path":"improvement/src/bounding_box.py","file_name":"bounding_box.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"188622803","text":"nums = [x**2 for x in range(10)]\n\nprint(nums)\n\nnumb = []\nfor x in range(10):\n numb.append(x**2)\nprint(numb)\n\n# names = [\"David\", 'Alex', \"Tom\"]\n# lower = [name.lower() for name in names]\n\nyears = [1995, 200, 2004, 2011]\nleap_years = [year for year in years if year % 4 == 0]\nprint(leap_years)\n\nnumbers = [1,2,3,4,5]\ndouble_odds = []\nfor x in numbers:\n if x % 2 == 1:\n double_odds.append(x*2)\nprint(double_odds)\n\ndouble_odds = [n*2 for n in numbers if n % 2 == 1]\n\nprint({x // 10 for x in range(100)})\n\nnames_to_ages = {'Amanda': '90',\"David\": \"50\"}\nprint({name: int(age) for name, age in names_to_ages.items()})","sub_path":"Students/Ben/10_26_Assignment/11_02.py","file_name":"11_02.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"271402665","text":"##===============================================\n# @Author : Ngtuong\n# File : create_hdf5.py\n# Time : 24/09/2019\n# Description : Create hdf5 file\n##===============================================\n\n\nimport glob\nimport h5py\nimport cv2\nimport numpy as np\nfrom random import shuffle\n\n# prepare data\nshuffle_data = True # shuffle the address list\n\nhdf5_path = \"./dataset/cat_and_dog.hdf5\"\n\ndataset_train_cats_path = \"./dataset/training_set/cats/*.jpg\"\ndataset_train_dogs_path = \"./dataset/training_set/dogs/*.jpg\"\n\naddress_cat = glob.glob(dataset_train_cats_path)\naddress_dog = glob.glob(dataset_train_dogs_path)\n\naddress = address_cat + address_dog\n\n# create labels data\nlabels = []\nfor i in range(len(address_cat)):\n labels.append(0)\nfor i in range(len(address_dog)):\n labels.append(1)\n\n# shuffle data\nif shuffle_data:\n c = list(zip(address, labels)) # bind the images and labels together\n shuffle(c)\n\n (address, labels) = zip(*c)\n\n# Divide the data to 80% train and 20% test\ntrain_cat_and_dog = address[:int(0.8*len(address))]\ntrain_labels = labels[:int(0.8*len(labels))]\n\ntest_cat_and_dog = address[int(0.8*len(address)):]\ntest_labels = labels[int(0.8*len(labels)):]\n\n# Create hdf5 file\ntrain_shape = (len(train_cat_and_dog), 224, 224, 3)\ntest_shape = (len(test_cat_and_dog), 224, 224, 3)\n\n# open hdf5 file path\nwith h5py.File(hdf5_path, 'w') as f:\n\n f.create_dataset(\"train_img\", train_shape, np.uint8)\n f.create_dataset(\"test_img\", test_shape, np.uint8)\n\n f.create_dataset(\"train_labels\", (len(train_cat_and_dog),), np.uint8)\n f[\"train_labels\"][...] = train_labels\n\n f.create_dataset(\"test_labels\", (len(test_cat_and_dog),), np.uint8)\n f[\"test_labels\"][...] = test_labels\n\n # write images\n for i in range(len(train_cat_and_dog)):\n\n img = cv2.imread(train_cat_and_dog[i])\n img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n f[\"train_img\"][i, ...] = img[None]\n\n for i in range(len(test_cat_and_dog)):\n\n img = cv2.imread(test_cat_and_dog[i])\n img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n f[\"test_img\"][i, ...] 
= img[None]\n\n f.close()\nprint('create complex')","sub_path":"dataset/create_hdf5.py","file_name":"create_hdf5.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"5346816","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect, HttpResponseForbidden\nfrom django.urls import reverse\nfrom django.views.generic import DeleteView\n\nfrom .models import Comment\n\nclass CommentDeleteView(LoginRequiredMixin, DeleteView):\n login_url = '/permissiondenied/'\n template_name = \"comments/comment_delete.html\"\n\n def get_object(self):\n id_ = self.kwargs.get(\"id\")\n return get_object_or_404(Comment, id=id_)\n\n def get_success_url(self):\n return reverse('posts:posts-list')\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n #print(self.object.children())\n if self.object.user == request.user:\n success_url = self.get_success_url()\n if self.object.is_parent:\n for child in self.object.children():\n child.delete()\n self.object.delete()\n return HttpResponseRedirect(success_url)\n else:\n return HttpResponseForbidden(\"Cannot delete other's comments\")\n\n def get_context_data(self,**kwargs):\n context = super().get_context_data(**kwargs)\n context['id'] = self.kwargs.get(\"id\")\n context['slug'] = self.kwargs.get(\"slug\")\n return context\n","sub_path":"src/comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"76292071","text":"# 6.00.2.x Intro to Computational Thinking & Data Science\n# Wayne H Nixalo - 23-Nov-2016 14:50\n# ==============================================================================\n# Unit 3 Exericse 3: Write a function, stdDevOfLengths(L) that takes in a\n# list of strings, L, and outputs the standard deviation of the lengths of the\n# strings. Return float('NaN') if L is empty.\n# ==============================================================================\ndef stdDevOfLengths(L):\n '''\n Takes a list of strings, L; outputs the standard deviation of the\n lengths of the strings. 
Returns float('NaN') if L empty.\n '''\n nX = len(L)\n if nX == 0:\n return float('NaN')\n lens = []\n for i in range(nX):\n lens.append(len(L[i]))\n u = sum(lens)/float(nX)\n sigma = 0\n for i in range(nX):\n sigma += (lens[i] - u)**2\n return(sigma / nX)**0.5\n\n#L = ['hello', ' what', 'who\\'s your daddy?', 'yeah no way jose']\n#print(stdDevOfLengths(L))\n\ndef stdDevNums(X):\n '''\n in: X: list of numbers\n out: standard deviation of X, and mean u, or float(NaN) if X empty.\n '''\n nX = len(X)\n if nX == 0:\n return float('NaN')\n nums = []\n for i in range(nX):\n nums.append(X[i])\n u = sum(nums)/float(nX)\n sigma = 0\n for i in range(nX):\n sigma += (nums[i] - u)**2\n return (sigma / nX)**0.5, u\n\ndef coefVarNums(X):\n '''\n IN: X, list of numbers\n OUT: coefficient of variation of X, or float(NaN) if X empty\n '''\n if len(X) == 0:\n return float('NaN')\n sdX, u = stdDevNums(X)\n return sdX/u\n\n#C = [10, 4, 12, 15, 20, 5]\n#print (coefVarNums(C))\n\nA,B,G = [1,2,3],[11,12,13],[0.1,0.1,0.1]\nTHING = [A,B,G]\nfor smth in THING:\n print (coefVarNums(smth))","sub_path":"6.00x/6.00.2x-DS/U3Ex3_stdDevOfLengths.py","file_name":"U3Ex3_stdDevOfLengths.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"521135205","text":"import unittest\nfrom portfolio_utils.investor import Investor\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_get_username(self):\n investor = Investor(\"Laurynas\")\n self.assertEqual(investor.get_username(), \"Laurynas\")\n\nif __name__ == '__main__':\n unittest.main()\n ","sub_path":"Tests/test_portfolio_utils/test_investor.py","file_name":"test_investor.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"431309442","text":"import sqlite3\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom cachecontrol import CacheControl\nfrom cachecontrol.caches.file_cache import FileCache\n\nsess = CacheControl(requests.Session(), cache=FileCache('relaxdays_cache')) # wir wollen den Server ja nicht unnötig foltern\nstandard_url = 'https://relaxdays.de/catalogsearch/result/index/?q={}&product_list_dir=asc&product_list_order=sale_rank&product_list_limit=48'\n\ndb = sqlite3.connect(\"daten.db\")\nc = db.cursor()\n\nworking = []\nbroken = []\n\n\ndef get_items(category: str):\n url = standard_url.format(category)\n r = sess.get(url)\n soup = BeautifulSoup(r.text, 'lxml')\n try:\n warning = soup.find('div', {'class': 'message notice'})\n if warning is None:\n items_html = soup.find('ol', {'class': 'products list items product-items'}).find_all('li')\n c.execute('CREATE TABLE IF NOT EXISTS \"{}\" (id NUMERIC UNIQUE, name TEXT, price NUMERIC, url TEXT, image TEXT)'.format(category))\n else:\n print(\"{} hat eine Warning: {}\".format(category, warning.text))\n broken.append(category)\n return\n except AttributeError:\n print(\"{} hat keine Einträge.\".format(category))\n broken.append(category)\n return\n for x, item in enumerate(items_html):\n i = {\n 'url': item.find('a', {'class': 'product-item-link'})['href'],\n 'image': item.find('img')['src'],\n 'name': item.find('a', {'class': 'product-item-link'}).text,\n 'price': item.find('span', {'class': 'price'}).text.replace('\\xa0€', '').replace(',', '.')\n }\n c.execute(\"INSERT INTO '{}' (id, name, price, url, image) VALUES (?, ?, ?, ?, ?);\".format(category), (x, i['name'], str(i['price']), i['url'], i['image']))\n db.commit()\n 
working.append(category)\n\n\nif __name__ == '__main__':\n with open('bereinigte_kategorien.txt', 'r') as f:\n lines = f.readlines()\n\n for l in lines:\n print('Suche: {}'.format(l.strip()))\n get_items(l.strip())\n\n\n with open('broken.txt', 'w') as f:\n for cat in broken:\n f.write(\"%s\\n\" % cat)\n with open('working.txt', 'w') as f:\n for cat in working:\n f.write(\"%s\\n\" % cat)\n c.close()\n","sub_path":"rd-daten/crawler_relaxdays.py","file_name":"crawler_relaxdays.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"74537556","text":"import asyncio\nfrom aiohttp import web\n\n\nasync def index(request):\n await asyncio.sleep(0.5)\n # return web.Response(body=b'
<h1>Index</h1>',content_type='text/html')\n    # return web.Response(body='<h1>Index</h1>',content_type='text/html')\n    # return web.Response(body='<h1>Index</h1>')\n    return web.Response(text='<h1>Index</h1>',content_type='text/html')\n\n\nasync def hello(request):\n    await asyncio.sleep(0.2)\n    text = '<h1>hello, %s!</h1>
' % request.match_info['name']\n # return web.Response(body=text.encode('utf-8'),content_type='text/html')\n # return web.Response(text=text,content_type='text/html')\n return web.Response(text=text)\n\n\n# async def init(loop):\n# aapp = web.Application(loop=loop)\n# aapp.router.add_route('GET','/',index)\n# aapp.router.add_route('GET','/hello/{name}',hello)\n# srv = await loop.create_server(aapp.make_handler(),'127.0.0.1',8000)\n# print('Server started at http://127.0.0.1:8000...')\n# return srv\nasync def wshandle(request):\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n\n async for msg in ws:\n if msg.type == web.WSMsgType.text:\n await ws.send_str('Hello,{}'.format(msg.data))\n elif msg.type == web.WSMsgType.binary:\n await ws.send_bytes(msg.data)\n elif msg.type == web.WSMsgType.close:\n break\n \n return ws\n\n# loop = asyncio.get_event_loop()\n# loop.run_until_complete(init(loop))\n# loop.run_forever()\n\napp = web.Application()\napp.add_routes([\n web.get('/', index),\n web.get('/hello/{name}',hello),\n web.get('/echo',wshandle)\n])\nweb.run_app(app,host='127.0.0.1',port=8080)\n\n\n'''\n asyncio可以实现单线程并发IO操作。如果仅用在客户端,发挥的威力不大。如果把asyncio用在服务器端,例如Web服务器,由于http\n 连接就是IO操作,因此可以用单线程+coroutine实现多用户的高并发支持。\n asyncio实现了TCP、UDP、SSL等协议,aiohttp则是基于asyncio实现的http框架。\n'''","sub_path":"异步IO/asyncHttp.py","file_name":"asyncHttp.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"569749177","text":"from OpenGL.error import NullFunctionError\nfrom OpenGL.GLUT import *\nimport ModernGL as GL\nimport struct\n\ntry:\n\tglutInit(sys.argv)\n\nexcept NullFunctionError as ex:\n\tprint('(Windows) Maybe missing glut32.dll.')\n\tprint('(Linux) Maybe apt-get install python-opengl will help.')\n\tprint(ex)\n\texit()\n\nglutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\nglutInitWindowSize(800, 600)\nglutCreateWindow(b'01 HelloWorld')\n\nGL.Init()\n\nvert = GL.NewVertexShader('''\n\t#version 330\n\tin vec2 vert;\n\tvoid main() {\n\t\tgl_Position = vec4(vert, 0.0, 1.0);\n\t}\n''')\n\nfrag = GL.NewFragmentShader('''\n\t#version 330\n\tout vec4 color;\n\tvoid main() {\n\t\tcolor = vec4(0.30, 0.50, 1.00, 1.0);\n\t}\n''')\n\nprog = GL.NewProgram([vert, frag])\n\nvbo = GL.NewVertexBuffer(struct.pack('6f', 0.0, 0.8, -0.6, -0.8, 0.6, -0.8))\nvao = GL.NewVertexArray(prog, vbo, '2f', ['vert'])\n\ndef display():\n\tGL.Clear(240, 240, 240)\n\tGL.RenderTriangles(vao, 3)\n\tglutSwapBuffers()\n\nglutDisplayFunc(display)\nglutMainLoop()\n","sub_path":"Examples/PyOpenGL/01_HelloWorld.py","file_name":"01_HelloWorld.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"80281669","text":"from datetime import datetime, timedelta\nfrom flask import flash, render_template, request, redirect, url_for\nfrom flask.ext.login import current_user, login_required, login_user, logout_user\nimport json\nfrom vigil import app, model, utils, content_type\n\n\n@app.route(\"/\")\n@login_required\ndef index():\n today = datetime.now().strftime(app.config['DATE_FORMAT'])\n return redirect(url_for('day_view', day=today))\n\n\n@app.route(\"/day/\", methods=['GET', 'POST'])\n@login_required\ndef day_view(day):\n day_datetime = datetime.strptime(day, app.config['DATE_FORMAT'])\n if request.method == 'POST':\n answers = [(x.replace('question_', ''), request.form[x],) for x in request.form if x[0:8] =='question']\n 
model.save_answers(current_user, day_datetime, answers)\n flash('success', 'Saved!')\n return redirect(url_for('day_view', day=day))\n\n user_answers = model.get_answers_dict(current_user, day_datetime)\n burns_score = model.get_burns_score(current_user, day_datetime)\n groups = model.get_users_question_groups(current_user)\n prev_day, next_day = utils.bracketing_days(day_datetime)\n\n edit = False\n if ('edit' in request.args and request.args['edit'] == 'true') or not user_answers:\n edit = True\n\n template = 'edit.html' if edit else 'view.html'\n\n return render_template(template, groups=groups, day=day_datetime, user_answers=user_answers,\n burns_score=burns_score, day_raw=day,\n prev_day=prev_day, next_day=next_day)\n\n\n@app.route(\"/edit-questions\")\n@login_required\ndef edit_questions():\n groups = model.get_only_users_question_groups(current_user)\n\n return render_template('edit_questions.html', groups=groups)\n\n\n@app.route(\"/ajax/top-graph\")\n@login_required\n@content_type(\"application/json\")\ndef ajax_top_graph():\n current_date = request.args.get('date', None)\n if current_date:\n current_date_datetime = datetime.strptime(current_date, app.config['DATE_FORMAT'])\n else:\n current_date_datetime = datetime.now()\n\n return json.dumps(model.get_top_graph(current_user, current_date_datetime))\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n user = model.authenticate(request.form['username'], request.form['password'])\n if user:\n login_user(user, remember=True)\n url = request.form['next'] if 'next' in request.form else '/'\n return redirect(url)\n else:\n flash('Invalid login', 'error')\n return redirect('/login')\n return render_template('login.html', next=request.args.get('next'))\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect('/login')\n","sub_path":"vigil/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"283528961","text":"from Classes.Motor import Motors\nfrom Classes.PWM import PWM\nfrom Classes.Sensors import *\nimport RPi.GPIO as GPIO\nimport time\n\nMAX_DIS = 50\nMIN_DIS = 30\n\nGPIO.setmode(GPIO.BCM)\n\ndef Handle_Range(SA,car):\n if SA.FR.last_range > MAX_DIS and SA.FC.last_range > MAX_DIS and SA.FL.last_range > MAX_DIS:\n car.forward()\n elif SA.FR.last_range > MIN_DIS and SA.FC.last_range > MAX_DIS and SA.FL.last_range > MAX_DIS:\n car.turnLeft()\n time.sleep(.2)\n elif SA.FR.last_range > MAX_DIS and SA.FC.last_range > MAX_DIS and SA.FL.last_range > MIN_DIS:\n car.turnRight()\n time.sleep(.2)\n else:\n car.reverse()\n time.sleep(.1)\n if SA.FR.last_range > SA.FL.last_range:\n car.turnRight()\n time.sleep(.5)\n else:\n car.turnLeft()\n time.sleep(.5)\n\ntry:\n car = Motors(4,17,22,27)\n En = PWM(13, 18, hertz = 100, duty_cycle = .5)\n SA = SensorArray()\n En.ChangeDutyCycle(50)\n time.sleep(1)\n SA.start()\n En.start()\n car.brake()\n start = time.time()\n while time.time() - start <= 20:\n Handle_Range(SA,car)\n \n En.stop()\n SA.stop()\n\nfinally:\n GPIO.cleanup()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"527632846","text":"def main():\n import sys\n input = sys.stdin.readline\n\n N, K = map(int, input().split())\n A = list(map(int, input().split()))\n\n visited = [0]\n visited_set = set([0])\n now = 0\n 
ind = 0\n my_append = visited.append\n my_add = visited_set.add\n for _ in range(K):\n a = A[now]-1\n if a in visited_set:\n ind = visited.index(a)\n break\n else:\n my_add(a)\n my_append(a)\n now = a\n\n loop_visited = visited[ind:]\n ans_ind = (K-ind) % len(loop_visited)\n\n print(loop_visited[ans_ind]+1)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"legacy/abc167/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"133135245","text":"# Proof of concept for using the python set() implementation as the unordered set of amino acids that total a given mass\n# Python set() is implemented as hash table with no keys thus removing duplicate vales by definition\n# I believe you could also modify a basic hash table to use keys = mass and values = sequence\n# for simplicity the masses of each amino acid are represented by integers 1-20\n# conveniently this results in the first element of each set represents the number of amino acids in each tier\n\n\ndef Make_Tier(tier_set):\n temp_set = set({})\n tier_set_calc = tier_set.copy()\n\n for x in tier_set_calc:\n for y in tier_set:\n temp_set.add(x + y)\n\n # tier_set.clear()\n # tier_set = temp_set.copy()\n tier_set = temp_set.copy() #.difference(tier_set)\n temp_set.clear()\n\n return tier_set\n\n\naa_list = [71, 156, 114, 115, 103, 129, 128, 57, 137, 113, 131, 147, 97, 87, 101, 186, 163, 99]\naa_list.sort()\ntier_1 = set(aa_list)\n\ntier_list = [tier_1.copy()]\n\nfor i in range(4):\n tier = Make_Tier(tier_list[i].copy())\n tier_list.append(tier)\n\ntier = 0\nfor item in tier_list:\n print(\"Mass possibility tier: %d\" % tier)\n print(\"Number of amino acids: %d\" % (2 ** (tier)))\n print(\"Number of distinct masses: %d\" % len(item))\n print(item)\n print()\n tier += 1\n","sub_path":"Python/protein_tree/proof_of_concept.py","file_name":"proof_of_concept.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"15301625","text":"import random\n\nprint('Guessing game!')\ninc = 0\nnum = random.randint(1, 10)\nprint('Pick a number between 1 and 10!')\n\nwhile inc < 5:\n print('Go ahead and guess!')\n guess = input()\n guess = int(guess)\n inc = inc + 1\n if guess > num:\n print('Sorry, your guess was too high!')\n if guess < num:\n print('Sorry, your guess is too low!')\n if guess == num:\n break\n\nif guess == num:\n guess = str(guess)\n print('Great job! You picked the correct number in ' + guess + ' guesses!')\n guess = int(guess)\n\nif guess != num:\n num = str(num)\n print('Aww sorry! 
The number I was thinking was ' + num + '!')\n","sub_path":"python/projects/_main_trunk/guess_a_number.py","file_name":"guess_a_number.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158342929","text":"\"\"\"A very simple version of grid soccer\"\"\"\n# coding: utf-8\n\nfrom __future__ import division, absolute_import\nfrom __future__ import print_function, unicode_literals\n\nfrom gym.core import Env\n\nfrom gym.spaces import Discrete, Box\nfrom gym.spaces import Dict\nfrom gym.utils import seeding\n\nfrom .utils import socceragent\nimport pygame\nimport numpy as np\nimport math\nimport os\nfrom PIL import Image\n\ngreen = 0, 255, 0\nred = 255, 0, 0\nblue = 0, 0, 255\nblack = 0, 0, 0\nyellow = 255, 255, 0\nscale = 20\nwhite = 255, 255, 255\n\n\"\"\"\n Time of passing is ignored in this grid soccer domain.\n\"\"\"\n\nclass GridSoccerPassing(Env):\n def __init__(self,\n length=10,\n width=10,\n num_agents=(5, 4),\n max_steps=500,\n scale=20,\n masking=False,\n mask_reward=False,\n use_state_feature=False,\n use_tensor_action=True,\n ground_truth_mask=False):\n \"\"\"\n Initialize the environments.\n\n length x width is the size for the field\n length --x axis, width --y axis\n num_agents record the numbers for both teams\n\n TODO: take chip kick into account\n \"\"\"\n self.length = length\n self.width = width\n self.num_agents = num_agents\n self.scale = scale\n\n self.steps = 0\n #self.chip_kick = False # noqa: E265 \n self.soccerfield = socceragent.SoccerField(self.length, self.width,self.scale,\n self.num_agents)\n self.max_steps = max_steps\n self.masking = masking\n self.mask_reward = mask_reward\n \n self.use_tensor_action = use_tensor_action\n self.use_state_feature = use_state_feature\n \n self.ground_truth_mask = ground_truth_mask\n \"\"\"\n Action space:\n \"teamA\":\n \"teamB\":\n \"passing\": True, False\n\n \"\"\"\n self.teamA_names = [\"agent%02d\" % (i) for i in range(num_agents[0])]\n self.teamB_names = [\"agent%02d\" % (i) for i in range(num_agents[1])]\n\n self.action_teamA = Dict(\n {agent: Discrete(5)\n for agent in self.teamA_names})\n self.action_teamB = Dict(\n {agent: Discrete(5)\n for agent in self.teamB_names})\n\n self.action_space = Discrete(self.length * self.width)\n\n if self.use_state_feature is True:\n self.observation_space = Box(0, 100, shape=(3 * (self.num_agents[0] + self.num_agents[1] + 1),))\n elif self.masking is True:\n self.observation_space = Box(\n 0,\n 255,\n shape=(self.length * self.scale, self.width * self.scale, 2))\n else:\n self.observation_space = Box(\n 0,\n 255,\n shape=(self.length * self.scale, self.width * self.scale, 1))\n\n self.obs = None\n \"\"\"\n \"teamA\": locations of agents in team A\n \"teamB\": locations of agents in team B\n \"the agent with the ball\": the agent number in team A with the ball\n \"target cell\": Could be just random unoccupied cell, or the cell of\n a teammate\n \"\"\"\n if self.use_tensor_action is True:\n self.action_space = Discrete(self.length * self.width)\n else:\n # Action would be the agent's index, including teammates\n # and opponents\n self.action_space = Discrete(num_agents[0])\n\n self._seed()\n self.reset()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reset(self):\n \"\"\"\n reset the environment.\n\n Default mode is random, which means that it will\n randomly choose a target point\n\n Locations of every agent is randomly given\n\n 
self.obs is the self.obs of the envionment,\n It should contain several contents:\n self.obs[0] -Locations of every agent\n self.obs[1] -The agent that carries the ball\n self.obs[2] -Location of the target cell\n \"\"\"\n\n self.obs = self.soccerfield.reset()\n self.draw()\n self.steps = 0\n self._first_passing_opportunity_time = None\n\n if self.use_state_feature is False:\n return self.obs['image']\n else:\n return self.rtr_state_feature(self.obs)\n\n def reward(self, debug_info):\n done = False\n\n flag = debug_info[\"flag\"]\n intercept = debug_info[\"intercept\"]\n reward = 0\n if self.mask_reward is False:\n reward = -1\n if flag == socceragent.PassStatus.PASS_IN_ROW_OR_COLUMN:\n if intercept:\n reward = -10\n else:\n reward = 10\n done = True\n elif flag == socceragent.PassStatus.PASS_NOT_TO_TEAMMATE or flag == socceragent.PassStatus.PASS_NOT_IN_ROW_OR_COLUMN:\n reward = -10\n done = True\n else:\n if flag ==socceragent.PassStatus.PASS_IN_ROW_OR_COLUMN and not intercept:\n reward = 0\n done = True\n else:\n reward = -10\n done = True\n if flag ==socceragent.PassStatus.HOLDBALL:\n reward = 0\n done = False\n\n return reward, done\n\n def rtr_act(self, action):\n if self.use_tensor_action is True:\n return action\n else:\n if action < self.num_agents[0]:\n x = self.obs[\"teamA\"][action][0]\n y = self.obs[\"teamA\"][action][1]\n else:\n x = self.obs[\"teamB\"][action-self.num_agents[0]][0]\n y = self.obs[\"teamB\"][action-self.num_agents[0]][1]\n return x * self.width + y\n\n def step(self, passing):\n \"\"\"\n Step function\n\n If an agent's action hits another agent or hits the boundary,\n it would stay still.\n \"\"\"\n # if self._first_passing_opportunity_time is None:\n # if self._can_pass():\n # self._first_passing_opportunity_time = self.steps\n\n passing = self.rtr_act(passing)\n self.action_teamA = {\n agent: np.random.randint(5)\n for agent in self.teamA_names\n }\n self.action_teamB = {\n agent: np.random.randint(5)\n for agent in self.teamB_names\n }\n\n action = dict()\n action[\"teamA\"] = self.action_teamA\n action[\"teamB\"] = self.action_teamB\n action[\"passing\"] = [\n math.floor(passing / self.width), passing % self.width\n ]\n\n self.obs, debug_info = self.soccerfield.step(action)\n\n reward, done = self.reward(debug_info)\n\n self.steps = self.steps + 1\n\n if self.steps >= self.max_steps:\n done = True\n self.draw()\n if self.use_state_feature is False:\n return self.obs['image'], reward, done, {\n 'intercepted':\n debug_info[\"intercept\"]\n # 'first_passing_opportunity_time':\n # self._first_passing_opportunity_time\n }\n else:\n return self.rtr_state_feature(self.obs), reward, done, {\n 'intercepted': debug_info[\"intercept\"]}\n\n def rtr_state_feature(self, obs):\n \"\"\"\n If we are using state feature, the representation should be as full as full view of image.\n each agent would be location x, y + index\n each opponent would be location x, y + index \n \n \"\"\"\n rtr_obs = []\n rtr_obs = self.obs[\"teamA\"][self.obs[\"ball_carrier\"]] + [self.obs[\"ball_carrier\"]]\n i = 0\n for loc in self.obs[\"teamA\"]:\n rtr_obs = rtr_obs + loc + [i]\n i = i + 1\n for loc in self.obs[\"teamB\"]:\n rtr_obs = rtr_obs + loc + [i]\n i = i + 1\n return rtr_obs\n \n def draw(self):\n size = self.length * self.scale, self.width * self.scale\n action_mask = np.ndarray(shape=(size[0], size[1], 1), dtype=np.int32)\n action_mask[:, :, 0] = 0\n ball_carrier = self.obs[\"ball_carrier\"]\n\n if self.ground_truth_mask is True:\n x_min = self.obs[\"teamA\"][ball_carrier][0] - 1\n 
x_max = self.obs[\"teamA\"][ball_carrier][0] + 1\n y_min = self.obs[\"teamA\"][ball_carrier][1] - 1\n y_max = self.obs[\"teamA\"][ball_carrier][1] + 1\n\n while ([x_min, self.obs[\"teamA\"][ball_carrier][1]] not in self.soccerfield.agent_locations and x_min >=0):\n x_min -= 1\n\n if [x_min, self.obs[\"teamA\"][ball_carrier][1]] in self.soccerfield.agent_locations[0:self.num_agents[0]]:\n x_min -= 1\n\n x_min += 1\n\n while (x_max < self.length and [x_max, self.obs[\"teamA\"][ball_carrier][1]] not in self.soccerfield.agent_locations):\n x_max += 1\n\n if [x_max, self.obs[\"teamA\"][ball_carrier][1]] in self.soccerfield.agent_locations[0:self.num_agents[0]]:\n x_max +=1\n\n while ([self.obs[\"teamA\"][ball_carrier][0], y_min] not in self.soccerfield.agent_locations and y_min >=0):\n y_min -= 1\n\n if [self.obs[\"teamA\"][ball_carrier][0], y_min] in self.soccerfield.agent_locations[0:self.num_agents[0]]:\n y_min -= 1\n\n y_min += 1\n\n while (y_max < self.width and [self.obs[\"teamA\"][ball_carrier][0], y_max] not in self.soccerfield.agent_locations):\n y_max +=1\n\n if [self.obs[\"teamA\"][ball_carrier][0], y_max] in self.soccerfield.agent_locations[0:self.num_agents[0]]:\n y_max += 1\n\n x = self.obs[\"teamA\"][ball_carrier][0] * self.scale\n y = self.obs[\"teamA\"][ball_carrier][1] * self.scale\n\n if self.ground_truth_mask is True:\n action_mask[x:x + self.scale, (y_min)*self.scale:y_max*self.scale, 0] = 255\n action_mask[(x_min)*self.scale:x_max*self.scale, y:y + self.scale, 0] = 255\n else:\n action_mask[x:x + self.scale, :, 0] = 255\n action_mask[:, y:y + self.scale, 0] = 255\n\n try:\n screen = pygame.display.set_mode(size, 0, 32)\n\n pygame.display.set_caption(\"GridSoccer\")\n\n except:\n os.environ[\"SDL_VIDEODRIVER\"] = 'dummy'\n screen = pygame.display.set_mode(size, 0, 32)\n pygame.display.set_caption(\"GridSoccer\")\n\n screen.fill(green)\n\n for location in self.obs[\"teamA\"]:\n pygame.draw.rect(screen, blue, [\n location[0] * self.scale, location[1] * self.scale,\n self.scale, self.scale\n ])\n\n for location in self.obs[\"teamB\"]:\n\n pygame.draw.rect(screen, yellow, [\n location[0] * self.scale, location[1] * self.scale,\n self.scale, self.scale\n ])\n\n pygame.draw.rect(screen, red, [\n self.obs[\"teamA\"][ball_carrier][0] * self.scale,\n self.obs[\"teamA\"][ball_carrier][1] * self.scale, self.scale,\n self.scale\n ])\n img = pygame.surfarray.array3d(screen)\n img = Image.fromarray(img).convert('L')\n img = np.array(img)[...,np.newaxis]\n\n self.obs[\"image\"] = img\n if self.masking is True:\n self.obs[\"image\"] = np.concatenate(\n (self.obs[\"image\"], action_mask), axis=2)\n\n return\n\n def render(self, close=False):\n \"\"\"\n rendering of the environment\n blue for team A\n yellow for team B\n \"\"\"\n self.draw()\n \"\"\"\n When the display mode does not work, such as running on a cluster.\n \"\"\"\n try:\n pygame.display.update()\n except:\n return\n\n\n\n def _can_pass(self):\n # find the agent with the ball\n agent_with_ball = self.soccerfield.teamA[self.soccerfield.ball]\n\n for agent in self.soccerfield.teamA:\n # skip if this is the agent with the ball\n if agent == agent_with_ball:\n continue\n # is this agent in the same row or column as the one with\n # the ball?\n if (agent_with_ball.loc_x != agent.loc_x\n and agent_with_ball.loc_y != agent.loc_y):\n continue\n # if the same column, verify that there are no\n # interceptions\n if agent_with_ball.loc_x == agent.loc_x:\n if agent_with_ball.loc_y < agent.loc_y:\n (agent_with_smaller_y,\n 
agent_with_bigger_y) = (agent_with_ball, agent)\n else:\n (agent_with_smaller_y,\n agent_with_bigger_y) = (agent, agent_with_ball)\n for opponent in self.soccerfield.teamB:\n if (opponent.loc_x == agent_with_ball.loc_x\n and opponent.loc_y > agent_with_smaller_y.loc_y\n and opponent.loc_y < agent_with_bigger_y.loc_y):\n # opponent is in the same column and is in\n # between the agent and this teammate\n continue\n\n # Either there is only empty space between the agent\n # and the teammate, or there is another teammate\n # between the agent and this teammate. In either case\n # a successful pass could be made\n return True\n elif agent_with_ball.loc_y == agent.loc_y:\n # if the same row, verify no interceptions\n if agent_with_ball.loc_x < agent.loc_x:\n (agent_with_smaller_x,\n agent_with_bigger_x) = (agent_with_ball, agent)\n else:\n (agent_with_smaller_x,\n agent_with_bigger_x) = (agent, agent_with_ball)\n for opponent in self.soccerfield.teamB:\n if (opponent.loc_y == agent_with_ball.loc_y\n and opponent.loc_x > agent_with_smaller_x.loc_x\n and opponent.loc_x < agent_with_bigger_x.loc_x):\n # opponent is in the same column and is in\n # between the agent and this teammate\n continue\n\n # Either there is only empty space between the agent\n # and the teammate, or there is another teammate\n # between the agent and this teammate. In either case\n # a successful pass could be made\n return True\n\n return False\n\n \n","sub_path":"tensor_state_action_env/grid_soccer_passing.py","file_name":"grid_soccer_passing.py","file_ext":"py","file_size_in_byte":14906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"479631445","text":"import json\nfrom messages import BaseMessage\nfrom msg_codes import REMOTE_HANDSHAKE as REMOTE_HANDSHAKE\n__author__ = 'Mike'\n\n\nclass RemoteHandshakeMessage(BaseMessage):\n def __init__(self, id=None, key=None, cert=None):\n super(RemoteHandshakeMessage, self).__init__()\n self.type = REMOTE_HANDSHAKE\n self.id = id\n self.key = key\n self.cert = cert\n\n @staticmethod\n def deserialize(json_dict):\n msg = RemoteHandshakeMessage()\n # msg.type = json_dict['type']\n # ^ I think it's assumed\n msg.id = json_dict['id']\n msg.key = json_dict['key']\n msg.cert = json_dict['cert']\n return msg\n\n","sub_path":"message_gen/legacy/messages/RemoteHandshakeMessage.py","file_name":"RemoteHandshakeMessage.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"58856836","text":"# Entrypoint for processing data according to dynamodb config \"sort_dou_1\".\n\nimport pandas as pd\nimport re\nimport dou_preparer as dp\n\ndef sel_dou_1(input_data):\n \"\"\"\n Use hard-coded criteria to pre-select articles from DOU section 1.\n Input and output are Pandas DataFrames.\n \"\"\"\n identifica_regex = '(?:portaria|decreto|resolu|medida provisória|lei )'\n veto_orgao_regex = '(?:universidade|instituto federal|superintendência regional|superintendência estadual|colégio|coordenação de processos migratórios|secretaria de fomento e incentivo à cultura|departamento de radiodifusão comercial)'\n veto_orgao_root = ['Conselho Nacional do Ministério Público',\n 'Entidades de Fiscalização do Exercício das Profissões Liberais', \n 'Governo do Estado', 'Ineditoriais', 'Defensoria Pública da União', \n 'Ministério Público da União', 'Poder Judiciário', 'Prefeituras', \n 'Tribunal de Contas da União', 'Atos do Poder Judiciário']\n\n # Get secao 1:\n sel_data 
= input_data.loc[input_data['secao'] == 1]\n \n # Apply cuts:\n sel_data = sel_data.loc[(~sel_data['identifica'].isnull()) & \n (sel_data['identifica'].str.lower().str.contains(identifica_regex))]\n sel_data = sel_data.loc[~sel_data['orgao'].str.lower().str.contains(veto_orgao_regex)]\n sel_data = sel_data.loc[~sel_data.orgao.apply(lambda s: s.split('/')[0]).isin(veto_orgao_root)]\n \n return sel_data\n \n\ndef process(code, input_data):\n \"\"\"\n Process `input_data` (Pandas DataFrame) using hard-coded instructions below\n and the python object `code`.\n \"\"\"\n \n # Pre-process DOU data in place, to reproduce transformations made with SQL in the \n # model training data.\n dp.prepare_dou_df(input_data)\n\n # Select relevant data:\n input_data = sel_dou_1(input_data)\n \n # Predict:\n predicted_class = code.predict(input_data)\n\n # Join prediction to data:\n input_data['predicted_rank'] = pd.Series(predicted_class, index=input_data.index)\n \n return input_data\n","sub_path":"lambda/python-process/sort_dou_1.py","file_name":"sort_dou_1.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"642053249","text":"TITLE = 'Остановитязь'\n\nSIZE = WIDTH, HEIGHT = 1280, 720\nTILE_SIZE = 64\n\nFPS = 60\nFRAME_DUR = 1000 // FPS\nIDLE_ACC_MARGIN = 1\n\nPLAYER_ACC = 1.75\nPLAYER_JUMP_STR = 25\nJUMP_CUT_THRESHOLD = 10\n\nBASE_ENEMY_DETECT_RANGE = 600\nBASE_ENEMY_ATTACK_RANGE = 70\nBASE_ENEMY_SPEED = 0.875\nATTACK_MARGIN = 10\nKNOCKBACK = 15\n\nFRICTION = 0.18\nGRAVITY = 1","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"389038344","text":"\"\"\"\nDescription :\n-------------\n This script describes feature classes. 
Available arcpy.Describe object\n attributes are reported in the output.\n\nUsage :\n-------\n The tool when executed requires the following user input parameters:\n\n 1) Feature Classes Table - list of feature classes to describe\n 2) Output Workspace (optional) - location for results\n\n The following output is produced:\n 1) A table indicating the result of each process and the attribute values\n\nPrerequisites :\n---------------\n ArcGIS 10.1\n\nAuthor :\n--------\n NSW OEH EMS-KST\n\nLicense :\n---------\n To be determined\n\nHistory :\n---------\n 0.0 : xx-05-14 - Creation and debug revisions.\n 0.1 : 23-07-14 - For release into GG 0.1\n 0.1a : 24-07-14 - Added version attribute\n 0.1b : 31-07-14 - Refactor\n\"\"\"\n#------------------------------------------------------------------------------\n# imports and parameters\n\n__version__ = '0.1b'\nimport arcpy\nimport ggUtil\n\n\n# list fields from the various 'describe' objects\n\nFIELDS_ID = ('id,item')\n\nFIELDS_FILE = ('FileSizeKB,FileModified')\n\nFIELDS_DESCRIBE = ('baseName,catalogPath,children,childrenExpanded,'\n 'dataElementType,dataType,extension,file,fullPropsRetrieved,'\n 'metadataRetrieved,name,path')\n\nFIELDS_DATASET = ('canVersion,datasetType,DSID,extent,'\n 'isVersioned,MExtent,spatialReference,ZExtent')\n\nFIELDS_TABLE = ('hasOID,OIDFieldName,fields,CUSTOM:fields,indexes,sensorType')\n\nFIELDS_FCLASS = ('featureType,hasM, hasZ,hasSpatialIndex,shapeFieldName,'\n 'shapeType')\n\nFIELDS_GDBFC = ('areaFieldName,lengthFieldName,representations')\n\n# full set of result fields\nRF = ','.join([FIELDS_ID, FIELDS_FILE, FIELDS_DESCRIBE, FIELDS_DATASET,\n FIELDS_TABLE, FIELDS_FCLASS, FIELDS_GDBFC])\n\n\n# parameter dictionary\nIN = {\n # script parameters\n 'arc_extensions' : '' , # ArcGIS extensions required for this tool\n 'result_fields' : RF , # fields that will be in the results table\n 'item_table_fields' : 'item' , # must contain at least 'item'\n # runtime parameters\n 'items_table' : 1 , # do not change the key name\n 'out_workspace' : 2 , # do not change the key name\n 'result_index' : 3 # do not change the key name\n }\n\n#-------------------------------------------------------------------------------\n# code\n\ndef describe(runPars):\n \"\"\"\n Describe a feature class based on current run parameters\n\n Parameters:\n runPars (dictionary) = The runtime parameters dictionary\n Output:\n 1. 
A record in the result table\n \"\"\"\n\n # localise run parameter refs\n LOG = runPars['log']\n LOG.debug('>> describe(runPars): runPars= {0} IN= {1}'.format(runPars,IN))\n count = str(runPars['count'])\n fc = runPars['item'][0]\n\n # do the business\n try:\n r = [str(count), fc]\n x = r.extend #!\n x([str(ggUtil.get_file_size_kb(fc))])\n x([str(ggUtil.get_file_modified(fc))])\n d = arcpy.Describe(fc)\n get = ggUtil.get_attributes #!\n x(get(d, FIELDS_DESCRIBE.split(','), ['children'], ['name']))\n x(get(d, FIELDS_DATASET.split(','), ['spatialReference'], ['name']))\n x(get(d, FIELDS_TABLE.split(','), ['fields','indexes'], ['name']))\n x(get(d, FIELDS_FCLASS.split(',')))\n x(get(d, FIELDS_GDBFC.split(',')))\n\n LOG.result(','.join(r))\n\n except Exception as e:\n e = ggUtil.strip_breaks(str(e))\n LOG.warn(e, exc_info=True)\n LOG.result(','.join([count, '', e, fc]))\n\n#-------------------------------------------------------------------------------\n# auto function run sequences\n\nRUN = {\n 1 : [], # firstly, general functions to be run in left-to-right order\n 2 : [describe], # secondly, special functions to be iterated over in left-to-right order\n 3 : [] # thirdly, general functions to be run in left-to-right order\n }\n\n#-------------------------------------------------------------------------------\n# execute\n\ndef main():\n ggUtil.GridGarageTool(__file__, __version__).run(IN, RUN)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"src/ggFeatureDescribe.py","file_name":"ggFeatureDescribe.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"323886406","text":"import logging\nimport random\n\nimport luigi\nimport numpy as np\nfrom tsne import bh_sne\n\nimport FastKnn, Utils, Config\nfrom LuigiUtils import MTimeMixin, TimestampedLocalTarget, getSampleIds\nfrom PreReqs import WikiBrainNumbering\nfrom cartograph.PreReqs import SampleCreator\n\nlogger = logging.getLogger('cartograph.coordinates')\n\n\nclass CreateEmbedding(MTimeMixin, luigi.Task):\n '''\n Use TSNE to reduce high dimensional vectors to x, y coordinates for\n mapping purposes\n '''\n def output(self):\n config = Config.get()\n return TimestampedLocalTarget(config.getSample(\"ExternalFiles\",\n \"article_embedding\"))\n\n def requires(self):\n config = Config.get()\n return (\n WikiBrainNumbering(),\n SampleCreator(config.get(\"ExternalFiles\", \"best_vecs_with_id\"))\n )\n\n def run(self):\n config = Config.get()\n # Create the embedding.\n featureDict = Utils.read_features(config.getSample(\"ExternalFiles\",\n \"best_vecs_with_id\"),\n id_set=getSampleIds())\n keys = list(featureDict.keys())\n vectors = np.array([featureDict[vID][\"vector\"] for vID in keys])\n out = bh_sne(vectors,\n pca_d=None,\n theta=config.getfloat(\"PreprocessingConstants\", \"tsne_theta\"))\n X, Y = list(out[:, 0]), list(out[:, 1])\n Utils.write_tsv(config.getSample(\"ExternalFiles\", \"article_embedding\"),\n (\"index\", \"x\", \"y\"), keys, X, Y)\n\n\nclass CreateFullAnnoyIndex(MTimeMixin, luigi.Task):\n def __init__(self, *args, **kwargs):\n config = Config.get()\n super(CreateFullAnnoyIndex, self).__init__(*args, **kwargs)\n self.vecPath = config.get(\"ExternalFiles\", \"vecs_with_id\")\n self.knn = FastKnn.FastKnn(self.vecPath)\n\n def requires(self):\n return WikiBrainNumbering()\n\n def output(self):\n return TimestampedLocalTarget(self.knn.pathAnnoy), TimestampedLocalTarget(self.knn.pathIds)\n\n def run(self):\n 
self.knn.rebuild()\n\n\nclass CreateSampleAnnoyIndex(MTimeMixin, luigi.Task):\n def __init__(self, *args, **kwargs):\n config = Config.get()\n super(CreateSampleAnnoyIndex, self).__init__(*args, **kwargs)\n self.vecPath = config.getSample(\"ExternalFiles\", \"vecs_with_id\")\n self.knn = FastKnn.FastKnn(self.vecPath)\n\n def requires(self):\n config = Config.get()\n return WikiBrainNumbering(), SampleCreator(config.get(\"ExternalFiles\", \"vecs_with_id\"))\n\n def output(self):\n return TimestampedLocalTarget(self.knn.pathAnnoy), TimestampedLocalTarget(self.knn.pathIds)\n\n def run(self):\n self.knn.rebuild()\n\n\nclass CreateSampleCoordinates(MTimeMixin, luigi.Task):\n '''\n Use TSNE to reduce high dimensional vectors to x, y coordinates for\n mapping purposes.\n '''\n def output(self):\n config = Config.get()\n return TimestampedLocalTarget(config.getSample(\"GeneratedFiles\",\n \"article_coordinates\"))\n\n def requires(self):\n return CreateEmbedding()\n\n def run(self):\n config = Config.get()\n\n # Rescale sampled embedded points\n points = Utils.read_features(config.getSample(\"ExternalFiles\",\n \"article_embedding\"))\n keys = list(points.keys())\n X = [float(points[k]['x']) for k in keys]\n Y = [float(points[k]['y']) for k in keys]\n maxVal = max(abs(v) for v in X + Y)\n scaling = config.getint(\"MapConstants\", \"max_coordinate\") / maxVal\n X = [x * scaling for x in X]\n Y = [y * scaling for y in Y]\n\n Utils.write_tsv(config.getSample(\"GeneratedFiles\",\n \"article_coordinates\"),\n (\"index\", \"x\", \"y\"), keys, X, Y)\n\n\nclass CreateFullCoordinates(MTimeMixin, luigi.Task):\n def output(self):\n config = Config.get()\n return TimestampedLocalTarget(config.get(\"GeneratedFiles\", \"article_coordinates\"))\n\n def requires(self):\n return CreateSampleCoordinates(), WikiBrainNumbering(), CreateSampleAnnoyIndex()\n\n def run(self):\n config = Config.get()\n sampleCoords = Utils.read_features(config.getSample(\"GeneratedFiles\", \"article_coordinates\"),\n required=('x', 'y'))\n vecs = Utils.read_features(config.get(\"ExternalFiles\", \"vecs_with_id\"))\n knn = FastKnn.FastKnn(config.getSample(\"ExternalFiles\", \"vecs_with_id\"))\n assert(knn.exists())\n knn.read()\n ids = []\n X = []\n Y = []\n\n def dist2(x0, y0, x1, y1):\n dx = x0 - x1\n dy = y0 - y1\n return (dx * dx + dy * dy) ** 0.5\n\n threshold = config.getfloat('MapConstants', 'max_coordinate') / 100.0\n noise = threshold / 10.0 # for points with only one surrogate, add this much random noise\n\n for i, (id, row) in enumerate(vecs.items()):\n if i % 10000 == 0:\n logger.info('interpolating coordinates for point %d of %d' % (i, len(vecs)))\n if id in sampleCoords:\n x = float(sampleCoords[id]['x'])\n y = float(sampleCoords[id]['y'])\n else:\n if len(row['vector']) == 0: continue\n centroids = []\n for id2, score in knn.neighbors(row['vector'], 10):\n if id2 not in sampleCoords: continue\n x = float(sampleCoords[id2]['x'])\n y = float(sampleCoords[id2]['y'])\n if score >= 0.0:\n closestIndex = -1\n closestDist = 1000000000000\n for i, (s, n, xs, ys) in enumerate(centroids):\n d = dist2(x, y, xs / s, ys / s)\n if d < closestDist:\n closestDist = d\n closestIndex = i\n if closestDist < threshold:\n centroids[closestIndex][0] += score\n centroids[closestIndex][1] += 1\n centroids[closestIndex][2] += x * score\n centroids[closestIndex][3] += y * score\n else:\n centroids.append([score, 1, x * score, y * score])\n\n if not centroids: continue\n\n sumScores, n, sumXs, sumYs = sorted(centroids, reverse=True)[0]\n\n if 
sumScores == 0.0: continue\n\n x = sumXs / sumScores\n y = sumYs / sumScores\n\n # make sure points aren't right on top of each other!\n if n == 1:\n x += random.uniform(-noise, +noise)\n y += random.uniform(-noise, +noise)\n\n X.append(x)\n Y.append(y)\n ids.append(id)\n\n Utils.write_tsv(config.get(\"GeneratedFiles\",\n \"article_coordinates\"),\n (\"index\", \"x\", \"y\"), ids, X, Y)\n","sub_path":"cartograph/Coordinates.py","file_name":"Coordinates.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"589446543","text":"## Last Update: 10.48 PM\n## Ready for optimization\n\n#!/usr/bin/env python\n# encoding: utf-8\n\n# Before running this program, first Start HFO server:\n# $> ./bin/HFO --offense-agents 1\n\n'''\ns: \n - bool (kickable) (0-1)\n - bool (teammate within a ball of radius R) (0-1)\n - bool (closest enemy within a ball of radius R, where closest enemy is NOT goalie) (0-1)\n - bool (goalie within a ball of radius, R) (0-1) goalie proximity (0-10)\n - goal center proximity (0-10)\n - goal opening angle (0-10)\na:\n - MOVE\n - SHOOT\n - PASS\n - DRIBBLE\n - NOOP\n ./bin/HFO --defense-agents=1 --defense-npcs=1 --offense-team=helios --offense-npcs=2 --no-logging --fullstate --headless --trials=100\n'''\n\nimport itertools\nfrom hfo import *\n\nimport random\nimport math\nimport numpy as np\n\nfrom operator import add\n\nGOALIE_STATE = 3\nACTIONS = 4\n\nTEAMMATES = 0\nOPPONENTS = 1\n\nEPSILON = 0.05 \nALPHA = 0.125\nGAMMA =0.95\nXI = 0.5\nINTERCEPT_RADIUS = 0.05\n\nTILE_BASE_NUM = 5\nSTATE_NUM =TILE_BASE_NUM * TILE_BASE_NUM\n\nTRAIN = True\nRANDOM = False\nSARSA = True\n\nRADIUS = 0.10\n\ndef dist(x1, y1, x2, y2):\n return ((x2-x1)**2 + (y2-y1)**2)**0.5\n\ndef heuristic(qvals, t_state, state):\n rtn = [10, 1, 1, 1]\n\n ball_x, ball_y = state[3], state[4]\n def_x, def_y = state [0], state[1]\n\n distance = dist(def_x, def_y, ball_x, ball_y)\n\n if distance <= INTERCEPT_RADIUS:\n rtn = [10, 1, 1, 1]\n\n if distance >= 0.5 and def_x>=0:\n rtn = [0, 2, 0, 10]\n\n return rtn\n\ndef getTile(x, y):\n x = int(math.floor(2.5*(x+1)))\n y = int(math.floor(2.5*(y+1)))\n if x > 4:\n x = 4\n if y > 4:\n y = 4\n\n return TILE_BASE_NUM * y + x\n\ndef getGoalieTile(y):\n if y < -0.1:\n return 0\n if y < 0.1:\n return 1\n return 2\n\ndef oppHasBall(state):\n ball_tile = getTile(state[3], state[4])\n for o in range(OPPONENTS):\n o_x,o_y = (state[9+6*TEAMMATES+(3*o)], state[9+6*TEAMMATES+(3*o)+1])\n opp_tile = getTile(o_x, o_y)\n if ball_tile == opp_tile:\n return True\n return False\n\n# Return the trimmed state space\ndef getTrimmedState(state):\n team_prox = 0\n opp_prox = 0\n\n\n robot_tile = getTile(state[0], state[1])\n ball_tile = getTile(state[3], state[4])\n goalie_tile = getGoalieTile(state[9+3*TEAMMATES+1])\n\n\n\n o_x, o_y = (state[0], state[1])\n for i in range(TEAMMATES):\n x, y = (state[9+3*TEAMMATES+3*i+1], state[9+3*TEAMMATES+3*i+2])\n if dist(o_x, o_y, x, y) < RADIUS:\n team_prox = 1\n break\n\n for i in range(OPPONENTS):\n x, y = (state[9+6*TEAMMATES+3*i+1], state[9+6*TEAMMATES+3*i+2])\n if dist(o_x, o_y, x, y) < RADIUS and state[9+6*TEAMMATES+3*i+3] != 1: #not goalie\n opp_prox = 1\n \n return (robot_tile, ball_tile, goalie_tile, team_prox, opp_prox)\n\ndef getQvals(qvals, state):\n return qvals[state[0]][state[1]][state[2]][state[3]][state[4]]\n\n\ndef getAction(qvals, t_state, state):\n if TRAIN:\n if random.random() < EPSILON:\n return random.randint(0, ACTIONS-1)\n qs = map(add, 
getQvals(qvals, t_state), heuristic(qvals, t_state, state))\n else:\n qs = getQvals(qvals, t_state)\n\n tmp = qs[:]\n\n return qs.index(max(tmp))\n\ndef main():\n\n # Create the HFO Environment\n hfo = HFOEnvironment()\n\n # Connect to the server with the specified\n # feature set. See feature sets in hfo.py/hfo.hpp.\n hfo.connectToServer(HIGH_LEVEL_FEATURE_SET,\n 'bin/teams/base/config/formations-dt', 6000,\n 'localhost', 'base_right', False)\n \n if TRAIN:\n\n qvals = [[[[[[0 for k in range(ACTIONS)] for op_pr in range(2)] for tm_pr in range (2)] for gli_tile in range(GOALIE_STATE)] for j in range(STATE_NUM)] for i in range (STATE_NUM)]\n for i in range(STATE_NUM):\n for j in range(STATE_NUM):\n for gli_tile in range (GOALIE_STATE):\n for tm_pr in range(2):\n for op_pr in range (2):\n for k in range(ACTIONS):\n qvals[i][j][gli_tile][tm_pr][op_pr][k] = 0\n\n else:\n qvals = np.load('sarsa_defense_RL_heuristic_1000.npy').tolist()\n\n episode_num = 0\n for episode in itertools.count():\n episode_num += 1\n status = IN_GAME\n\n state = hfo.getState()\n t_state = getTrimmedState(state)\n\n if not RANDOM:\n # Pick new action, a', to take with epsilon-greedy strategy\n a = getAction(qvals, t_state, state)\n else:\n a = random.randint(0, ACTIONS-1)\n\n\n while status == IN_GAME:\n if a == 0:\n hfo.act(INTERCEPT)\n elif a == 1:\n hfo.act(GO_TO_BALL)\n elif a == 2:\n hfo.act (NOOP)\n else:\n hfo.act (DEFEND_GOAL)\n\n # Advance the environment and get the game status\n status = hfo.step()\n\n # Grab the state features from the environment\n next_state = hfo.getState()\n #print(len(state)) 23?!\n next_t_state = getTrimmedState(next_state)\n\n # Get reward, update Q-val\n\n #Get the reward!\n r = 0\n if status == GOAL:\n r -= 10\n if status == OUT_OF_TIME:\n r += 15\n if status == CAPTURED_BY_DEFENSE:\n r += 15\n if status == OUT_OF_BOUNDS:\n r += 15\n\n if TRAIN:\n getQvals(qvals, t_state)[a] += ALPHA*(r + (GAMMA*max(getQvals(qvals, next_t_state))) - getQvals(qvals, t_state)[a])\n\n if not RANDOM:\n # Pick new action, a', to take with epsilon-greedy strategy\n next_a = getAction(qvals, next_t_state, next_state)\n else:\n next_a = random.randint(0, ACTIONS-1)\n\n if TRAIN and SARSA:\n getQvals(qvals, t_state)[a] += ALPHA*(r + (GAMMA*getQvals(qvals, next_t_state)[next_a]) - getQvals(qvals, t_state)[a])\n\n\n state = next_state\n t_state = next_t_state\n a = next_a\n\n # Check the outcome of the episode\n print(('Episode %d ended with %s'%(episode, hfo.statusToString(status))))\n # Quit if the server goes down\n\n if TRAIN and episode_num % 499 == 0:\n q = np.array(qvals)\n np.save(('sarsa_defense_RL_heuristic_' + str(episode_num) + '.npy'), q)\n\n if status == SERVER_DOWN:\n hfo.act(QUIT)\n exit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dev/defensive_qagent.py","file_name":"defensive_qagent.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"623036962","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom django.urls import include\nfrom MyApp import views\nfrom rest_framework_jwt.views import refresh_jwt_token\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^rest-auth/', include('rest_auth.urls')),\n url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),\n url(r'^refresh-token/', refresh_jwt_token),\n url(r'^addMsgToRoom/$', views.addMsgToRoom),\n url(r'^getRoomMsgs/(?P[0-9]+)$', views.getRoomMsgs),\n 
url(r'^getLastRoomMsg/(?P[0-9]+)$', views.getLastRoomMsg),\n url(r'^addRoom/$', views.addRoom),\n url(r'^getUserRooms/(?P[0-9]+)$', views.getUserRooms),\n url(r'^addFreind/$', views.addFreind),\n url(r'^accept_ignore_Freind/(?P[0-9]+)$', views.accept_ignore_Freind),\n url(r'^getUsersByEmail_Nom_Prenom/(?P[\\w.@+-]+)/$', views.getUsersByEmail_Nom_Prenom),\n url(r'^getUsersFreinds/(?P[0-9]+)$', views.getUserFreinds),\n url(r'^getAllUsers/', views.getAllUsers),\n url(r'^getUserDetails/(?P[0-9]+)$', views.getUserDetails),\n url(r'^getRoomDetails/(?P[0-9]+)/(?P[0-9]+)$', views.getRoomDetails),\n]\n\n\n\n","sub_path":"SampleApiProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"487720572","text":"from os import path, mkdir, listdir, remove\nimport fitz\n\n\nclass Prepare:\n \"\"\" Operates on folders and files to be processed \"\"\"\n\n def __init__(self, date_tour, tour_name1, tour_name2, nurse1, nurse2):\n \"\"\" Initializing variables \"\"\"\n\n self.base_path = path.dirname(path.abspath(__file__))\n self.documents_dir = path.join(self.base_path, \"documents\")\n self.pref_dir = path.join(self.documents_dir, \"prefecture\")\n self.vill_dir = path.join(self.documents_dir, \"village\")\n self.date_tour = date_tour\n self.tour_name1 = tour_name1\n self.tour_name2 = tour_name2\n self.nurse1 = nurse1\n self.nurse2 = nurse2\n self.resolution = 200\n\n\n def createFolders(self):\n \"\"\" Creates empty directories or delete all files within pdf/ and jpg/ \"\"\"\n\n try:\n mkdir(self.documents_dir)\n mkdir(self.pref_dir)\n mkdir(self.vill_dir)\n\n except FileExistsError:\n for directory in [self.pref_dir, self.vill_dir]:\n for file in listdir(directory):\n file = path.join(directory, file)\n remove(file)\n\n\n def saveOnLocal(self, files_list):\n \"\"\" Saves PDF file(s) to PDF folder \"\"\"\n\n for pos, directory in enumerate([self.pref_dir, self.vill_dir]):\n with open(path.join(directory, files_list[pos].name), 'wb+') as destination:\n for chunk in files_list[pos].chunks():\n destination.write(chunk)\n\n\n def converPdf2Jpg(self, file, location):\n \"\"\" Converts pdf file to jpg file(s) for each foler\"\"\"\n\n file = fitz.open(location + file)\n for page in file:\n image = page.get_pixmap(dpi=self.resolution)\n image.save(file.name.rsplit('.', 1)[0].lower() + \"_%i.jpg\" % page.number)\n\n\n def del_pdf_files(self):\n \"\"\" Moves jpg files \"\"\"\n\n for directory in [self.pref_dir, self.vill_dir]:\n for file in listdir(directory):\n if file.endswith('.pdf'):\n file = path.join(directory, file)\n remove(file)\n\n\n\n\n\n","sub_path":"mainsite/l_orchidee/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"32816954","text":"from collections import defaultdict\n\ndef dfs(graph, start, cost_table, visit, n, cost, cur_min = 1000000, prev_prev = None):\n if set(visit) == set(list(range(n))):\n return cost\n prev = start\n for node in graph[start]:\n if node != prev_prev:\n cost = dfs(graph, node, cost_table, visit+[node], n, cost+cost_table[prev][node], cur_min, prev_prev = start)\n if cost< cur_min:\n cur_min = cost\n\n return cur_min\n\ndef solution(n, costs):\n graph = defaultdict(list)\n cost_table = [[0 for _ in range(4)] for _ in range(4)]\n for is1, is2, cost in costs:\n graph[is1].append(is2)\n graph[is2].append(is1)\n 
cost_table[is1][is2] = cost\n cost_table[is2][is1] = cost\n\n costs = []\n for start in range(n):\n cost = dfs(graph, start, cost_table, [start], n, 0)\n costs.append(cost)\n\n return costs\n\nprint(solution(4, [[0,1,1],[0,2,2],[1,2,5],[1,3,1],[2,3,8]]\t))","sub_path":"bfs/bridge_construction.py","file_name":"bridge_construction.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"149457388","text":"import string, os, json\nfrom typing import TextIO\n\ndef get_texts(book: TextIO) -> list:\n \"\"\"Returns a list of all 970 characters long texts from the given book file.\"\"\"\n content = book.read()\n chars_limit = 970\n texts = [content[i:i + chars_limit] for i in range(0, len(content), chars_limit)]\n return [\"...\" + t + \"...\" if t != texts[0] else t + \"...\" for t in texts]\n\ndef remove_punctuation(word: str) -> str:\n \"Removes any leading spaces or punctuation from the given word.\"\n return word.strip().strip(string.punctuation)\n\ndef word_frequencies(word_list: TextIO) -> dict:\n \"\"\"Returns a dictionary where each key-value pair is a word and its frequency in frequency list of words. \"\"\"\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies\n\ndef frequency(w: str) -> float:\n \"\"\"Returns the frequency of the given word by looking it up in the dictionary of words and their frequencies, frequency_list\"\"\"\n return frequency_list.get(remove_punctuation(w), 0)\n\ndef complexity(text:str) -> float:\n \"\"\"Returns the complexity of the given text by adding up the frequencies of all its words.\"\"\"\n words = text.split(' ')\n freqs = [frequency(w) for w in words]\n return sum(freqs) / (len(frequency_list) - freqs.count(0)) #sum of the frequencies / all the words that were in the list\n\ndef difficulty(score: float) -> str:\n \"\"\"Returns the difficulty category of score. 
It can be one of: Easy, Medium or Hard.\"\"\"\n if score <= 0.000055:\n return \"Easy\"\n elif score <= 0.000099:\n return \"Medium\"\n else:\n return \"Hard\"\n\ndef keywords(text:str) -> list:\n \"\"\"Returns a list of 5 keywords from the given text.\"\"\"\n return sorted(set(text.split(' ')), key=frequency, reverse=True)[0:5]\n\ndef possible_keywords(text:str) -> list:\n return []\n\ndef coverage(text: str) -> float:\n \"\"\"Returns the percentage of (unique) words from the given text that are in the frequency list.\"\"\"\n words = set(text.split(' '))\n return len([w for w in words if frequency(w) != 0]) / len(words) * 100\n\ndef categorize(book: TextIO) -> list:\n \"\"\"Returns a list with all the 100 words long texts from the given book along with their difficulties and keywords.\"\"\"\n chunks = get_texts(book)\n texts = []\n for t in chunks:\n level = difficulty(complexity(t))\n texts.append((t, level, keywords(t)))\n return texts\n\ndef save_frequencies(freqs: dict) -> None:\n \"\"\"Stores the given frequency list in a file ('freq_list').\"\"\"\n with open(\"freq_list\", 'w') as stored_freq_list:\n json.dump(freqs, stored_freq_list)\n\ndef load_frequencies() -> dict:\n \"\"\"Loads the frequency list stored in a file ('freq_list') and returns it.\"\"\"\n with open(\"freq_list\", 'r') as stored_freq_list:\n return json.load(stored_freq_list)\n\nwith open(\"final_version_words.txt\", 'r') as word_list, open(\"book.txt\", 'r', encoding='utf-8') as book:\n if os.path.exists(\"freq_list\"):\n frequency_list = load_frequencies()\n else:\n frequency_list = word_frequencies(word_list)\n save_frequencies(frequency_list)\n texts = categorize(book)\n\nwith open(\"final_texts.txt\", 'w', encoding='utf-8') as final_texts:\n for t in texts:\n text, level, kwords = t[0], t[1], \", \".join([remove_punctuation(w) for w in t[2]])\n final_texts.write(\"%s level.\\nKeywords: %s\\n%s\\n\" % (level, kwords, text))\n final_texts.write(\"--------------------------------------------------------------------------\\n\")","sub_path":"textgen.py","file_name":"textgen.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"124954625","text":"import requests\nimport execjs\nimport time\nimport re\nimport json\nfrom PIL import Image\nimport numpy as np\nimport random\n\n\nclass Geetest(object):\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36\"\n session = requests.session()\n\n def first_get_php(self):\n gt_url = f\"https://www.geetest.com/demo/gt/register-slide-official?t={int(time.time() * 1000)}\"\n gt_res = self.session.get(gt_url, headers={\"user-agent\": self.user_agent})\n php_data = gt_res.json()\n challenge = php_data.get(\"challenge\")\n gt = php_data.get(\"gt\")\n\n with open(\"./JS/get_w.js\", 'r', encoding=\"utf-8\")as f:\n js_content = f.read()\n ctx = execjs.compile(js_content)\n aes_key = ctx.call(\"jNfC\")\n\n # get.php\n with open(\"./JS/get_w.js\", 'r', encoding=\"utf-8\")as f:\n js_content = f.read()\n ctx = execjs.compile(js_content)\n new_w = ctx.call(\"generate\", challenge, gt, aes_key)\n\n url = \"https://api.geetest.com/get.php\"\n\n callback = f\"geetest_{int(time.time() * 1000)}\"\n\n params = {\n \"gt\": gt,\n \"challenge\": challenge,\n \"lang\": \"zh-cn\",\n \"pt\": \"0\",\n \"client_type\": \"web\",\n \"w\": new_w,\n \"callback\": callback\n }\n res = self.session.get(url, headers={\"User-agent\": 
self.user_agent, \"Host\": \"api.geetest.com\",\n \"Referer\": \"https://www.geetest.com/show\"}, params=params)\n\n json_data = res.text\n s = re.findall(f\"{callback}\\((.*)\\)\", json_data, re.S)[0]\n res_data = json.loads(s)\n\n return {\"res_data\": res_data, \"gt\": gt, \"challenge\": challenge, \"aes_key\": aes_key}\n\n def ajax_php(self, php_data):\n\n callback = f\"geetest_{int(time.time() * 1000)}\"\n url = \"https://api.geetest.com/ajax.php\"\n\n with open(\"./JS/get_w.js\", 'r', encoding=\"utf-8\")as f:\n js_content = f.read()\n ctx = execjs.compile(js_content)\n second_w = ctx.call(\"two_eval\", php_data[\"challenge\"], php_data[\"gt\"], php_data[\"res_data\"],\n php_data[\"aes_key\"])\n\n ajax_params = {\n \"gt\": php_data[\"gt\"],\n \"challenge\": php_data[\"challenge\"],\n \"lang\": \"zh-cn\",\n \"pt\": \"0\",\n \"client_type\": \"web\",\n \"w\": second_w,\n \"callback\": callback\n }\n ajax_res = self.session.get(url, headers={\"User-agent\": self.user_agent, \"Host\": \"api.geetest.com\",\n \"Referer\": \"https://www.geetest.com/show\"}, params=ajax_params)\n print(ajax_res.text)\n\n def second_php(self, php_data):\n callback = f\"geetest_{int(time.time() * 1000)}\"\n\n url = \"https://api.geetest.com/get.php\"\n data = {\n 'is_next': \"true\",\n 'type': 'slide3',\n 'gt': php_data[\"gt\"],\n 'challenge': php_data['challenge'],\n 'https': \"true\",\n 'protocol': 'https://',\n 'offline': 'false',\n 'product': 'embed',\n 'api_server': 'api.geetest.com',\n 'isPC': \"true\",\n 'width': '100%',\n 'callback': callback\n }\n\n response_img_json = self.session.get(url, headers={\"User-agent\": self.user_agent, \"Host\": \"api.geetest.com\",\n \"Referer\": \"https://www.geetest.com/show\"}, params=data)\n s = re.findall(f\"{callback}\\((.*)\\)\", response_img_json.text, re.S)[0]\n res_data = json.loads(s)\n print(res_data)\n return res_data\n\n @staticmethod\n def core(path, index):\n\n # js中获取到拼图还原的顺序\n position = [\n [157, 80, 167, 160], [145, 80, 155, 160], [265, 80, 275, 160],\n [277, 80, 287, 160], [181, 80, 191, 160], [169, 80, 179, 160],\n [241, 80, 251, 160], [253, 80, 263, 160], [109, 80, 119, 160],\n [97, 80, 107, 160], [289, 80, 299, 160], [301, 80, 311, 160],\n [85, 80, 95, 160], [73, 80, 83, 160], [25, 80, 35, 160],\n [37, 80, 47, 160], [13, 80, 23, 160], [1, 80, 11, 160],\n [121, 80, 131, 160], [133, 80, 143, 160], [61, 80, 71, 160],\n [49, 80, 59, 160], [217, 80, 227, 160], [229, 80, 239, 160],\n [205, 80, 215, 160], [193, 80, 203, 160], [145, 0, 155, 80],\n [157, 0, 167, 80], [277, 0, 287, 80], [265, 0, 275, 80],\n [169, 0, 179, 80], [181, 0, 191, 80], [253, 0, 263, 80],\n [241, 0, 251, 80], [97, 0, 107, 80], [109, 0, 119, 80],\n [301, 0, 311, 80], [289, 0, 299, 80], [73, 0, 83, 80],\n [85, 0, 95, 80], [37, 0, 47, 80], [25, 0, 35, 80],\n [1, 0, 11, 80], [13, 0, 23, 80], [133, 0, 143, 80],\n [121, 0, 131, 80], [49, 0, 59, 80], [61, 0, 71, 80],\n [229, 0, 239, 80], [217, 0, 227, 80], [193, 0, 203, 80],\n [205, 0, 215, 80]\n ]\n # js中获取到新图片的位置\n mm = [\n [0, 0], [10, 0], [20, 0], [30, 0],\n [40, 0], [50, 0], [60, 0], [70, 0],\n [80, 0], [90, 0], [100, 0], [110, 0],\n [120, 0], [130, 0], [140, 0], [150, 0],\n [160, 0], [170, 0], [180, 0], [190, 0],\n [200, 0], [210, 0], [220, 0], [230, 0],\n [240, 0], [250, 0], [0, 80], [10, 80],\n [20, 80], [30, 80], [40, 80], [50, 80],\n [60, 80], [70, 80], [80, 80], [90, 80],\n [100, 80], [110, 80], [120, 80], [130, 80],\n [140, 80], [150, 80], [160, 80], [170, 80],\n [180, 80], [190, 80], [200, 80], [210, 80],\n [220, 80], [230, 
80], [240, 80], [250, 80]\n        ]\n        path_full = []\n        for k, i in enumerate(path):\n            img = Image.open(i)\n            to_image = Image.new('RGB', (260, 160))\n            for index, p in enumerate(position):\n                cropped = img.crop(tuple(p))  # (left, upper, right, lower)\n                to_image.paste(cropped, (mm[index][0], mm[index][1]))\n            to_image.save(f'{k}.jpg')\n            path_full.append(to_image)\n\n        def is_similar_color(x_pixel, y_pixel):\n            # colour comparison: treat the pixels as different when the summed channel distance is large\n            if sum(np.abs(np.array(x_pixel) - np.array(y_pixel))) >= 200:\n                return True\n\n        # compute the slider offset distance\n        def get_offset_distance(cut_image, full_image, index):\n            for x in range(cut_image.width):\n                for y in range(cut_image.height):\n                    cpx = cut_image.getpixel((x, y))\n                    fpx = full_image.getpixel((x, y))\n                    if is_similar_color(cpx, fpx):\n                        img = full_image.crop((x, y, x + 45, y + 40))\n                        # save the image at the computed position to check whether it is the notch\n                        img.save(\"3.jpg\")\n                        # return [{\"x\": x+15, \"y\": random.randint(-3, 3)}]\n                        # return [{\"x\": x-7, \"y\": random.randint(-3, 3)}]\n                        return [{\"x\": x - 6, \"y\": random.randint(-3, 3)}]\n\n        return get_offset_distance(path_full[0], path_full[1], index)\n\n\nif __name__ == '__main__':\n    g = Geetest()\n    get_data = g.first_get_php()\n    g.ajax_php(get_data)\n    g.second_php(get_data)\n","sub_path":"swipe_of_geetest/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"287539209","text":"\n\nfrom xai.brain.wordbase.nouns._nodule import _NODULE\n\n# class header\nclass _NODULES(_NODULE, ):\n\tdef __init__(self,): \n\t\t_NODULE.__init__(self)\n\t\tself.name = \"NODULES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"nodule\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_nodules.py","file_name":"_nodules.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"452561011","text":"import argparse\nimport json\nimport requests\n\ndef check_performance(prometheus_addr, version, threshold):\n\n    print('------- Performance Check -------')\n\n    base_addr = 'http://{}/api/v1/query'.format(prometheus_addr)\n\n    success_addr = '{}?query=canary_http_success_total'.format(base_addr)\n    success_response = requests.get(success_addr)\n\n    total_success = 0\n    for result in success_response.json()['data']['result']:\n        if result['metric']['pod'][:21] == 'canarylab-data-svc-{}'.format(version):\n            total_success += int(result['value'][1])\n\n    print('Total success: {}'.format(total_success))\n\n\n    perf_addr = '{}?query=canary_response_duration_bucket'.format(base_addr)\n    perf_response = requests.get(perf_addr)\n\n    under_1000 = 0\n    for result in perf_response.json()['data']['result']:\n        if result['metric']['pod'][:21] == 'canarylab-data-svc-{}'.format(version):\n            if result['metric']['le'] == '1000':\n                under_1000 += int(result['value'][1])\n\n    print('Under threshold: {}'.format(under_1000))\n\n    if float(under_1000)/float(total_success) < threshold:\n        return False\n    else:\n        return True\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Check prometheus metrics for poor performance')\n    parser.add_argument('prometheus_addr', help='IP address or DNS name of prometheus server')\n    parser.add_argument('performance_threshold', type=float, help='Proportion of responses required to be 1000ms or less')\n    parser.add_argument('--version', default='v2', help='Workload version to check')\n    args = parser.parse_args()\n\n    ok = check_performance(args.prometheus_addr, args.version, 
args.performance_threshold)\n    if not ok:\n        print('Performance threshold violated!')\n    else:\n        print('Everything is fine')\n\n","sub_path":"native/performance_check.py","file_name":"performance_check.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"496434359","text":"import cv2\nimport numpy as np\nimport scipy.io\nimport os.path\nfrom heatmappy import Heatmapper\nfrom PIL import Image\n'''\nThis script saves the tracking result videos to\nvideo/camera1_result.avi\nvideo/camera2_result.avi\n...\nvideo/camera8_result.avi\n'''\n\nstart_time = [5543, 3607, 27244, 31182, 1, 22402, 18968, 46766]\nNumFrames = [359580, 360720, 355380, 374850, 366390, 344400, 337680, 353220]\nPartFrames = [[38370, 38370, 38400, 38670, 38370, 38400, 38790, 38370],\n              [38370, 38370, 38370, 38670, 38370, 38370, 38640, 38370],\n              [38370, 38370, 38370, 38670, 38370, 38370, 38460, 38370],\n              [38370, 38370, 38370, 38670, 38370, 38370, 38610, 38370],\n              [38370, 38370, 38370, 38670, 38370, 38400, 38760, 38370],\n              [38370, 38370, 38370, 38700, 38370, 38400, 38760, 38370],\n              [38370, 38370, 38370, 38670, 38370, 38370, 38790, 38370],\n              [38370, 38370, 38370, 38670, 38370, 38370, 38490, 38370],\n              [38370, 38370, 38370, 38670, 38370, 37350, 28380, 38370],\n              [14250, 15390, 10020, 26790, 21060, 0, 0, 7890]]\n\n\ndef calucate_part(icam, frame):\n    sum_frame = 0\n    for part_num in range(0, 10):\n        previs_sum = sum_frame\n        sum_frame += PartFrames[part_num][icam - 1]\n        if sum_frame >= frame + 1:\n            return part_num, frame - previs_sum\n\n\ndef load_mat():\n    trajectory = scipy.io.loadmat(\n        'D:/Code/DeepCC/DeepCC/experiments/demo/L3-identities/L3Final_trajectories.mat'\n    )\n    data = trajectory['trackerOutputL3']\n    return data\n\n\ndef simple_data(all_data):\n    # store the data in numpy-array form\n    # [:, 0]: id / [:, 1]: start time in camera1 / ... 
/ [:, 8]: start time in carera8\n # because it will cost many time\n # so i save the nparray to id_data.npy\n if os.path.isfile('data/id_data.npy'):\n id_data = np.load('data/id_data.npy')\n return id_data\n else:\n total_id = np.unique(all_data[:, 2])\n id_data = np.zeros((len(total_id), 9))\n for id_num in total_id:\n print(id_num)\n id_data[int(id_num) - 1, 0] = id_num\n data_new = [\n i for i in range(len(data)) if int(data[i, 2]) == id_num\n ]\n icam_check = np.unique(data[data_new, 0])\n for icam in range(1, 9):\n if icam in icam_check:\n id_data[int(id_num) - 1, int(icam)] = np.min(\n [data[i, 1] for i in data_new if data[i, 0] == icam])\n np.save('/data/id_data.npy', id_data)\n return id_data\n\n\ndef random_color(number_people):\n color = np.zeros((number_people + 1, 3))\n for i in range(0, number_people + 1):\n color[i] = list(np.random.choice(range(256), size=3))\n return color\n\n\ndata = load_mat()\nglobal data_part\ncolor = random_color(len(set(data[:, 2])))\nid_data = simple_data(data)\n\n\ndef show_video(icam, startFrame, endFrame):\n # only show video\n part_cam, part_frame = calucate_part(icam, startFrame)\n filename = 'D:/Code/DukeMTMC/videos/camera' + str(icam) + '/0000' + str(\n part_cam) + '.MTS'\n cap = cv2.VideoCapture(filename)\n part_cam_previous = part_cam\n cap.set(1, part_frame)\n for frame_num in range(startFrame, endFrame):\n part_cam, part_frame = calucate_part(icam, frame_num)\n if part_cam != part_cam_previous:\n filename = 'D:/Code/DukeMTMC/videos/camera' + str(\n icam) + '/0000' + str(part_cam) + '.MTS'\n cap = cv2.VideoCapture(filename)\n part_cam_previous = part_cam\n ret, frame_img = cap.read()\n frame_img = draw_bb(icam, frame_num, frame_img, startFrame)\n frame_img = cv2.resize(frame_img, (640, 360))\n cv2.imshow(\"video\", frame_img)\n cv2.waitKey(1)\n print(str(frame_num) + 'no')\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef find_index(icam, frame, startFrame):\n window_size_bb = 80\n window_size_heatmap = 8000\n find_ind_heat = [\n i for i in range(len(data_part))\n if data_part[i][1] <= frame and data_part[i][1] >= frame -\n window_size_heatmap and data_part[i][1] >= startFrame\n ]\n find_ind_bb = [\n i for i in find_ind_heat if data_part[i][1] >= frame - window_size_bb\n ]\n find_ind = [i for i in find_ind_bb if data_part[i][1] == frame]\n return find_ind_bb, find_ind_heat, find_ind, len(find_ind)\n\n\ndef draw_bb(icam, frame, img, startFrame, find_ind):\n # draw the bounding box\n for i in find_ind:\n color_id = tuple(color[int(data_part[i][2])])\n left_x = int(data_part[i][3])\n left_y = int(data_part[i][4])\n right_x = int(data_part[i][3] + data_part[i][5])\n right_y = int(data_part[i][4] + data_part[i][6])\n if data_part[i][1] == frame:\n id_num = int(data_part[i][2])\n duaration_s = int(\n int(data_part[i][1] - id_data[id_num - 1, icam]) / 60)\n label_text = str(int(data_part[i][2]))\n duaration_text = str(duaration_s)\n cv2.rectangle(img, (left_x, left_y), (right_x, right_y), color_id,\n 3)\n cv2.rectangle(img, (left_x - 3, left_y - 60),\n (right_x + 3, left_y), color_id, -1)\n cv2.putText(img, label_text, (left_x, left_y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)\n cv2.putText(img, duaration_text, (left_x, left_y - 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2)\n cv2.circle(img, (int(data_part[i][3] + data_part[i][5] / 2), right_y),\n 7, color_id, -1)\n return img\n\n\ndef worldTomap(point_x, point_y):\n # get the map point\n image_points = np.array([[307.4323, 469.2366], [485.2483, 708.9507]])\n world_points = 
np.array([[0, 0], [24.955, 32.85]])\n diff = image_points[1] - image_points[0]\n scale = diff / world_points[1]\n trans = image_points[0]\n map_x = int(point_x * scale[0] + trans[0])\n map_y = int(point_y * scale[1] + trans[1])\n return map_x, map_y\n\n\ndef draw_traj(icam, frame, find_ind):\n # draw the 2d location in the map\n img = cv2.imread('D:/Code/DeepCC/DeepCC/src/visualization/data/map.jpg')\n for i in find_ind:\n color_id = tuple(color[int(data_part[i][2])])\n px, py = worldTomap(int(data_part[i][7]), int(data_part[i][8]))\n cv2.circle(img, (px, py), 7, color_id, -1)\n return img\n\n\ndef cal_heatmap(icam, frame, startFrame, find_ind):\n # draw the image for heatmap\n heatmap_value = []\n path = 'D:/Code/DeepCC/DeepCC/src/visualization/data/background' + str(\n icam) + '.jpg'\n background_img = Image.open(path)\n for i in find_ind:\n center_x = int(data_part[i][3] + (data_part[i][5] / 2))\n center_y = int(data_part[i][4] + (data_part[i][6] / 2))\n heatmap_value.append((center_x, center_y))\n heatmapper = Heatmapper()\n heatmap = heatmapper.heatmap_on_img(heatmap_value, background_img)\n img = cv2.cvtColor(np.asarray(heatmap), cv2.COLOR_RGB2BGR)\n return img\n\n\ndef cal_localtime(icam, frame_num):\n # get the real locat time\n start_sequence = 127720\n return start_sequence + frame_num - start_time[icam - 1] + 1\n\n\ndef main():\n startFrame_global = 0\n endFrame_global = 59820\n\n for icam in range(1, 9):\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out_filename = 'video/camera' + str(icam) + '_result.avi'\n out = cv2.VideoWriter(out_filename, fourcc, 60, (1018, 750))\n\n part_cam_previous = -1\n startFrame = cal_localtime(icam, startFrame_global)\n endFrame = cal_localtime(icam, endFrame_global)\n\n global data_part\n data_part = [\n data[i, :] for i in range(len(data)) if data[i, 0] == icam\n and data[i, 1] >= startFrame and data[i, 1] <= endFrame\n ]\n\n for current_frame in range(startFrame, endFrame):\n part_cam, part_frame = calucate_part(icam, current_frame)\n if current_frame == startFrame:\n filename = 'D:/Code/DukeMTMC/videos/camera' + str(\n icam) + '/0000' + str(part_cam) + '.MTS'\n cap = cv2.VideoCapture(filename)\n cap.set(1, part_frame)\n part_cam_previous = part_cam\n if part_cam != part_cam_previous:\n filename = 'D:/Code/DukeMTMC/videos/camera' + str(\n icam) + '/0000' + str(part_cam) + '.MTS'\n part_cam_previous = part_cam\n cap = cv2.VideoCapture(filename)\n ret, frame_img = cap.read()\n\n find_ind_bb, find_ind_heat, find_ind, num_visitor = find_index(\n icam, current_frame, startFrame)\n\n # get the bounding box and put to image box 1\n frame_img = draw_bb(icam, current_frame, frame_img, startFrame,\n find_ind_bb)\n frame_img = cv2.resize(frame_img, (640, 360))\n frame_img = cv2.copyMakeBorder(\n frame_img,\n 10,\n 10,\n 10,\n 10,\n cv2.BORDER_CONSTANT,\n value=[255, 255, 255])\n\n img_traj = draw_traj(icam, current_frame, find_ind)\n img_traj = cv2.resize(img_traj, (348, 730))\n img_traj = cv2.copyMakeBorder(\n img_traj,\n 10,\n 10,\n 0,\n 10,\n cv2.BORDER_CONSTANT,\n value=[255, 255, 255])\n\n img_heat = cal_heatmap(icam, current_frame, startFrame,\n find_ind_heat)\n img_heat = cv2.resize(img_heat, (640, 360))\n img_heat = cv2.copyMakeBorder(\n img_heat,\n 0,\n 10,\n 10,\n 10,\n cv2.BORDER_CONSTANT,\n value=[255, 255, 255])\n\n img_left = np.concatenate((frame_img, img_heat), axis=0)\n img = np.concatenate((img_left, img_traj), axis=1)\n\n cv2.imshow(\"video\", img)\n cv2.waitKey(1)\n print('icam = ' + str(icam))\n print('frame = ' + str(current_frame) + ' 
/ ' + str(endFrame))\n out.write(img)\n\n out.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"UI/SaveVideo.py","file_name":"SaveVideo.py","file_ext":"py","file_size_in_byte":10323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"606295898","text":"import json\nfrom io import StringIO\nimport numpy as np\nimport math\nimport urllib.request\nimport pandas as pd\nimport requests\nfrom fastcluster import linkage as fast_linkage\nfrom scipy.cluster.hierarchy import cut_tree\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nimport collections\nfrom tqdm import tqdm\nimport time\nlink = \"https://docs.google.com/spreadsheets/d/e/2PACX-1vSPN_dljf2rU2xXgqFyYOaZ2taJSu0kveZ8Nhj9MWugYe-555ZNarMObD2RDXa50j2Mn4ALoGDqzcG_/pub?gid=561493728&single=true&output=csv\"\n\ndef form_data(type_data, data, total_data, trans_keywords, post_json):\n for index, item in enumerate(data):\n #print(item)\n count = item['term_count']\n year = item['year']\n keyword = trans_keywords[item['keyword']]\n #print(keyword)\n try:\n freq = item['freq']\n if year in post_json[\"row_col_src_rad\"][keyword]:\n post_json[\"row_col_src_rad\"][keyword][year][type_data] = item['quantile']\n else:\n post_json[\"row_col_src_rad\"][keyword][year] = {type_data: item['quantile']}\n except:\n if year in post_json[\"row_col_src_rad\"][keyword]:\n post_json[\"row_col_src_rad\"][keyword][year][type_data] = 0\n else:\n post_json[\"row_col_src_rad\"][keyword][year] = {type_data: 0}\n try:\n post_json[\"row_col_src_rad\"][keyword] = {k: post_json[\"row_col_src_rad\"][keyword][k] for k in post_json[\"col_order\"]}\n except:\n pass\n return post_json\n\n\ndef form_export_data(type_data, data, total_data):\n output_data_df = []\n for item in data:\n #print(item)\n for key in item:\n #print(item[key])\n for year in item[key]:\n #print(year)\n #print(total_data)\n try:\n output_data_df.append({'type': type_data,\n 'year': year['key_as_string'].split('-')[0],\n 'term_count': year['doc_count'],\n 'keyword': key,\n 'freq': year['doc_count'] / total_data[year['key_as_string'].split('-')[0]]})\n except KeyError:\n pass\n output_type_data_df = pd.DataFrame(output_data_df)\n #print(output_type_data_df.term_count.quantile([0.25, 0.5, 0.75]))\n\n quantiles = list(output_type_data_df.term_count.quantile([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]))\n\n quantile_value = {\"0.1\": quantiles[0],\n \"0.2\": quantiles[1],\n \"0.3\": quantiles[2],\n \"0.4\": quantiles[3],\n \"0.5\": quantiles[4],\n \"0.6\": quantiles[5],\n \"0.7\": quantiles[6],\n \"0.8\": quantiles[7],\n \"0.9\": quantiles[8],\n \"1\": quantiles[9]}\n\n quantile_size = {\"0.1\": 12,\n \"0.2\": 14,\n \"0.3\": 16,\n \"0.4\": 18,\n \"0.5\": 20,\n \"0.6\": 22,\n \"0.7\": 24,\n \"0.8\": 26,\n \"0.9\": 28,\n \"1\": 30\n }\n for item in output_data_df:\n if item['term_count'] >= quantile_value['1']:\n item['quantile'] = quantile_size['1']\n elif item['term_count'] >= quantile_value['0.9'] and item['term_count'] < quantile_value['1']:\n item['quantile'] = quantile_size['0.9']\n elif item['term_count'] >= quantile_value['0.8'] and item['term_count'] < quantile_value['0.9']:\n item['quantile'] = quantile_size['0.8']\n elif item['term_count'] >= quantile_value['0.7'] and item['term_count'] < quantile_value['0.8']:\n item['quantile'] = quantile_size['0.7']\n elif item['term_count'] >= quantile_value['0.6'] and item['term_count'] < quantile_value['0.7']:\n item['quantile'] = 
quantile_size['0.6']\n elif item['term_count'] >= quantile_value['0.5'] and item['term_count'] < quantile_value['0.6']:\n item['quantile'] = quantile_size['0.5']\n elif item['term_count'] >= quantile_value['0.4'] and item['term_count'] < quantile_value['0.5']:\n item['quantile'] = quantile_size['0.4']\n elif item['term_count'] >= quantile_value['0.3'] and item['term_count'] < quantile_value['0.4']:\n item['quantile'] = quantile_size['0.3']\n elif item['term_count'] >= quantile_value['0.2'] and item['term_count'] < quantile_value['0.3']:\n item['quantile'] = quantile_size['0.2']\n elif item['term_count'] >= quantile_value['0.1'] and item['term_count'] < quantile_value['0.2']:\n item['quantile'] = quantile_size['0.1']\n elif item['term_count'] <= quantile_value['0.1']:\n item['quantile'] = quantile_size['0.1']\n export_data = output_data_df\n return export_data\n\n\ndef get_data(url, json):\n result = None\n while result is None:\n try:\n result = requests.get(url, json=json, headers={'Content-Type': 'application/json'})\n except (TimeoutError, RuntimeError, requests.exceptions.ConnectionError):\n # sleep for a bit in case that helps\n time.sleep(1)\n # try again\n result = get_data(url, json=json)\n if result.status_code == 502:\n result = get_data(url,\n json=json)\n return result\n\n\n\nwith requests.Session() as s:\n download = s.get(link)\n decoded_content = download.content.decode('utf-8')\n if decoded_content:\n raw_data = pd.read_csv(StringIO(decoded_content))\n\n\nmd_query_template = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"pubdate\",\n \"interval\": \"1y\",\n \"time_zone\": \"Europe/Minsk\",\n \"min_doc_count\": 0\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"parse_date\",\n \"format\": \"date_time\"\n },\n {\n \"field\": \"pubdate\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"pubdate\": {\n \"gte\": 1082884662175,\n \"lte\": 1555942124107,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"filter\": [\n {\n \"bool\": {\n \"should\": [\n {\n \"match_phrase\": {\n \"article_body.en\": '\"{}\"'\n }\n }\n ],\n \"minimum_should_match\": 1\n }\n }\n ],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\npt_query_template = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"application_date\",\n \"interval\": \"1y\",\n \"time_zone\": \"Europe/Minsk\",\n \"min_doc_count\": 0\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"application_date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"application_date\": {\n \"gte\": 1082884662175,\n \"lte\": 1555942124107,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"filter\": [\n {\n \"bool\": {\n \"should\": [\n {\n \"match_phrase\": {\n \"abstract.en\": '\"{}\"'\n }\n }\n ],\n \"minimum_should_match\": 1\n }\n }\n ],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\nsci_query_template = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"pubyear\",\n \"interval\": \"1y\",\n \"time_zone\": \"Europe/Minsk\",\n \"min_doc_count\": 1\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"issued\",\n \"format\": 
\"date_time\"\n },\n {\n \"field\": \"pubyear\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"range\": {\n \"pubyear\": {\n \"gte\": 1082884662175,\n \"lte\": 1556037832971,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"filter\": [\n {\n \"bool\": {\n \"should\": [\n {\n \"match_phrase\": {\n \"abstract.en\": '\"{}\"'\n }\n }\n ],\n \"minimum_should_match\": 1\n }\n }\n ],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\nsci_total_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"pubyear\",\n \"interval\": \"1y\",\n \"time_zone\": \"Europe/Minsk\",\n \"min_doc_count\": 1\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"issued\",\n \"format\": \"date_time\"\n },\n {\n \"field\": \"pubyear\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n },\n {\n \"range\": {\n \"pubyear\": {\n \"gte\": 1209476834566,\n \"lte\": 1556549234566,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\nmd_total_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"pubdate\",\n \"interval\": \"1y\",\n \"time_zone\": \"Europe/Minsk\",\n \"min_doc_count\": 0\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"parse_date\",\n \"format\": \"date_time\"\n },\n {\n \"field\": \"pubdate\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n },\n {\n \"range\": {\n \"pubdate\": {\n \"gte\": 1082884662175,\n \"lte\": 1555942124107,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\npt_total_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"application_date\",\n \"interval\": \"1y\",\n \"time_zone\": \"Europe/Minsk\",\n \"min_doc_count\": 1\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"application_date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match_all\": {}\n },\n {\n \"range\": {\n \"application_date\": {\n \"gte\": 1241012983451,\n \"lte\": 1556549383451,\n \"format\": \"epoch_millis\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\nkeywords = raw_data['keyword']\ntranslate_dict = {}\nfor record in raw_data.to_dict(orient='index').values():\n translate_dict[record['keyword']] = record['label']\n\n\narea_keywords = dict(list())\nfor record in raw_data.to_dict(orient='index').values():\n if not record['area'] in area_keywords:\n area_keywords[record['area']] = []\n area_keywords[record['area']].append(record['keyword'])\n\n#print(area_keywords)\n\ndf_md = []\ndf_sci = []\ndf_pt = []\n\nr_md_total = get_data(url='http://172.18.207.5:9200/*md_*/_search', json=md_total_query)\nr_sci_total = get_data(url='http://172.18.207.5:9200/sti_mag/_search', json=sci_total_query)\nr_pt_total = get_data(url='http://172.18.207.5:9200/sti_patents_patstat3/_search', json=pt_total_query)\n\nif r_md_total.status_code == 200:\n md = json.loads(r_md_total.text)['aggregations']['2']['buckets']\nelse:\n print(r_md_total.reason)\nif 
r_sci_total.status_code == 200:\n sci = json.loads(r_sci_total.text)['aggregations']['2']['buckets']\nelse:\n print(r_sci_total.reason)\nprint(json.loads(r_pt_total.text))\nif r_pt_total.status_code == 200:\n pt = json.loads(r_pt_total.text)['aggregations']['2']['buckets']\nelse:\n print(r_pt_total.reason)\n\n\ntotal_md = {}\ntotal_pt = {}\ntotal_sci = {}\n\nfor item in md:\n total_md[item['key_as_string'].split('-')[0]] = item['doc_count']\n\nfor item in pt:\n total_pt[item['key_as_string'].split('-')[0]] = item['doc_count']\n\nfor item in sci:\n total_sci[item['key_as_string'].split('-')[0]] = item['doc_count']\n\n\n\n\n\n\n\n#B = keywords[:len(keywords)//2]\n#C = keywords[len(keywords)//2:]\n\nfor area in area_keywords.keys():\n post_json = {}\n post_json[\"show_bars\"] = 0\n post_json[\"show_title\"] = 0\n post_json[\"bar_stroke_width\"] = 0\n post_json[\"legend_label_size\"] = 18\n post_json[\"src_stroke_darken\"] = 30\n post_json[\"rowbar_stroke_darken\"] = 0\n post_json[\"bar_label_size\"] = 25\n post_json[\"rad_multiplier\"] = 1\n post_json[\"row_label_size\"] = 20\n post_json[\"colw\"] = 180\n post_json[\"title_rel_size\"] = 0.03333333333333333\n post_json[\"bar_width_coeff\"] = 6\n post_json[\"title_text\"] = \"Диаграмма жизненного цикла\"\n post_json[\"line_stroke\"] = \"none\"\n post_json[\"rowbar_fill\"] = {}\n post_json[\"show_legend\"] = 1\n post_json[\"rowbar_label\"] = {}\n post_json[\"rowbar_width\"] = {}\n post_json[\"title_h_prop\"] = 0.02\n post_json[\"legend_h_prop\"] = 0.07692307692307693\n post_json[\"rowbar_stroke\"] = {}\n post_json[\"src_label\"] = {\n \"md\": \"Рыночная аналитика\",\n \"pt\": \"Патенты\",\n \"wos\": \"Научные статьи\"\n }\n post_json[\"src_order\"] = [\n \"wos\",\n \"pt\",\n \"md\"\n ]\n post_json[\"src_stroke\"] = {\n \"md\": \"#ea9293\",\n \"pt\": \"#96cf96\",\n \"wos\": \"#ffbe86\"\n }\n post_json[\"col_label\"] = {\n \"2009\": \"2009\",\n \"2010\": \"2010\",\n \"2011\": \"2011\",\n \"2012\": \"2012\",\n \"2013\": \"2013\",\n \"2014\": \"2014\",\n \"2015\": \"2015\",\n \"2016\": \"2016\",\n \"2017\": \"2017\",\n \"2018\": \"2018\"\n }\n post_json[\"col_order\"] = [\n \"2009\",\n \"2010\",\n \"2011\",\n \"2012\",\n \"2013\",\n \"2014\",\n \"2015\",\n \"2016\",\n \"2017\",\n \"2018\"\n ]\n post_json[\"src_fill\"] = {\n \"md\": \"#ea9293\",\n \"pt\": \"#96cf96\",\n \"wos\": \"#ffbe86\"\n }\n post_json[\"row_order\"] = []\n post_json[\"row_label\"] = {}\n post_json[\"row_col_src_rad\"] = {}\n export_md = []\n export_pt = []\n export_sci = []\n print(area)\n B = area_keywords[area][:len(area_keywords[area]) // 2]\n C = area_keywords[area][len(area_keywords[area]) // 2:]\n print(B)\n for keyword in C:\n #print(keyword, translate_dict[keyword])\n post_json[\"row_order\"].append(translate_dict[keyword])\n post_json[\"row_label\"][translate_dict[keyword]] = translate_dict[keyword]\n post_json[\"row_col_src_rad\"][translate_dict[keyword]] = {}\n md_query_template['query']['bool']['filter'][0]['bool']['should'][0]['match_phrase']['article_body.en'] = keyword.replace(\"_\", \" \").lower()\n sci_query_template['query']['bool']['filter'][0]['bool']['should'][0]['match_phrase']['abstract.en'] = keyword.replace(\"_\", \" \").lower()\n pt_query_template['query']['bool']['filter'][0]['bool']['should'][0]['match_phrase']['abstract.en'] = keyword.replace(\"_\", \" \").lower()\n\n r_md = get_data(url='http://172.18.207.5:9200/*md_*/_search', json=md_query_template)\n r_sci = get_data(url='http://172.18.207.5:9200/sti_mag/_search', json=sci_query_template)\n r_pt 
= get_data(url='http://172.18.207.5:9200/sti_patents_patstat3/_search', json=pt_query_template)\n if r_md.status_code == 200:\n md_data = json.loads(r_md.text)['aggregations']['2']['buckets']\n export_md.append({keyword: md_data})\n #post_json = form_data(type_data='md', data=md_data, total_data=total_md, keyword=translate_dict[keyword],\n # post_json=post_json)\n else:\n print(r_md.reason)\n if r_sci.status_code == 200:\n sci_data = json.loads(r_sci.text)['aggregations']['2']['buckets']\n export_sci.append({keyword: sci_data})\n #post_json = form_data(type_data='wos', data=sci_data, total_data=total_sci, keyword=translate_dict[keyword],\n # post_json=post_json)\n else:\n print(r_sci.reason)\n if r_pt.status_code == 200:\n pt_data = json.loads(r_pt.text)['aggregations']['2']['buckets']\n export_pt.append({keyword: pt_data})\n #post_json = form_data(type_data='pt', data=pt_data, total_data=total_pt, keyword=translate_dict[keyword], post_json=post_json)\n else:\n print(r_pt.reason)\n\n formed_md = form_export_data(\"md\", export_md, total_md)\n formed_sci = form_export_data(\"sci\", export_sci, total_sci)\n formed_pt = form_export_data(\"pt\", export_pt, total_pt)\n post_json = form_data(type_data='md', data=formed_md, total_data=total_md, trans_keywords=translate_dict,\n post_json=post_json)\n #print(post_json)\n post_json = form_data(type_data='wos', data=formed_sci, total_data=total_sci, trans_keywords=translate_dict,\n post_json=post_json)\n post_json = form_data(type_data='pt', data=formed_pt, total_data=total_pt, trans_keywords=translate_dict,\n post_json=post_json)\n #print(formed_md)\n #print(formed_sci)\n #print(formed_pt)\n\n with open('not_working_lcd_json.txt', 'w') as file:\n file.write(json.dumps(post_json))\n\n\n req_svg = requests.post(url='http://172.18.207.36:5100/q/a190404_lifecycle_draw?_by=alexander',json=post_json)\n #print(req_svg.text)\n #print(r.project_develop_folder + '/export/' + 'life_cycle_diagram.svg')\n\n\n\n with open('/home/aaleshkevich/Elastic/gazprom_life_cycle_diagram_{}2.svg'.format(area), 'w') as file:\n file.write(req_svg.text)\n\n\n#print(output_data_df)\n#output_type_data_df = pd.DataFrame(output_data_df)\n#print(output_type_data_df)\n#output_type_data_df.to_csv('total_term_stat_gazprom_lcd.csv', sep=';', encoding='utf-8')\n","sub_path":"lfc/lfc_md_sci_pt.py","file_name":"lfc_md_sci_pt.py","file_ext":"py","file_size_in_byte":17874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"413719023","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\n# from sklearn.externals import joblib\nimport joblib\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score # 交叉检验\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom tools import file_tools\n# 中文显示\nplt.rcParams['font.family'] = ['sans-serif']\nplt.rcParams['font.sans-serif'] = ['SimHei']\n\n\ndef rmse(y_true, y_pred):\n return np.sqrt(mean_squared_error(y_true, y_pred))\n\n\ndef build_svr(ID,season,predict_day,time,type,data_path='data/obp/',\n models_save_path='models/svr/',\n images_save_path='images/svr/'):\n '''\n Parameters\n ----------\n ID : string\n 要建模的站点\n season : string\n 要建模的季节(3-4)\n predict_day : int\n 要预测的天数\n time : string\n 要预测的小时(08)\n data_path : 
string\n 路径,用来训练svr的文件路径,包含10UV,msl,obp等特征\n models_save_path : string\n svr模型的保存路径\n images_save_path : string\n 图片的保存路径\n Returns\n -------\n None.\n '''\n FILE_PATH = data_path+str(predict_day)+'天/'+season+'/'+time+'/'+type+'/'+ID+'_p.csv'\n \n orgin_data = pd.read_csv(FILE_PATH)\n \n # 分trian,test\n index = int(len(orgin_data)*0.9)\n columns_list = ['MSL',type,'ob_p']\n # x_train, x_test, y_train, y_test = train_test_split(orgin_data[columns_list], orgin_data['ob'], test_size=0.2,random_state=113)\n x_train = orgin_data[columns_list][:index]\n x_test = orgin_data[columns_list][index:]\n y_train = orgin_data['ob'][:index]\n y_test = orgin_data['ob'][index:]\n \n # 归一化\n min_max_scaler = MinMaxScaler()\n x_train_scaler = min_max_scaler.fit_transform(x_train)\n x_test_scaler = min_max_scaler.fit_transform(x_test)\n \n # 训练模型,并保存\n model = SVR(kernel='rbf')\n model.fit(x_train_scaler, y_train)\n model_save_path = models_save_path+season+'/'+str(predict_day)+'天/'+time+'/'+type+'/'\n file_tools.check_dir_and_mkdir(model_save_path)\n joblib.dump(model, model_save_path+ID+'.pkl')\n \n # 绘制训练Loss图\n # plot_learning_curves(model,x_train_scaler,y_train)\n \n predictions = model.predict(x_test_scaler)\n \n \n # 检查文件夹路径\n dir_path = images_save_path+ID+'/'\n file_tools.check_dir_and_mkdir(dir_path)\n \n # 画图\n X_label = []\n for i in range(predictions.shape[0]):\n X_label.append(i)\n plt.figure(figsize=(10,3))\n plt.plot(X_label, predictions,'r',label='预测结果')\n plt.plot(X_label, y_test,'black',label='理想结果')\n plt.plot(X_label, x_test[type],'g--',label='ec')\n plt.title(ID+' '+season+' '+str(predict_day))\n plt.legend()\n plt.savefig(dir_path+ID+'_'+season+'_'+str(predict_day)+'.png')\n # plt.show()\n\n \ndef svr_predict(ID,data,season,predict_day,time,type,models_save_path='models/svr/'):\n '''\n Parameters\n ----------\n ID : string\n 要建模的站点\n data : dataframe\n 成型的obp文件\n season : string\n 要建模的季节(3-4)\n predict_day : int\n 要预测的天数\n time : string\n 要预测的小时(08)\n models_save_path : string\n svr模型的保存路径\n -------\n Returns : dataframe\n 预测结果\n '''\n \n orgin_data = data\n \n columns_list = ['MSL',type,'ob_p']\n # x_train, x_test, y_train, y_test = train_test_split(orgin_data[columns_list], orgin_data['ob'], test_size=0.2,random_state=113)\n x = orgin_data[columns_list]\n \n # 归一化\n min_max_scaler = MinMaxScaler()\n x_train_scaler = min_max_scaler.fit_transform(x)\n \n # 加载模型\n model_save_path = models_save_path+season+'/'+str(predict_day)+'天/'+time+'/'+type+'/'+ID+'.pkl'\n model = joblib.load(model_save_path)\n \n predictions = model.predict(x)\n \n return predictions\n","sub_path":"build_model/svr_model.py","file_name":"svr_model.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"55238695","text":"import logging\r\nimport sqlite3\r\nimport os\r\n\r\nclass old:\r\n def __init__(self, logger: logging.Logger = None):\r\n self.logger = logger if logger is not None else logging.getLogger(\"db class\")\r\n\r\n dir_db = os.path.abspath(os.curdir)\r\n self.conn = sqlite3.connect(f'{dir_db}/snowwhitebot.db')\r\n self.c = self.conn.cursor()\r\n sql = '''CREATE TABLE IF NOT EXISTS chat_members (id integer primary key, chat_id integer, user_id integer, user_name text)'''\r\n self.c.execute(sql)\r\n self.conn.commit()\r\n\r\n sql = '''CREATE TABLE IF NOT EXISTS images (id integer primary key, images_key text)'''\r\n self.c.execute(sql)\r\n self.conn.commit()\r\n\r\n sql = '''CREATE TABLE IF NOT EXISTS chats (id 
integer primary key, chat_id integer, chat_name text)'''\r\n        self.c.execute(sql)\r\n        self.conn.commit()\r\n\r\n        sql = '''CREATE TABLE IF NOT EXISTS questions (id integer primary key, message_id integer, author_id integer, question_text text)'''\r\n        self.c.execute(sql)\r\n        self.conn.commit()\r\n\r\n        sql = '''CREATE TABLE IF NOT EXISTS answers (id integer primary key, question_id integer, message_id integer, answer_text text, points integer default 1)'''\r\n        self.c.execute(sql)\r\n        self.conn.commit()\r\n\r\n        sql = '''CREATE TABLE IF NOT EXISTS question_votes (id integer primary key, question_id integer, answer_id integer, user_id integer, points integer default 1)'''\r\n        self.c.execute(sql)\r\n        self.conn.commit()\r\n\r\n    def insert_chat(self, chat_id, chat_name):\r\n        \"\"\"Adds a chat to the DB\r\n\r\n        Keyword arguments:\r\n        chat_id -- chat id\r\n        chat_name -- chat name\r\n\r\n        \"\"\"\r\n\r\n        self.logger.info('insert_chat')\r\n\r\n        sql = '''INSERT INTO chats(chat_id, chat_name) VALUES(?, ?)'''\r\n        self.c.execute(sql, (chat_id, chat_name))\r\n        self.conn.commit()\r\n\r\n        self.logger.info(f'chat_id: {chat_id}')\r\n        self.logger.info(f'chat_name: {chat_name}')\r\n        self.logger.info(self.c.lastrowid)\r\n\r\n        return self.c.lastrowid\r\n\r\n    def select_chat_by_chat_id(self, chat_id):\r\n        \"\"\"Looks up a chat by its chat id (not to be confused with the DB row id)\r\n\r\n        Keyword arguments:\r\n        chat_id -- chat id\r\n\r\n        \"\"\"\r\n        self.logger.info('select_chat_by_chat_id')\r\n\r\n        sql = '''SELECT * FROM chats WHERE chat_id = ?'''\r\n        self.c.execute(sql, (chat_id,))\r\n\r\n        # fetch the row once; a second fetchone() would log the row but then return the next (empty) result\r\n        row = self.c.fetchone()\r\n        self.logger.info(f'chat_id: {chat_id}')\r\n        self.logger.info(row)\r\n\r\n        return row\r\n\r\n    def get_chats(self):\r\n        \"\"\"Returns the data of every stored chat\"\"\"\r\n        self.logger.info('get_chats')\r\n\r\n        sql = '''SELECT * FROM chats'''\r\n        self.c.execute(sql)\r\n        return self.c.fetchall()\r\n\r\n    def insert_chat_members_data(self, chat_id, user_id, user_name):\r\n        sql = '''INSERT INTO chat_members(chat_id, user_id, user_name) VALUES(?, ?, ?)'''\r\n        self.c.execute(sql, (chat_id, user_id, user_name))\r\n        self.conn.commit()\r\n\r\n        return self.c.lastrowid\r\n\r\n    def select_chat_members_by_user_id(self, chat_id, user_id):\r\n        sql = '''SELECT * FROM chat_members WHERE chat_id = ? 
AND user_id = ?'''\r\n self.c.execute(sql, (chat_id, user_id))\r\n return self.c.fetchone()\r\n\r\n def select_chat_members_by_chat_id(self, chat_id):\r\n sql = '''SELECT * FROM chat_members WHERE chat_id = ?'''\r\n self.c.execute(sql, (chat_id,))\r\n return self.c.fetchall()\r\n\r\n def insert_question(self, question_text, message_id, author_id):\r\n sql = '''INSERT INTO questions('author_id', 'message_id', 'question_text') VALUES (?, ?, ?)'''\r\n self.c.execute(sql, (author_id, message_id, question_text))\r\n self.conn.commit()\r\n return self.c.lastrowid\r\n\r\n def insert_answer(self, question_id, message_id, answer_text):\r\n try:\r\n sql = '''INSERT INTO answers('question_id','message_id', 'answer_text') VALUES (?, ?, ?)'''\r\n self.c.execute(sql, (question_id, message_id, answer_text))\r\n self.conn.commit()\r\n return self.c.lastrowid\r\n except sqlite3.Error as e:\r\n print(type(e).__name__)\r\n\r\n def select_question_answers(self, question_id):\r\n try:\r\n sql = '''SELECT * FROM answers WHERE question_id = ?'''\r\n self.c.execute(sql, (question_id,))\r\n return self.c.fetchall()\r\n except sqlite3.Error as e:\r\n print(type(e).__name__)\r\n\r\n def select_question_by_message_id(self, message_id):\r\n sql = '''SELECT * FROM questions WHERE message_id = ?'''\r\n self.c.execute(sql, (message_id,))\r\n return self.c.fetchone()\r\n\r\n def select_answer_by_message_id(self, message_id):\r\n sql = '''SELECT * FROM answers WHERE message_id = ?'''\r\n self.c.execute(sql, (message_id,))\r\n return self.c.fetchone()\r\n\r\n def update_answer_points_by_id(self, answer_id, points):\r\n sql = '''UPDATE answers SET points = ? WHERE id = ?'''\r\n self.c.execute(sql, (points, answer_id))\r\n\r\n def get_answer_points_by_id(self, answer_id):\r\n sql = '''SELECT points FROM answers WHERE id = ?'''\r\n self.c.execute(sql, (answer_id,))\r\n return self.c.fetchone()\r\n\r\n def select_questions_by_author(self, author_id):\r\n sql = '''SELECT * FROM questions WHERE author_id = ?'''\r\n self.c.execute(sql, (author_id,))\r\n return self.c.fetchall()\r\n\r\n def select_questions_not_votes(self, user_id):\r\n sql = '''\r\n SELECT * FROM questions WHERE id NOT IN (SELECT question_id FROM question_votes WHERE user_id = ?)\r\n '''\r\n\r\n def select_random_questions(self, user_id):\r\n sql = '''\r\n SELECT * FROM questions WHERE id NOT IN (SELECT question_id FROM question_votes WHERE user_id = ?) ORDER BY RANDOM() LIMIT 1\r\n '''\r\n try:\r\n self.c.execute(sql, (user_id,))\r\n\r\n except sqlite3.Error as e:\r\n print(e)\r\n\r\n return self.c.fetchall()\r\n\r\n def ckeck_answer_user_vote(self, question_id, answer_id, user_id):\r\n sql = '''SELECT * FROM question_votes WHERE question_id = ? 
AND user_id = ?'''\r\n self.c.execute(sql, (question_id, user_id))\r\n return self.c.fetchone()\r\n\r\n def add_answer_vote(self, question_id, answer_id, user_id, points=1):\r\n try:\r\n sql = '''INSERT INTO question_votes('question_id','answer_id', 'user_id', 'points') VALUES (?, ?, ?, ?)'''\r\n self.c.execute(sql, (question_id, answer_id, user_id, points))\r\n self.conn.commit()\r\n return self.c.lastrowid\r\n except sqlite3.Error as e:\r\n print(type(e).__name__)\r\n print(\"Database error: %s\" % e)\r\n","sub_path":"db/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":6908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"262239229","text":"\"\"\"\nThis file contains examples of batch processor implementations, which basically\ncreate entries arbitrarily. The processors here are useful as placeholders and\ntest cases.\n\n\"\"\"\nfrom typing import Dict, Optional, Type\n\nimport numpy as np\n\nfrom forte.data.data_pack import DataPack\nfrom forte.common.types import DataRequest\nfrom forte.data.batchers import ProcessingBatcher, FixedSizeDataPackBatcher\nfrom forte.processors.base import BatchProcessor\nfrom ft.onto.base_ontology import Token, Sentence, EntityMention, RelationLink\n\n__all__ = [\n \"DummyRelationExtractor\",\n]\n\n\nclass DummyRelationExtractor(BatchProcessor):\n \"\"\"\n A dummy relation extractor.\n\n Note that to use :class:`DummyRelationExtractor`, the :attr:`ontology` of\n :class:`Pipeline` must be an ontology that includes\n ``ft.onto.base_ontology.Sentence``.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.define_context()\n self.batch_size = 4\n\n def define_batcher(self) -> ProcessingBatcher:\n # pylint: disable=no-self-use\n return FixedSizeDataPackBatcher()\n\n def define_context(self) -> Type[Sentence]:\n # pylint: disable=no-self-use\n return Sentence\n\n def _define_input_info(self) -> DataRequest:\n # pylint: disable=no-self-use\n input_info: DataRequest = {\n Token: [],\n EntityMention: {\"fields\": [\"ner_type\", \"tid\"]}\n }\n return input_info\n\n def predict(self, data_batch: Dict): # pylint: disable=no-self-use\n entities_span = data_batch[\"EntityMention\"][\"span\"]\n entities_tid = data_batch[\"EntityMention\"][\"tid\"]\n\n pred: Dict = {\n \"RelationLink\": {\n \"parent.tid\": [],\n \"child.tid\": [],\n \"rel_type\": [],\n }\n }\n for tid, entity in zip(entities_tid, entities_span):\n parent = []\n child = []\n rel_type = []\n\n entity_num = len(entity)\n for i in range(entity_num):\n for j in range(i + 1, entity_num):\n parent.append(tid[i])\n child.append(tid[j])\n rel_type.append(\"dummy_relation\")\n\n pred[\"RelationLink\"][\"parent.tid\"].append(\n np.array(parent))\n pred[\"RelationLink\"][\"child.tid\"].append(\n np.array(child))\n pred[\"RelationLink\"][\"rel_type\"].append(\n np.array(rel_type))\n\n return pred\n\n def pack(self, data_pack: DataPack, output_dict: Optional[Dict] = None):\n # pylint: disable=no-self-use\n \"\"\"Add corresponding fields to data_pack\"\"\"\n if output_dict is None:\n return\n\n for i in range(len(output_dict[\"RelationLink\"][\"parent.tid\"])):\n for j in range(len(output_dict[\"RelationLink\"][\"parent.tid\"][i])):\n link = RelationLink(data_pack)\n link.rel_type = output_dict[\"RelationLink\"][\"rel_type\"][i][j]\n parent: EntityMention = data_pack.get_entry( # type: ignore\n output_dict[\"RelationLink\"][\"parent.tid\"][i][j])\n link.set_parent(parent)\n child: EntityMention = data_pack.get_entry( # type: 
ignore\n output_dict[\"RelationLink\"][\"child.tid\"][i][j])\n link.set_child(child)\n data_pack.add_or_get_entry(link)\n","sub_path":"forte/processors/dummy_batch_processor.py","file_name":"dummy_batch_processor.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"186889203","text":"\r\nb = []\r\nstack = []\r\ncount = 0\r\ndef findIslands(a,n,m):\r\n global stack,count,b\r\n count = 0\r\n b = []\r\n stack = []\r\n def neibhours(i,j,l1,l2):\r\n global stack,count,b\r\n all_nei = [[i-1,j-1],[i-1,j],[i-1,j+1],[i,j-1],[i,j+1],[i+1,j-1],[i+1,j],[i+1,j+1]]\r\n b[i][j] = \"t\"\r\n for k in all_nei:\r\n if(k[0]>=0 and k[1]>=0 and k[0]', '', '', '', ''\n]\n\ndef preprocess_sentence(sentence):\n output_words = []\n output_punctuation_marks = []\n\n id_ = sentence.split()[0]\n words = sentence.split()[1:]\n for (word, punctuation_mark) in zip(words, words[1:] + [None]):\n if word in PUNCTUATION_MARKS:\n continue\n\n if punctuation_mark not in PUNCTUATION_MARKS:\n punctuation_mark = \"\"\n\n output_words.append(word)\n output_punctuation_marks.append(punctuation_mark)\n\n return [id_,\n \" \".join(output_words),\n \" \".join(output_punctuation_marks)\n ]\n\n\n # return [\n # \" %s \" % \" \".join(output_words),\n # \" %s \" % \" \".join(output_punctuation_marks)\n # ]\n\ndef create_dataset(path):\n lines = io.open(path, encoding='UTF-8').read().strip().split('\\n')\n word_pairs = [preprocess_sentence(l) for l in lines]\n return list(zip(*word_pairs))\n\ndef tokenize(lang):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\n lang_tokenizer.fit_on_texts(lang)\n # fit_on_texts\n print('lang_tokenizer.word_index',lang_tokenizer.word_index)\n print('lang_tokenizer.index_word',lang_tokenizer.index_word)\n\n\n tensor = lang_tokenizer.texts_to_sequences(lang)\n\n return tensor, lang_tokenizer\n\ndef load_dataset(path):\n id_,w,p = create_dataset(path)\n inp, targ = list(w),list(p)\n # input_tensor, inp_lang_tokenizer = tokenize(inp_lang)\n # target_tensor, targ_lang_tokenizer = tokenize(targ_lang)\n\n return inp,targ\n # input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer\n\n\n\ntrain_inp, train_targ = load_dataset('train.txt')\ndev_inp, dev_targ = load_dataset('dev.txt')\ntotal_inp = train_inp + dev_inp\ntotal_targ = train_targ + dev_targ\n\ninp_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\ninp_tokenizer.fit_on_texts(total_inp)\n\ntarg_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\ntarg_tokenizer.fit_on_texts(total_targ)\n\n\ntrain_input_tensor = inp_tokenizer.texts_to_sequences(train_inp)\ntrain_target_tensor = targ_tokenizer.texts_to_sequences(train_targ)\n\n\ndev_input_tensor = inp_tokenizer.texts_to_sequences(dev_inp)\ndev_target_tensor = targ_tokenizer.texts_to_sequences(dev_targ)\n\n\nprint(inp_tokenizer.index_word)\nprint(targ_tokenizer.index_word)\n\n# train_input_tensor, train_target_tensor, train_inp_lang, train_targ_lang = load_dataset('train.txt')\n# dev_input_tensor, dev_target_tensor, dev_inp_lang, dev_targ_lang = load_dataset('dev.txt')\n\nprint(len(train_input_tensor))\nprint(len(dev_input_tensor))\nprint(len(list(inp_tokenizer.word_index.keys())))\nprint(len(list(targ_tokenizer.word_index.keys())))\n\ndef convert(lang, tensor):\n for t in tensor:\n # print(t)\n if t!=0:\n print (\"%d ----> %s\" % (t, lang.index_word[int(t)]))\n\nprint (\"Input Language; index to word mapping\")\n# convert(dev_inp_lang, 
dev_input_tensor[143])\nconvert(inp_tokenizer, train_input_tensor[143])\n# print(convert(train_inp_lang, dev_input_tensor[143]))\nprint ()\nprint (\"Target Language; index to word mapping\")\nconvert(targ_tokenizer, train_target_tensor[143])\n\n\n\ndef bucketer(input_ds,target_ds,BATCH_SIZE):\n \n\n BUFFER_SIZE = 128\n # BATCH_SIZE = 64\n\n \n lengths = []\n for i in range(len(target_ds)):\n lengths.append(len(target_ds[i]))\n print(max(lengths))\n \n \n\n buckets = list(range(5, max(lengths)+6, 5))\n print(buckets)\n batch_sizes = [BATCH_SIZE] * (len(buckets)+1)\n print(batch_sizes)\n \n \n def generator():\n for i in range(len(input_ds)):\n yield np.array(input_ds[i]),(target_ds[i])\n \n\n \n dataset = tf.data.Dataset.from_generator(generator, (tf.int64, tf.int64), ([None], [None]))\n dataset = dataset.shuffle(BUFFER_SIZE)\n dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(\n lambda input_ds, target_ds: tf.size(input_ds), \n buckets,\n batch_sizes,\n drop_remainder=True,\n pad_to_bucket_boundary=False))\n \n c=0\n for i in dataset:\n # print(ds[i])\n print(tf.shape(i[0]), tf.shape(i[1]))\n c+=1\n \n if c == 5:\n break\n return dataset\n\nBATCH_SIZE = 64\ntrain_dataset = bucketer(train_input_tensor,train_target_tensor,BATCH_SIZE)\nval_dataset = bucketer(dev_input_tensor,dev_target_tensor,BATCH_SIZE)\n\nembedding_dim = 256\nprint(train_dataset.reduce(0, lambda x, _: x + 1).numpy())\nprint(val_dataset.reduce(0, lambda x, _: x + 1).numpy())\n\n\n\nclass StatefulF1(Metric):\n def __init__(self, name='stateful_f1', **kwargs):\n # initializing an object of the super class\n super(StatefulF1, self).__init__(name=name, **kwargs)\n\n # initializing state variables\n self.c = self.add_weight(name='c',initializer='zeros',dtype='int32')\n self.s = self.add_weight(name='s',initializer='zeros',dtype='int32') \n self.d = self.add_weight(name='d', initializer='zeros',dtype='int32') \n self.i = self.add_weight(name='i', initializer='zeros',dtype='int32') \n\n\n def update_state(self, ytrue, ypred, sample_weight=None):\n # casting ytrue and ypred as float dtype\n ytrue = tf.cast(ytrue, tf.int32)\n ypred = tf.argmax(ypred,axis=2)\n ypred = tf.cast(ypred, tf.int32)\n\n\n\n\n\n # targ_labels = [targ_lang.word_index[''],targ_lang.word_index[''],\n # targ_lang.word_index[''],targ_lang.word_index[''],\n # targ_lang.word_index['']]\n\n \n\n match = tf.equal(ytrue,ypred)\n space = tf.cast(targ_tokenizer.word_index[''],tf.int32)\n pad = tf.cast(0,tf.int32)\n \n targ_space = tf.equal(space,ytrue) \n targ_pad = tf.equal(pad,ytrue)\n \n pred_space = tf.equal(space,ypred) \n pred_pad =tf.equal(pad,ypred)\n \n\n #CSID\n # Correct if targ matches pred and targ is not a spece or pad\n self.c.assign_add(tf.reduce_sum(tf.cast(match & ~targ_space & ~targ_pad, tf.int32)))\n\n # Substitution if targ does not match pred and targ is not a pad and space and pred is not a pad and space\n self.s.assign_add(tf.reduce_sum(tf.cast(( ~match & ~targ_space & ~targ_pad & ~pred_space & ~pred_pad), tf.int32))) \n\n #Insertion if target is a space or pad and pred is not a space and not a pad\n self.i.assign_add(tf.reduce_sum(tf.cast(( targ_space & ~(pred_space | pred_pad) ), tf.int32)))\n # self.i.assign_add(tf.reduce_sum(tf.cast(( (targ_space | targ_pad) & ~pred_space & ~pred_pad), tf.int32)))\n\n\n #Deletion if target is not a space and not a pad and pred is a space or pad\n self.d.assign_add(tf.reduce_sum(tf.cast(( ~targ_space & ~targ_pad & (pred_space | pred_pad)), tf.int32)))\n \n\n\n def result(self):\n 
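# micro-averaged F1 from the accumulated CSID counts: precision = c/(c+s+i), recall = c/(c+s+d)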
self.precision = self.c / (self.c+self.s+self.i)\n self.recall = self.c / (self.c+self.s+self.d) # calculates recall\n\n self.micro = (2*self.precision*self.recall) / (self.precision+self.recall)\n\n return self.micro\n\n def reset_states(self):\n self.c.assign(0)\n self.s.assign(0)\n self.d.assign(0)\n self.i.assign(0)\n\nclass StatefulF1Class(Metric):\n def __init__(self, name,index, **kwargs):\n # initializing an object of the super class\n super(StatefulF1Class, self).__init__(name=name, **kwargs)\n\n # initializing state variables\n self.c = self.add_weight(name='c',initializer='zeros',dtype='int32')\n self.p = self.add_weight(name='p',initializer='zeros',dtype='int32')\n self.r = self.add_weight(name='r',initializer='zeros',dtype='int32') \n # self.d = self.add_weight(name='d', initializer='zeros',dtype='int32') \n # self.i = self.add_weight(name='i', initializer='zeros',dtype='int32') \n self.index=index\n\n\n def update_state(self, ytrue, ypred, sample_weight=None):\n # casting ytrue and ypred as float dtype\n ytrue = tf.cast(ytrue, tf.int32)\n ypred = tf.argmax(ypred,axis=2)\n ypred = tf.cast(ypred, tf.int32)\n\n\n\n\n\n targ_labels = [targ_tokenizer.word_index[''],targ_tokenizer.word_index[''],\n targ_tokenizer.word_index[''],targ_tokenizer.word_index[''],\n targ_tokenizer.word_index['']]\n\n \n\n\n\n\n punc = targ_labels[self.index]\n\n space = tf.cast(targ_tokenizer.word_index[''],tf.int32)\n pad = tf.cast(0,tf.int32)\n \n match = tf.equal(ytrue,ypred) & tf.equal(ytrue, punc)\n not_match = tf.equal(ytrue,punc) & ~tf.equal(ypred, punc)\n\n targ_space = tf.equal(space,ytrue) \n targ_pad = tf.equal(pad,ytrue) \n\n # pred_space = tf.equal(space,ypred) \n # pred_pad =tf.equal(pad,ypred) \n\n def tf_count_pred(t, val):\n elements_equal_to_value = tf.equal(t, val) & ~targ_pad \n as_ints = tf.cast(elements_equal_to_value, tf.int32)\n count = tf.reduce_sum(as_ints)\n return count\n\n def tf_count_true(t, val):\n elements_equal_to_value = tf.equal(t, val)\n as_ints = tf.cast(elements_equal_to_value, tf.int32)\n count = tf.reduce_sum(as_ints)\n return count\n\n self.c.assign_add(tf.reduce_sum(tf.cast(match & ~targ_space & ~targ_pad, tf.int32)))\n self.p.assign_add(tf_count_pred(ypred,punc))\n self.r.assign_add(tf_count_true(ytrue,punc))\n \n\n\n def result(self):\n self.precision = self.c / self.p\n self.recall = self.c / self.r \n\n self.micro = (2*self.precision*self.recall) / (self.precision+self.recall)\n\n return self.micro\n\n def reset_states(self):\n self.c.assign(0)\n self.p.assign(0)\n self.r.assign(0)\n # self.d.assign(0)\n # self.i.assign(0)\n\nstateful_f1 = StatefulF1()\n# targ_labels = [targ_tokenizer.word_index[''],targ_tokenizer.word_index[''],\n# targ_tokenizer.word_index[''],targ_tokenizer.word_index[''],\n# targ_tokenizer.word_index['']]\nstateful_f1_fullstop = StatefulF1Class(name='stateful_f1_fullstop',index=0)\nstateful_f1_comma = StatefulF1Class(name='stateful_f1_comma',index=1)\nstateful_f1_question = StatefulF1Class(name='stateful_f1_question',index=2)\nstateful_f1_exclamation = StatefulF1Class(name='stateful_f1_exclamation',index=3)\nstateful_f1_dots = StatefulF1Class(name='stateful_f1_dots',index=4)\n\nvocab_inp_size = len(inp_tokenizer.word_index)+1\nvocab_tar_size = len(targ_tokenizer.word_index)+1\nprint(vocab_inp_size)\nprint(vocab_tar_size)\n\n\nmodel = keras.Sequential()\nmodel.add(Embedding(vocab_inp_size, 512, mask_zero=True))\n# Embedding(num_input_words, hidden_layer_size, mask_zero=True)\nmodel.add(LSTM(256, return_sequences=True))\n#256\n# 
model.add(TimeDistributed(Dense(512, activation='relu')))\n# model.add(Dropout(0.5))\nmodel.add(TimeDistributed(Dense(vocab_tar_size, activation='softmax'))) \n\n# Compile model\nmodel.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=[stateful_f1,stateful_f1_fullstop,stateful_f1_comma,stateful_f1_question,stateful_f1_exclamation,stateful_f1_dots])\nmodel.fit(train_dataset,epochs=10,batch_size=64,validation_data=val_dataset)\n\n","sub_path":"unilstm_lexical.py","file_name":"unilstm_lexical.py","file_ext":"py","file_size_in_byte":11350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408115988","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@Author : xueqiang.liu\n@contact : xqliu@dragondropinn.com\n@Date : 2019/12/4 \n@Description :\n无重复样本使用rMATS3版本,重复样本使用rMATS4版本\n'''\nimport os\nimport time\nimport sys\nfrom profile import Profile,timefly\nfrom argparse import ArgumentParser\nimport pandas as pd\nfrom multiprocessing import Process,Pool\nimport shutil\n\nvar_path = Profile()\n\n@timefly\ndef asprofile(prefix,species):\n if not os.path.exists('Splice'):\n os.mkdir('Splice')\n os.system(\"{ASprofile_path}/extract-as Assembly/{T}.gtf {ref_path}/fasta/{s}/{s}.fa.hdrs > Splice/{T}.as.txt \"\n \">Splice/{T}.asprofile.log 2>&1\".format(T=prefix,s=species, **var_path))\n os.system(\"perl {ASprofile_path}/summarize_as.pl Assembly/{T}.gtf Splice/{T}.as.txt -p Splice/{T} \"\n \">>Splice/{T}.asprofile.log 2>&1\".format(T=prefix, **var_path))\n os.system(\"{ASprofile_path}/extract-as-fpkm Assembly/{T}.gtf {ref_path}/fasta/{s}/{s}.fa.hdrs Splice/{T}.as.nr > Splice/{T}.as.fpkm.txt \"\n \">>Splice/{T}.asprofile.log 2>&1\".format(T=prefix,s=species, **var_path))\n\n@timefly\ndef rMATS(conSamps,expSamps,species):\n os.makedirs('Splice',exist_ok=True)\n if len(conSamps) == 1:\n prefix1,prefix2 = conSamps+expSamps\n os.system(\"source /home/xueqiang.liu/anaconda3/bin/activate python27 && \"\n \"python {rMATS3} -b1 Mapping/{p1}.sort.bam -b2 Mapping/{p2}.sort.bam -c 0.05 -gtf {ref_path}/gtf/{s}/{s}.gtf -o Splice -t paired -len 150 >>Splice/splice.log 2>&1 && \"\n \"source /home/xueqiang.liu/anaconda3/bin/deactivate\".format(p1=prefix1,p2=prefix2,s=species, **var_path))\n try:\n shutil.rmtree('Splice/SAMPLE_1')\n shutil.rmtree('Splice/SAMPLE_2')\n except:\n print('Splice/SAMPLE_1文件不存在')\n else:\n conBam = open('Mapping/conBam.txt', 'w', encoding='utf-8')\n info = ','.join([\"Mapping/\"+consamp+\".sort.bam\" for consamp in conSamps])\n conBam.write(info)\n conBam.close()\n expBam = open('Mapping/expBam.txt', 'w', encoding='utf-8')\n info2 = ','.join([\"Mapping/\" + expsamp + \".sort.bam\" for expsamp in expSamps])\n expBam.write(info2)\n expBam.close()\n os.system(\"source /home/xueqiang.liu/anaconda3/bin/activate python27 && \"\n \"python {rMATS4} --b1 Mapping/conBam.txt --b2 Mapping/expBam.txt --gtf {ref_path}/gtf/{s}/{s}.gtf --od Splice -t paired --readLength 150 >>Splice/splice.log 2>&1 && \"\n \"source /home/xueqiang.liu/anaconda3/bin/deactivate\".format( s=species,**var_path))\n\ndef rmatsPlot(conBams,expBams,asevent):\n os.system(\"source /home/xueqiang.liu/anaconda3/bin/activate python27 && \"\n \"python {rmatsPlot} --b1 {cb} --b2 {eb} --l1 control --l2 experimental --exon_s 1 --intron_s 5 -t {ase} \"\n \"-e Splice/{ase}.MATS.JC.filtered.txt -o Splice/{ase} >>Splice/plot.log 2>&1 && \"\n \"source /home/xueqiang.liu/anaconda3/bin/deactivate\".format(cb=conBams,eb=expBams,ase=asevent,** 
var_path))\n\n@timefly\ndef rmats2sashimiplot(conSamps, expSamps):\n    ASevents=['A3SS','A5SS','MXE','RI','SE']\n    for ase in ASevents:\n        asfile = 'Splice/MATS_output/' +ase + '.MATS.JunctionCountOnly.txt' if len(conSamps) == 1 else 'Splice/'+ase+'.MATS.JC.txt' # file path and name differ depending on whether the samples have replicates\n        df = pd.read_csv(asfile,sep='\\t')\n        df2 = df[ abs(df.IncLevelDifference) >= 0.5] if len(conSamps) == 1 else df[(df.FDR <= 0.05) & (abs(df.IncLevelDifference) >= 0.5)] # keep rows with P < 0.05 and a difference above 50%; the P value is ignored when there are no replicates\n        df2['absIncLD'] = abs(df2['IncLevelDifference'])\n        df3 = df2.sort_values('absIncLD',ascending=False)\n        df3 = df3.drop('absIncLD',axis=1)\n        df3.to_csv('Splice/{}.MATS.JC.filtered.txt'.format(ase),sep='\\t',index=False)\n    conBams = ','.join([\"Mapping/\" + consamp + \".sort.bam\" for consamp in conSamps])\n    expBams = ','.join([\"Mapping/\" + expsamp + \".sort.bam\" for expsamp in expSamps])\n    pool=Pool(5)\n    for asevent in ASevents:\n        pool.apply_async(rmatsPlot,args=(conBams,expBams,asevent,))\n    pool.close()\n    pool.join()\n\n\n\n@timefly\ndef cuffcompare(prefix,species):\n    os.system(\"{cuffcompare} -r {ref_path}/gtf/{s}/{s}.gtf -s {ref_path}/fasta/{s}/{s}.fa -o Splice/{T} Assembly/{T}.gtf \"\n              \">Splice/{T}.cuffcompare.log 2>&1\" .format(T=prefix,s=species, **var_path))\n\n\ndef Parser_opt():\n    parser = ArgumentParser()\n    parser.add_argument('--control_samples', dest='con_samp', type=str, default='', help='Control group sample names, comma-separated if there are replicates; required!')\n    parser.add_argument('--experimental_samples', dest='exp_samp', type=str, default='', help='Experimental group sample names, comma-separated if there are replicates; required!')\n    parser.add_argument('--model', dest='model', type=str, default='', help='Module to run [rMATS,rmats2sashimiplot]; required!')\n    parser.add_argument('--species', dest='species', type=str, default='GRCh37', help='Species name [GRCh37,GRCm38]; default GRCh37!')\n    return parser\n\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        print('\\nusage: python {} -h \\n'.format(sys.argv[0]))\n        sys.exit(1)\n    parser = Parser_opt()\n    args = parser.parse_args()\n    print('\\n' + '#' * 60)\n    print('python ' + ' '.join(sys.argv))\n    print('{0} Starting analysis......'.format(time.ctime()))\n    conSamps = args.con_samp.split(',')\n    expSamps = args.exp_samp.split(',')\n    if args.model == 'rMATS':\n        rMATS(conSamps, expSamps, args.species)\n    else:\n        rmats2sashimiplot(conSamps, expSamps)","sub_path":"splice.py","file_name":"splice.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"299426305","text":"num = 0\ntot = 0\nwhile True:\n    number = input(\"Enter number:\")\n    if number == \"done\":\n        break\n    try:\n        num1 = float(number)\n    except:\n        print(\"Invalid input\")\n        continue\n    num = num + 1\n    tot = tot + num1\n    average = tot / num\nprint(tot, num, average)\n\n\n\n\n\n\n\n","sub_path":"src/chapter 5/exercisE 1.py","file_name":"exercisE 1.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"372251875","text":"class Sum:\r\n    @staticmethod\r\n    def get_sum(*args):\r\n        _sum = 0\r\n        for i in args:\r\n            _sum += i\r\n        return _sum\r\n\r\n\r\nclass Dog:\r\n    # static variables; shared by every object of this class\r\n    num_of_dogs = 0\r\n\r\n    def __init__(self, name=\"Unknown\"):\r\n        self.name = name\r\n        Dog.num_of_dogs += 1 # access the static variable\r\n\r\n    @staticmethod\r\n    def get_num_of_dogs():\r\n        print(\"There are currently {} dogs\".format(Dog.num_of_dogs))\r\n\r\n\r\n# import sum\r\n# print(\"Sum: \",sum.get_sum(1,2,3,4,5))\r\n\r\nfrom sum import 
get_sum\r\nprint(\"Sum: \", get_sum(1,2,3,4,5))\r\n\r\n\r\ndef main():\r\n print(\"Sum: \", Sum.get_sum(1,2,3,4,5,6,7))\r\n spot = Dog(\"Spot\")\r\n spot.get_num_of_dogs()\r\n doug = Dog(\"Doug\")\r\n Dog.get_num_of_dogs()\r\n\r\nmain()","sub_path":"bootcam_banas/pythontut19.py","file_name":"pythontut19.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"545192762","text":"# Copyright (C) 2016 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n'''Module that contains the test TestBreakpointFileLine.'''\n\nfrom __future__ import absolute_import\n\nfrom harness.test_base_remote import TestBaseRemote\nfrom harness.decorators import (\n cpp_only_test,\n ordered_test\n)\n\n\nclass TestBreakpointFileLine(TestBaseRemote):\n '''Tests the setting of a breakpoint on a specific line of a RS file.'''\n\n bundle_target = {\n 'java': 'JavaDebugWaitAttach',\n 'jni': 'JNIDebugWaitAttach',\n 'cpp': 'CppDebugWaitAttach'\n }\n\n @ordered_test(0)\n def test_breakpoint_fileline(self):\n self.try_command('language renderscript status',\n ['Runtime Library discovered',\n 'Runtime Driver discovered'])\n\n self.try_command('breakpoint set --file simple.rs --line 12',\n ['(pending)'])\n\n self.try_command('process continue',\n [])\n\n self.try_command('bt',\n ['librs.simple.so',\n 'simple_kernel',\n 'stop reason = breakpoint'])\n\n self.try_command('breakpoint list',\n ['simple.rs',\n 'resolved = 1'])\n\n self.try_command('process status',\n ['stopped',\n 'stop reason = breakpoint'])\n\n self.try_command('breakpoint delete 1',\n ['1 breakpoints deleted'])\n\n @ordered_test('last')\n @cpp_only_test()\n def test_cpp_cleanup(self):\n self.try_command('process continue', ['exited with status = 0'])\n","sub_path":"frameworks/rs/tests/lldb/tests/testcases/test_breakpoint_fileline.py","file_name":"test_breakpoint_fileline.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"196259507","text":"# User database abstractions\n\nimport psycopg2\nfrom common import tzPrint\n\nclass UserDatabase:\n def __init__(self, connstr):\n '''\n Sets up the PostgreSQL connection to be used by this instance.\n '''\n self.db = psycopg2.connect(connstr)\n cur = self.db.cursor()\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS userdata (\n guild_id BIGINT,\n user_id BIGINT,\n zone TEXT NOT NULL,\n last_active TIMESTAMPTZ NOT NULL DEFAULT now(),\n PRIMARY KEY (guild_id, user_id)\n )\"\"\")\n self.db.commit()\n cur.close()\n\n def update_activity(self, serverid : str, authorid : str):\n '''\n If a user exists in the database, updates their last activity timestamp.\n '''\n c = self.db.cursor()\n c.execute(\"\"\"\n UPDATE userdata SET last_active = now()\n WHERE guild_id = %s AND user_id = %s\n \"\"\", (serverid, authorid))\n self.db.commit()\n c.close()\n\n def delete_user(self, serverid : str, authorid : str):\n '''\n 
Deletes existing user from the database.\n '''\n c = self.db.cursor()\n c.execute(\"\"\"\n DELETE FROM userdata\n WHERE guild_id = %s AND user_id = %s\n \"\"\", (serverid, authorid))\n self.db.commit()\n c.close()\n\n def update_user(self, serverid : str, authorid : str, zone : str):\n '''\n Insert or update user in the database.\n Does not do any sanitizing of incoming values, as only a small set of\n values are allowed anyway. This is enforced by the caller.\n '''\n self.delete_user(serverid, authorid)\n c = self.db.cursor()\n c.execute(\"\"\"\n INSERT INTO userdata (guild_id, user_id, zone) VALUES\n (%s, %s, %s)\n ON CONFLICT (guild_id, user_id)\n DO UPDATE SET zone = EXCLUDED.zone\n \"\"\", (serverid, authorid, zone))\n self.db.commit()\n c.close()\n\n def get_user(self, serverid, userid):\n '''\n Retrieves the time zone name of a single user.\n '''\n c = self.db.cursor()\n c.execute(\"\"\"\n SELECT zone FROM userdata\n WHERE guild_id = %s and user_id = %s\n \"\"\", (serverid, userid))\n result = c.fetchone()\n c.close()\n if result is None: return None\n return result[0]\n\n def get_users(self, serverid):\n \"\"\"\n Retrieves all user time zones for all recently active members.\n Users not present are not filtered here. Must be handled by the caller.\n Returns a dictionary of lists - Key is formatted zone, value is list of users represented.\n \"\"\"\n c = self.db.cursor()\n c.execute(\"\"\"\n SELECT zone, user_id\n FROM userdata\n WHERE\n last_active >= now() - INTERVAL '30 DAYS' -- only users active in the last 30 days\n AND guild_id = %(guild)s\n AND zone in (SELECT zone from (\n SELECT zone, count(*) as ct\n FROM userdata\n WHERE\n guild_id = %(guild)s\n AND last_active >= now() - INTERVAL '30 DAYS'\n GROUP BY zone\n LIMIT 20\n ) as pop_zones)\n ORDER BY RANDOM() -- Randomize display order (expected by consumer)\n \"\"\", {'guild': serverid})\n result = {}\n for row in c:\n resultrow = tzPrint(row[0])\n result[resultrow] = result.get(resultrow, list())\n result[resultrow].append(row[1])\n c.close()\n return result\n\n def get_unique_tz_count(self):\n '''\n Gets the number of unique time zones in the database.\n '''\n c = self.db.cursor()\n c.execute('SELECT COUNT(DISTINCT zone) FROM userdata')\n result = c.fetchall()\n c.close()\n return result[0][0]","sub_path":"userdb.py","file_name":"userdb.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"269441191","text":"\"\"\"\r\n Agenda de contactos con Archivo\r\n Contact Diary with File\r\n Author: Alan Ibarra\r\n Python 3.6\r\n\"\"\"\r\nclass Agenda:\r\n def __init__(self):\r\n self.db = \"agenda.txt\"\r\n\r\n def agregar(self,nombre,telefono):\r\n id = self.consultar()\r\n contacto = str(id + 1) + \",\" + str(nombre) + \",\" + str(telefono) + \"\\n\"\r\n regs = self.contactos() + contacto\r\n data = open(self.db , 'w')\r\n data.write(regs)\r\n data.close()\r\n return True\r\n\r\n def eliminar(self,id):\r\n num = self.consultar()\r\n out = True\r\n if(num > 0):\r\n registros = \"\"\r\n with open(self.db) as f:\r\n for contacto in f:\r\n conct = contacto.split(',')\r\n num = conct[0]\r\n if(num != id): \r\n registros +=contacto\r\n f.close()\r\n f = open(self.db,'w')\r\n f.write(registros)\r\n f.close()\r\n else:\r\n out = False\r\n return out\r\n\r\n def modificar(self,id,nombre,telefono):\r\n out = True\r\n if(self.consultar() == 0):\r\n out = False\r\n \r\n with open(self.db) as f:\r\n registros = \"\"\r\n for contacto in f:\r\n conct 
= contacto.split(',')\r\n num = conct[0]\r\n if(num == id):\r\n nombre = conct[1] if nombre == \"\" else nombre\r\n telefono = conct[2] if telefono == \"\" else telefono + \"\\n\"\r\n registros += str(id) + \",\" + str(nombre) + \",\" + str(telefono)\r\n else:\r\n registros += contacto\r\n f.close()\r\n f = open(self.db ,\"w\")\r\n f.write(registros)\r\n f.close()\r\n return out\r\n \r\n def consultar(self):\r\n num = 0\r\n registros = \"\"\r\n with open(self.db) as f:\r\n for contacto in f:\r\n reg = contacto.split(',')\r\n num = int(reg[0])\r\n registros += contacto\r\n f.close()\r\n return num\r\n\r\n def contactos(self):\r\n registros = \"\"\r\n with open(self.db) as f:\r\n for contacto in f:\r\n registros +=contacto\r\n f.close()\r\n return registros\r\n\r\n def main(self):\r\n opcion = '0'\r\n while(opcion != '5'):\r\n print(\"#######-CONTACTOS-#######\")\r\n print(\"# 1. Ver Contactos #\")\r\n print(\"# 2. Nuevo Contacto #\")\r\n print(\"# 3. Eliminar Contacto #\")\r\n print(\"# 4. Modificar Contacto #\")\r\n print(\"# 5. Salir #\")\r\n print(\"#########################\")\r\n opcion = input(\"Capture una opción: \")\r\n\r\n if(opcion == '1'):\r\n print(self.contactos())\r\n elif(opcion == '2'):\r\n nombre = input(\"Nombre: \")\r\n tel = input(\"Telefono: \")\r\n if(self.agregar(nombre , tel)):\r\n print(\"Guardado con exito\")\r\n else:\r\n print(\"No se pudo guardar\")\r\n elif(opcion == '3'):\r\n id = input(\"Capture el ID del contacto: \")\r\n if(self.eliminar(id)):\r\n print(\"Contacto eliminado\")\r\n else:\r\n print(\"No se elimino, puede que no exista el contacto\")\r\n elif(opcion == '4'):\r\n id = input(\"Capture el ID del contacto: \")\r\n nombre = input(\"Capture el nuevo nombre: \")\r\n telefono = input(\"Capture el nuevo telefono: \")\r\n if(self.modificar(id,nombre,telefono)):\r\n print(\"Contacto actualizado\")\r\n else:\r\n print(\"No se actualizo, es posible que no exista\")\r\n else:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n a = Agenda()\r\n a.main()\r\n \r\n\r\n\r\n","sub_path":"Agenda.py","file_name":"Agenda.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"20076297","text":"from django.urls import path, include\nfrom rest_framework.schemas import get_schema_view\nfrom rest_framework_simplejwt.views import (TokenObtainPairView,\n TokenRefreshView)\nfrom . 
import views\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'posts', views.PostViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n path('schema/', get_schema_view()),\n path('auth/', include(\n 'rest_framework.urls', namespace='rest_framework')),\n path('auth/token/obtain/', TokenObtainPairView.as_view(),\n name=\"api-token-obtain\"),\n path('auth/token/refresh/', TokenRefreshView.as_view()),\n path('post_like/', views.post_like,\n name='post_like'),\n\n]\n","sub_path":"soc_net_app/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"645136559","text":"from __future__ import print_function\nimport gym\nimport itertools\nimport matplotlib\nimport numpy as np\nimport tensorflow as tf\nfrom lib.env.threedmountain_car import ThreeDMountainCarEnv\nimport lib.RandomAction\nfrom lib.env.mountain_car import MountainCarEnv\nimport matplotlib.pyplot as plt\nimport os\nimport lib.qlearning as ql\nimport pickle\nimport deepq\nfrom lib.env.cartpole import CartPoleEnv\nfrom lib.env.threedcartpole import ThreeDCartPoleEnv\n\n# Create model\ndef neural_net(x, weights, biases):\n # Hidden fully connected layer with 256 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with 256 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n\n\ndef one_step_transition_model(learning_rate=0.1, n_hidden_1 = 32, n_hidden_2 = 32, num_input = 5, num_output = 4):\n\n # tf Graph input\n X = tf.placeholder(\"float\", [None, num_input])\n Y = tf.placeholder(\"float\", [None, num_output])\n\n # Store layers weight & bias\n weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, num_output]))\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([num_output]))\n }\n\n # Construct model\n logits = neural_net(X, weights, biases)\n\n # Define loss and optimizer\n loss_op = tf.losses.mean_squared_error(logits, Y)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op)\n\n return loss_op, train_op, X, Y\n\n\ndef get_train_test_data(source_qlearn=True, source_env=MountainCarEnv(), target_env=ThreeDMountainCarEnv()):\n\n # source task\n if source_qlearn: # collect data from qlearning = true, collect data from random actions = false\n source_filename = './' + source_env.name + '_dsource_qlearn.npz'\n if os.path.isfile(source_filename):\n f_read = np.load(source_filename)\n dsource = f_read['dsource']\n\n else:\n model = deepq.models.mlp([64], layer_norm=True)\n act = deepq.learn(\n source_env,\n q_func=model,\n lr=1e-3,\n max_timesteps=40000,\n buffer_size=50000,\n exploration_fraction=0.1,\n exploration_final_eps=0.1,\n print_freq=1,\n param_noise=False\n )\n\n replay_memory = [] # reset\n for ep in range(100): # 100 episodes\n obs, done = source_env.reset(), False\n while not done:\n n_obs, rew, done, _ = source_env.step(act(obs[None])[0])\n replay_memory.append([obs, act(obs[None])[0], n_obs, rew, 
done])\n obs = n_obs\n\n dsource = np.array(replay_memory)\n np.savez(source_filename, dsource=dsource)\n # with open('./data/q_learning.pkl', 'wb') as file:\n # pickle.dump(qlearning_2d, file)\n else:\n source_filename = './' + source_env.name + '_dsource_random.npz'\n if os.path.isfile(source_filename):\n f_read = np.load(source_filename)\n dsource = f_read['dsource']\n else:\n qlearning_2d = lib.RandomAction.RandomAction(source_env)\n dsource = np.array(qlearning_2d.play())\n np.savez(source_filename, dsource=dsource)\n\n # target task\n target_filename = './' + target_env.name + '_dtarget_random.npz'\n if os.path.isfile(target_filename):\n f_read = np.load(target_filename)\n # print(f_read['dtarget'].shape)\n dtarget = f_read['dtarget']\n else:\n random_action_3d = lib.RandomAction.RandomAction(target_env)\n dtarget = np.array(random_action_3d.play())\n np.savez(target_filename, dtarget=dtarget)\n\n # Define the input function for training\n dsa = np.array([np.append(x[0], x[1]) for x in dtarget]) # dsa = d states actions\n dns = np.array([x[2] for x in dtarget]) # dns = d next states\n\n dsa_train = dsa[:-100]\n dns_train = dns[:-100]\n dsa_test = dsa[-100:]\n dns_test = dns[-100:]\n\n return dsa_train, dns_train, dsa_test, dns_test, dsource, dtarget\n\n\ndef train_model(num_steps=10000, batch_size=100, display_step=100, source_env=MountainCarEnv(),\n target_env=ThreeDMountainCarEnv()):\n loss_op, train_op, X, Y = one_step_transition_model(num_input=target_env.observation_space.shape[0]+1, num_output=target_env.observation_space.shape[0])\n dsa_train, dns_train, dsa_test, dns_test, dsource, dtarget = get_train_test_data(\n source_qlearn=False, source_env=source_env, target_env=target_env)\n\n batch_num = np.size(dsa_train, 0)\n\n init = tf.global_variables_initializer()\n loss = []\n\n saver = tf.train.Saver()\n\n # Start training\n with tf.Session() as sess:\n # Run the initializer\n sess.run(init)\n\n for step in range(num_steps):\n batch_x = dsa_train[(step * batch_size) % batch_num: (step * batch_size + batch_size) % batch_num, :]\n batch_y = dns_train[(step * batch_size) % batch_num: (step * batch_size + batch_size) % batch_num, :]\n\n # Run optimization op (backprop)\n loss_train, _ = sess.run([loss_op, train_op], feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0:\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \"{:.4f}\".format(loss_train))\n loss.append(loss_train)\n\n print(\"Optimization Finished!\")\n\n # test set\n loss_test = sess.run(loss_op, feed_dict={X: dsa_test, Y: dns_test})\n print(\"test loss is {}\".format(loss_test))\n\n save_path = saver.save(sess, \"./data/tmp/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n\n # Find the mapping between source and target\n source_states = source_env.observation_space.shape[0] # 2\n target_states = target_env.observation_space.shape[0] # 4\n source_actions = source_env.action_space.n # 3\n target_actions = target_env.action_space.n # 5\n\n mse_state_mappings = np.zeros((source_states,) * target_states) # 2 by 2 by 2 by 2\n mse_action_mappings = np.ndarray(shape=(target_actions, source_actions, pow(target_states, source_states))) # 5 by 3 by 16\n mse_action_mappings.fill(-1)\n\n state_count = 0\n\n\n for target_states_list in itertools.product(range(source_states), repeat=target_states):\n state_losses = []\n for t_action in range(target_actions):\n for s_action in range(source_actions):\n states = np.array([x[0] for x in dsource if x[1] == s_action])\n actions = np.array([x[1] for x in 
dsource if x[1] == s_action])\n n_states = np.array([x[2] for x in dsource if x[1] == s_action])\n\n if (states.size == 0) or (actions.size == 0) or (n_states.size == 0):\n print(\n 'this happened.. dsource does not have certain states or does not perform certain actions at all. make sure to generate better samples. possibly with high epsilon value')\n # mse_action_mappings[t_action, s_action, state_count] = 0\n continue\n\n # transform to dsource_trans\n actions_trans = np.ndarray(shape=(actions.size,))\n actions_trans.fill(t_action)\n input_trans = np.concatenate((states[:, target_states_list], actions_trans[:,None]), axis=1)\n n_states_trans = np.squeeze(np.array([n_states[:, target_states_list]]))\n\n # calculate mapping error\n loss_mapping = sess.run(loss_op, feed_dict={X: input_trans, Y: n_states_trans})\n # print('loss_mapping is {}'.format(loss_mapping))\n\n state_losses.append(loss_mapping)\n # import pdb; pdb.set_trace()\n mse_action_mappings[t_action, s_action, state_count] = loss_mapping\n\n # import pdb; pdb.set_trace()\n mse_state_mappings[target_states_list] = np.mean(state_losses)\n state_count += 1\n\n ## mse_action_mappings_result = [[np.mean(mse_action_mappings[t_action, s_action, :]) for s_action in range(source_actions)] for t_action in range(target_actions)]\n\n mse_action_mappings_result = np.zeros((target_actions, source_actions))\n for t_action in range(target_actions):\n for s_action in range(source_actions):\n losses_act = []\n for s in range(target_states * target_states):\n if mse_action_mappings[t_action, s_action, s] != -1:\n # print(mse_action_mappings[t_action, s_action, s])\n losses_act.append(mse_action_mappings[t_action, s_action, s])\n mse_action_mappings_result[t_action, s_action] = np.mean(losses_act)\n\n print('action mapping: {}'.format(mse_action_mappings_result))\n print('state mapping {}'.format(mse_state_mappings))\n\n count = 0\n for target_states_list in itertools.product(range(source_states), repeat=target_states):\n print(str(count) + ': ')\n print(mse_state_mappings[target_states_list])\n count += 1\n\n with open('./data/mse_state_mappings_3d_2d.pkl', 'wb') as file:\n pickle.dump(mse_state_mappings, file)\n\n with open('./data/mse_action_mappings_3d_2d.pkl', 'wb') as file:\n pickle.dump(mse_action_mappings, file)\n\n print(\"Done exporting MSE file\")\n\n\nif __name__ == '__main__':\n # train_model(num_steps=10000, batch_size=100, display_step=100, source_env=MountainCarEnv(),\n # target_env=ThreeDMountainCarEnv())\n train_model(num_steps=10000, batch_size=100, display_step=100, source_env=ThreeDMountainCarEnv(),\n target_env=MountainCarEnv())\n","sub_path":"taylor_baseline/MASTER_baseline.py","file_name":"MASTER_baseline.py","file_ext":"py","file_size_in_byte":10338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"395663630","text":"import FWCore.ParameterSet.Config as cms\n\ngenerator = cms.EDFilter(\"Pythia8GeneratorFilter\",\n comEnergy = cms.double(14000.0),\n crossSection = cms.untracked.double(1.),\n filterEfficiency = cms.untracked.double(1),\n maxEventsToPrint = cms.untracked.int32(0),\n pythiaHepMCVerbosity = cms.untracked.bool(False),\n pythiaPylistVerbosity = cms.untracked.int32(0),\n\n PythiaParameters = cms.PSet(\n processParameters = cms.vstring(\n 'Main:timesAllowErrors = 10000',\n 'ParticleDecays:limitTau0 = on',\n 'ParticleDecays:tauMax = 10',\n 'HardQCD:gg2bbbar = on',\n 'HardQCD:qqbar2bbbar = on',\n 'PhaseSpace:pTHatMin = 15',\n 'PhaseSpace:pTHatMax = 3000',\n 
'Tune:pp 5',\n            'Tune:ee 3'\n        ),\n        parameterSets = cms.vstring('processParameters')\n    )\n)\n\nmmfltr = cms.EDFilter(\"MCParticlePairFilter\",\n    MinP = cms.untracked.vdouble(2.5, 2.5),\n    MaxEta = cms.untracked.vdouble(3.0, 3.0),\n    MinEta = cms.untracked.vdouble(-3.0, -3.0),\n    MaxDeltaR = cms.untracked.double(1.2),\n    ParticleID1 = cms.untracked.vint32(13),\n    ParticleID2 = cms.untracked.vint32(13)\n)\n\nProductionFilterSequence = cms.Sequence(generator * mmfltr)\n","sub_path":"genfragments/FourteenTeV/QCDbbbar_2mufilter_14TeV_cfi.py","file_name":"QCDbbbar_2mufilter_14TeV_cfi.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"631244237","text":"from django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium import webdriver\nfrom dosgamesfinder.tests.base import create_test_publisher, create_test_genre, create_test_dosgame, create_test_screenshot, create_test_download_location\n#from selenium.webdriver.common.keys import Keys\n#from time import sleep\n\nMAX_WAIT = 10 # 10 second max wait\n\nclass FunctionalTest(StaticLiveServerTestCase):\n    def setUp(self):\n        self.browser = webdriver.Firefox()\n        self.browser.implicitly_wait(MAX_WAIT)\n        self.browser.set_page_load_timeout(MAX_WAIT)\n\n        # create 2 test genres\n        self.test_action_genre = create_test_genre(name='Action')\n        self.test_adventure_genre = create_test_genre(name='Adventure')\n\n        # create 2 test publishers\n        self.test_publisher_test_soft = create_test_publisher(name='Test Soft Inc')\n        self.test_publisher_foobar_games = create_test_publisher(name='Foo Bar Entertainment')\n        \n        # create 6 specific test dosgames.\n        self.test_dosgame_a = create_test_dosgame(title='Abracadabra', publisher=self.test_publisher_test_soft, genre=self.test_action_genre)\n        self.test_dosgame_b = create_test_dosgame(title='Beetlejuice', publisher=self.test_publisher_test_soft, genre=self.test_action_genre)\n        self.test_dosgame_c = create_test_dosgame(title='Commandant Ki', publisher=self.test_publisher_test_soft, genre=self.test_adventure_genre)\n        self.test_dosgame_d = create_test_dosgame(title='Dodecahedron', publisher=self.test_publisher_foobar_games, genre=self.test_adventure_genre)\n        self.test_dosgame_e = create_test_dosgame(title='Explorer Dora', publisher=self.test_publisher_foobar_games, genre=self.test_action_genre)\n        self.test_dosgame_f = create_test_dosgame(title='Fortune Finder', publisher=self.test_publisher_foobar_games, genre=self.test_action_genre)\n\n        # to test pagination, we'll create a bunch of dummy anonymous test games too. 
To keep our filtering tests clean, we'll create a separate genre and publisher for them.\n        self.test_publisher_throwaway_soft = create_test_publisher('Throwaway Games')\n        self.test_shovelware_genre = create_test_genre('Shovelware')\n\n        for c in 'abcdefghijklmnopqrstuvwxyz1234567890': # Note that there are separate unit tests for testing the code that enables/disables pagination\n            create_test_dosgame(title=c, genre=self.test_shovelware_genre, publisher=self.test_publisher_throwaway_soft)\n\n        # create a test screenshot for each game\n        self.test_screenshot_a = create_test_screenshot(game=self.test_dosgame_a)\n        self.test_screenshot_b = create_test_screenshot(game=self.test_dosgame_b)\n        self.test_screenshot_c = create_test_screenshot(game=self.test_dosgame_c)\n        self.test_screenshot_d = create_test_screenshot(game=self.test_dosgame_d)\n        self.test_screenshot_e = create_test_screenshot(game=self.test_dosgame_e)\n        self.test_screenshot_f = create_test_screenshot(game=self.test_dosgame_f)\n\n        # create a test download location for each game\n        self.test_download_location_a = create_test_download_location(game=self.test_dosgame_a)\n        self.test_download_location_b = create_test_download_location(game=self.test_dosgame_b)\n        self.test_download_location_c = create_test_download_location(game=self.test_dosgame_c) \n        self.test_download_location_d = create_test_download_location(game=self.test_dosgame_d)\n        self.test_download_location_e = create_test_download_location(game=self.test_dosgame_e)\n        self.test_download_location_f = create_test_download_location(game=self.test_dosgame_f)\n\n    def tearDown(self):\n        self.browser.quit()\n        super().tearDown()\n\nclass LayoutAndStylingTest(FunctionalTest):\n    def test_layout_and_styling(self):\n        # user visits the home page\n        self.browser.get(self.live_server_url)\n\n        # user notices that there are rows of game cards, 3 columns to the row\n        game_cards_row = self.browser.find_element_by_class_name('games-list-row')\n        self.assertEqual(3, len(game_cards_row.find_elements_by_class_name('game-listview')))\n\nclass HomePageTests(FunctionalTest):\n    def test_visit_home_page_and_test_search_feature(self):\n        # user visits home page and attempts to use the search field. \n        self.browser.get(self.live_server_url)\n        search_bar = self.browser.find_element_by_id('search-bar')\n        \n        # user types something into the search bar and clicks the search button. 
\n search_bar.send_keys('abracadabra')\n self.browser.find_element_by_id('search-button').click()\n\n # user is redirected to a new page, user notices url has a search query in it and 'search results for' in the page header\n self.assertIn('search', self.browser.current_url)\n self.assertIn('Search results for', self.browser.find_element_by_tag_name('h1').text)\n\n # user searched for 'abracadabra', which is test_dosgame_a, user is given exactly one result, that game.\n self.assertEqual(1, len(self.browser.find_elements_by_class_name('game-listview'))) \n self.assertEqual(self.test_dosgame_a.title, self.browser.find_element_by_link_text(self.test_dosgame_a.title).text)\n \n def test_visit_home_page_and_test_genre_dropdown(self):\n # user visits the home page and clicks on the genre drop down menu\n self.browser.get(self.live_server_url)\n\n genre_dropdown = self.browser.find_element_by_id('GenreNavbarDropdown')\n genre_dropdown.click()\n\n # user notices that there are only 3 genres, as we have only created 3 in our class constructor\n genre_filter_buttons = self.browser.find_elements_by_class_name('dropdown-item')\n self.assertEqual(3, len(genre_filter_buttons))\n\n # user clicks on the action filter\n action_filter = str(self.test_action_genre.slug) + '-filter'\n action_filter_button = self.browser.find_element_by_id(action_filter)\n action_filter_button.click()\n\n # user is redirected to the action filter, user notices that the title has changed to say \"Action Games\"\n page_title = self.browser.find_element_by_tag_name('h1')\n self.assertEqual('Action Games', page_title.text)\n\n def test_visit_home_page_and_visit_game_page(self):\n # user visits the home page and sees a few games on the home page\n self.browser.get(self.live_server_url)\n listview_game_title = self.browser.find_element_by_class_name('game-title-link')\n listview_game_title_text = listview_game_title.text\n listview_game_title.click()\n\n # user notices that they are taken to a detailview for the game, \n detailview_game_title = self.browser.find_element_by_tag_name('h1')\n self.assertEqual(listview_game_title_text, detailview_game_title.text) \n\n # user goes back to the home page, instead of clicking on the page title, he clicks on the image \n self.browser.get(self.live_server_url)\n listview_game_title = self.browser.find_element_by_class_name('listView-screenshot')\n listview_game_title.click()\n\n # user notices that they are taken to the same detailview for whatever game they have selected\n self.assertIn('/game/', self.browser.current_url)\n\n def test_game_card_links_to_genre(self):\n # user visits the home page and sees a few games on the home page\n self.browser.get(self.live_server_url)\n\n # user sees that the game card has a genre link and clicks it\n listview_game_genre = self.browser.find_element_by_class_name('card-genre').find_element_by_tag_name('a')\n listview_game_genre_name = listview_game_genre.text\n listview_game_genre.click()\n\n # user is redirected to a page filtering by that genre. 
User notices that the title now says something like 'action games' or 'adventure games'\n        filterview_page_title = self.browser.find_element_by_tag_name('h1')\n        self.assertEqual(str(listview_game_genre_name) + ' Games', filterview_page_title.text)\n\n    def test_game_card_links_to_publisher_filter(self):\n        # user visits the home page and sees a few games on the home page\n        self.browser.get(self.live_server_url)\n\n        # user sees that the game card has a publisher link and clicks it\n        listview_game_publisher = self.browser.find_element_by_class_name('card-publisher-and-date').find_element_by_tag_name('a')\n        listview_game_publisher_name = listview_game_publisher.text\n        listview_game_publisher.click()\n\n        # user is redirected to a page filtering by that publisher. User notices that the title now says something like 'Games by Foo Bar Entertainment'\n        filterview_page_title = self.browser.find_element_by_tag_name('h1')\n        self.assertEqual('Games by ' + str(listview_game_publisher_name), filterview_page_title.text)\n\n    def test_visit_home_page_pagination(self):\n        # user visits the home page and sees a few games on the home page\n        self.browser.get(self.live_server_url) \n        \n        # user notices that there are some page numbers across the top of the page. \n        # user clicks the number 2 and notices that the url changes\n        self.browser.find_element_by_link_text('2').click()\n        self.assertIn('/?page=2', self.browser.current_url)\n\n        # user then clicks the prev page button and is taken back to page 1\n        self.browser.find_element_by_link_text('Prev').click()\n        self.assertIn('/?page=1', self.browser.current_url)\n\n        # user then clicks the next page button and is taken back to page 2 \n        self.browser.find_element_by_link_text('Next').click()\n        self.assertIn('/?page=2', self.browser.current_url)\n        \n    def test_visit_home_page_and_visit_publisher_page(self):\n        # user visits the home page and clicks on the publisher filter button.\n        self.browser.get(self.live_server_url) \n        self.browser.find_element_by_link_text('Publishers').click()\n        \n        # user notices that the url has changed and that the page title is \"All Publishers\"\n        self.assertIn('/publishers', self.browser.current_url)\n        self.assertEqual('All Publishers', self.browser.find_element_by_tag_name('h1').text)\n\n        # user notices 3 publishers on the page, \n        self.assertEqual(self.test_publisher_throwaway_soft.name, self.browser.find_element_by_link_text(self.test_publisher_throwaway_soft.name).text)\n        self.assertEqual(self.test_publisher_foobar_games.name, self.browser.find_element_by_link_text(self.test_publisher_foobar_games.name).text)\n        self.assertEqual(self.test_publisher_test_soft.name, self.browser.find_element_by_link_text(self.test_publisher_test_soft.name).text)\n\n    def test_visit_home_page_and_filter_by_genre(self):\n        # user visits the home page and clicks on the genre filter button, selecting the action genre\n        self.browser.get(self.live_server_url) \n        self.browser.find_element_by_link_text('Genres').click()\n        self.browser.find_element_by_link_text('Action').click()\n        \n        # user notices that the url has changed and that the page title is \"Action Games\"\n        self.assertIn('/genre/action', self.browser.current_url)\n        self.assertEqual('Action Games', self.browser.find_element_by_tag_name('h1').text)\n\n        # user notices 4 games in the action genre \n        self.assertEqual(self.test_dosgame_a.title, self.browser.find_element_by_link_text(self.test_dosgame_a.title).text)\n        self.assertEqual(self.test_dosgame_b.title, 
self.browser.find_element_by_link_text(self.test_dosgame_b.title).text)\n        self.assertEqual(self.test_dosgame_e.title, self.browser.find_element_by_link_text(self.test_dosgame_e.title).text)\n        self.assertEqual(self.test_dosgame_f.title, self.browser.find_element_by_link_text(self.test_dosgame_f.title).text)\n\nclass GamePageTests(FunctionalTest):\n    def test_visit_game_page_and_test_screenshots(self):\n        # user visits a game page\n        detailview_url = self.live_server_url + '/game/' + self.test_dosgame_a.slug \n        self.browser.get(detailview_url) \n\n        # user notices that there are screenshots on the page\n        screenshots = self.browser.find_elements_by_class_name('detailView-screenshot')\n        self.assertGreater(len(screenshots), 0)\n\n    def test_visit_game_page_and_view_download_links(self): \n        # user visits a game page\n        detailview_url = self.live_server_url + '/game/' + self.test_dosgame_a.slug \n        self.browser.get(detailview_url) \n\n        # user notices that there are download locations listed on the page\n        download_locations = self.browser.find_elements_by_class_name('download-location')\n        self.assertGreater(len(download_locations), 0)\n\n    def test_visit_game_page_all_games_returns_to_homepage(self):\n        # user visits a game page\n        original_homepage_url = self.live_server_url + '/'\n        detailview_url = self.live_server_url + '/game/' + self.test_dosgame_a.slug \n        self.browser.get(detailview_url) \n        \n        # user clicks on the 'all games' button to return to the home page. \n        self.browser.find_element_by_link_text('All Games').click()\n        self.assertEqual(self.browser.current_url, original_homepage_url)\n\n    def test_visit_game_page_navbar_logo_returns_to_homepage(self):\n        # user visits a game page\n        original_homepage_url = self.live_server_url + '/'\n        detailview_url = self.live_server_url + '/game/' + self.test_dosgame_a.slug \n        self.browser.get(detailview_url) \n        \n        # user clicks on the navbar logo to return to the home page. \n        self.browser.find_element_by_class_name('navbar-brand').click()\n        self.assertEqual(self.browser.current_url, original_homepage_url) \n\nclass PublisherPageTests(FunctionalTest):\n    def test_visit_publisher_page_and_select_a_publisher_filter(self):\n        # user visits the publisher page\n        self.browser.get(self.live_server_url + '/publishers/')\n\n        # user selects one of the publishers and follows the link\n        self.browser.find_element_by_link_text(self.test_publisher_foobar_games.name).click()\n\n        # user notices that the page title now says something like 'Games by Foo Bar Games'\n        page_title = self.browser.find_element_by_tag_name('h1')\n        self.assertEqual(page_title.text, 'Games by ' + str(self.test_publisher_foobar_games.name))\n\n        # user sees that all the games that Foo Bar Games has ever developed can be found here; in this case, 
games d, e and f\n self.assertEqual(self.test_dosgame_d.title, self.browser.find_element_by_link_text(self.test_dosgame_d.title).text)\n self.assertEqual(self.test_dosgame_e.title, self.browser.find_element_by_link_text(self.test_dosgame_e.title).text)\n self.assertEqual(self.test_dosgame_f.title, self.browser.find_element_by_link_text(self.test_dosgame_f.title).text)\n","sub_path":"functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":15080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"159007611","text":"#!/usr/bin/env python\n\n\"\"\"Datastore query expressions\"\"\"\n\n__author__ = 'Michael Meisinger'\n\nfrom pyon.core.exception import BadRequest\nfrom pyon.datastore.datastore_common import DataStore\n\n\nclass DatastoreQueryConst(object):\n\n # Expression\n EXP_PREFIX = \"exp:\"\n EXP_AND = EXP_PREFIX + \"and\"\n EXP_OR = EXP_PREFIX + \"or\"\n EXP_NOT = EXP_PREFIX + \"not\"\n\n # Operators\n OP_PREFIX = \"op:\"\n OP_EQ = OP_PREFIX + \"eq\"\n OP_NEQ = OP_PREFIX + \"neq\"\n OP_LT = OP_PREFIX + \"lt\"\n OP_LTE = OP_PREFIX + \"lte\"\n OP_GT = OP_PREFIX + \"gt\"\n OP_GTE = OP_PREFIX + \"gte\"\n OP_LIKE = OP_PREFIX + \"like\"\n OP_ILIKE = OP_PREFIX + \"ilike\"\n OP_FUZZY = OP_PREFIX + \"fuzzy\"\n\n XOP_PREFIX = \"xop:\"\n XOP_IN = XOP_PREFIX + \"in\"\n XOP_BETWEEN = XOP_PREFIX + \"between\"\n XOP_ATTLIKE = XOP_PREFIX + \"attlike\"\n XOP_ATTILIKE = XOP_PREFIX + \"attilike\"\n XOP_ALLMATCH = XOP_PREFIX + \"allmatch\"\n\n GOP_PREFIX = \"gop:\"\n GOP_OVERLAPS_BBOX = GOP_PREFIX + \"overlaps\"\n GOP_WITHIN_BBOX = GOP_PREFIX + \"within\"\n GOP_CONTAINS_BBOX = GOP_PREFIX + \"contains\"\n\n ROP_PREFIX = \"rop:\"\n ROP_OVERLAPS_RANGE = ROP_PREFIX + \"overlaps\"\n ROP_WITHIN_RANGE = ROP_PREFIX + \"within\"\n ROP_CONTAINS_RANGE = ROP_PREFIX + \"contains\"\n\n # Object and resource attribute\n ATT_ID = \"att:id\"\n ATT_TYPE = \"att:type_\"\n RA_NAME = \"ra:name\"\n RA_TS_CREATED = \"ra:ts_created\"\n RA_TS_UPDATED = \"ra:ts_updated\"\n RA_LCSTATE = \"ra:lcstate\"\n RA_AVAILABILITY = \"ra:availability\"\n RA_GEOM = \"ra:geom\"\n RA_GEOM_LOC = \"ra:geom_loc\"\n RA_VERT_RANGE = \"ra:vertical_range\"\n RA_TEMP_RANGE = \"ra:temporal_range\"\n\n\nDQ = DatastoreQueryConst\nQUERY_EXP_ID = \"qexp_v1.0\"\n\n\nclass DatastoreQueryBuilder(DatastoreQueryConst):\n \"\"\"Helps create structured queries to the datastore\"\"\"\n\n def __init__(self, profile=None, datastore=None, where=None, order_by=None, id_only=False, limit=0, skip=0, **kwargs):\n self.query = {}\n self.query[\"QUERYEXP\"] = QUERY_EXP_ID\n qargs = self.query.setdefault(\"query_args\", {})\n qargs[\"profile\"] = profile or DataStore.DS_PROFILE.RESOURCES\n qargs[\"datastore\"] = datastore or DataStore.DS_RESOURCES\n self.build_query(where=where, order_by=order_by, id_only=id_only, limit=limit, skip=skip)\n\n def build_query(self, where=None, order_by=None, id_only=None, limit=None, skip=None, **kwargs):\n qargs = self.query[\"query_args\"]\n if id_only is not None:\n qargs[\"id_only\"] = id_only\n if limit is not None:\n qargs[\"limit\"] = limit\n if skip is not None:\n qargs[\"skip\"] = skip\n qargs.update(kwargs)\n self.query[\"where\"] = where if where is not None else self.query.get(\"where\", \"\")\n self.query[\"order_by\"] = order_by if order_by is not None else self.query.get(\"order_by\", {})\n\n def get_query_arg(self, argname, default=None):\n return self.query[\"query_args\"].get(argname, default)\n\n def get_query(self):\n 
self.check_query(self.query)\n return self.query\n\n def op_expr(self, operator, *args):\n return [operator, args or []]\n\n def and_(self, *args):\n return self.op_expr(self.EXP_AND, *args)\n\n def or_(self, *args):\n return self.op_expr(self.EXP_OR, *args)\n\n def not_(self, *args):\n return self.op_expr(self.EXP_NOT, *args)\n\n def eq(self, col, value):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.OP_EQ, colname, value)\n\n def neq(self, col, value):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.OP_NEQ, colname, value)\n\n def in_(self, col, *args):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.XOP_IN, colname, *args)\n\n def like(self, col, value, case_sensitive=True):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n if case_sensitive:\n return self.op_expr(self.OP_LIKE, colname, value)\n else:\n return self.op_expr(self.OP_ILIKE, colname, value)\n\n def fuzzy(self, col, value):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.OP_FUZZY, colname, value)\n\n # --- Special operators\n\n def between(self, col, val1, val2):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.XOP_BETWEEN, colname, val1, val2)\n\n def all_match(self, value):\n return self.op_expr(self.XOP_ALLMATCH, value)\n\n def attr_like(self, attr, value, case_sensitive=True):\n if case_sensitive:\n return self.op_expr(self.XOP_ATTLIKE, attr, value)\n else:\n return self.op_expr(self.XOP_ATTILIKE, attr, value)\n\n # --- Range operators\n\n def overlaps_range(self, col, x1, y1):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.ROP_OVERLAPS_RANGE, colname, x1, y1)\n\n def contains_range(self, col, x1, y1):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.ROP_CONTAINS_RANGE, colname, x1, y1)\n\n def within_range(self, col, x1, y1):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.ROP_WITHIN_RANGE, colname, x1, y1)\n\n # --- Geospatial operators\n\n def overlaps_bbox(self, col, x1, y1, x2, y2):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.GOP_OVERLAPS_BBOX, colname, x1, y1, x2, y2)\n\n def contains_bbox(self, col, x1, y1, x2, y2):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.GOP_CONTAINS_BBOX, colname, x1, y1, x2, y2)\n\n def within_bbox(self, col, x1, y1, x2, y2):\n self._check_col(col)\n colname = col.split(\":\", 1)[1]\n return self.op_expr(self.GOP_WITHIN_BBOX, colname, x1, y1, x2, y2)\n\n # --- Ordering\n\n def order_by(self, column, asc=True, *args):\n pass\n\n def _check_col(self, col):\n profile = self.query[\"query_args\"][\"profile\"]\n if profile == DataStore.DS_PROFILE.RESOURCES:\n if not (col.startswith(\"ra\") or col.startswith(\"att\")):\n raise BadRequest(\"Query column unknown: %s\" % col)\n\n\n @classmethod\n def check_query(cls, query):\n \"\"\"Check a query expression (dict) for basic compliance\"\"\"\n if not isinstance(query, dict):\n raise BadRequest(\"query type dict expected, not: %s\" % type(query))\n if not \"query_args\" in query:\n raise BadRequest(\"query_args expected in query\")\n if query[\"QUERYEXP\"] != QUERY_EXP_ID:\n raise BadRequest(\"Unknown query expression language: %s\" % query[\"query_args\"][\"QUERYEXP\"])\n if not \"where\" in query:\n raise BadRequest(\"where expected in query\")\n if not \"order_by\" 
in query:\n raise BadRequest(\"order_by expected in query\")\n","sub_path":"pyon/datastore/datastore_query.py","file_name":"datastore_query.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"431551798","text":"# -*- coding: utf-8 -*-\n'''\nfile : C:\\WORKS_2\\WS\\WS_Others.Art\\JVEMV6\\46_art\\VIRTUAL\\Admin_Projects\\ip\\data\\ops\\2_image-programming\\2_projects\\3_handle-exif-data\\ops_1.py\norig : C:\\WORKS_2\\WS\\WS_Others.Art\\JVEMV6\\46_art\\VIRTUAL\\Admin_Projects\\ip\\data\\ops\\1_1.py\nat : 2018/08/23 12:33:00\n\nr w && r d4\npushd C:\\WORKS_2\\WS\\WS_Others.Art\\JVEMV6\\46_art\\VIRTUAL\\Admin_Projects\\ip\\data\\ops\\2_image-programming\\2_projects\\3_handle-exif-data\npython ops_1.py\n\n\n\n'''\n###############################################\nimport sys\n\n'''###################\n import : original files \n###################'''\nsys.path.append('.')\nsys.path.append('..')\nsys.path.append('C:\\\\WORKS_2\\\\WS\\\\WS_Others.Art\\\\JVEMV6\\\\46_art\\\\VIRTUAL\\\\Admin_Projects') # libs_mm\n\nfrom libs_admin import libs, lib_ip\n\n'''###################\n import : built-in modules \n###################'''\n# import getopt\nimport os, glob, getopt, math as math, numpy as np\n# import inspect\n\n'''###################\n import : built-in modules \n###################'''\nimport cv2\n# from pandas.compat import str_to_bytes\nfrom numpy.distutils.from_template import item_re\n#ref https://stackoverflow.com/questions/3129322/how-do-i-get-monitor-resolution-in-python\nfrom win32api import GetSystemMetrics\nfrom matplotlib import pylab as plt\n\n#ref https://www.lifewithpython.com/2014/12/python-extract-exif-data-like-data-from-images.html\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\n###############################################\ndef show_Message() :\n \n msg = '''\n \n -v\tVolume down the amplitude --> 1.0 * v / 1000\n -f\tBase frequency ---> e.g. 
262 for the A tone\n    -p\tPhase of the sine curves ---> sin(2 * np.pi * f0 * n * phase / fs)'''\n    \n    print (msg)\n\n#ref https://www.lifewithpython.com/2014/12/python-extract-exif-data-like-data-from-images.html\ndef get_exif_of_image(file):\n    \"\"\"Get EXIF of an image if exists.\n\n    Function that extracts the EXIF data of the given image\n    @return exif_table dictionary holding the Exif data\n    \"\"\"\n    im = Image.open(file)\n\n    # Get the Exif data\n    # If none exists, stop here and return an empty dictionary\n    try:\n        exif = im._getexif()\n    except AttributeError:\n        return {}\n\n    # Raw tag IDs are not human-readable, so decode them\n    # and store them in a table\n    exif_table = {}\n    for tag_id, value in exif.items():\n        tag = TAGS.get(tag_id, tag_id)\n        exif_table[tag] = value\n\n    return exif_table\n\n\n'''###################\n    \n    get_GPS_Data(fpath_Image)\n    \n    @return: tuple\n        => (('N', 35, 35, 24.14), ('E', 139, 34, 48.01))\n    \n    @param fpath_Image: file full path\n    \n    @example file : C:\\WORKS_2\\WS\\WS_Cake_IFM11\\iphone_to_upload\\2018-08-22_15-42-10_000.jpg\n    \n    [ops_1.py:126] dicOf_Exif['GPSInfo'] =>\n    {1: 'N', 2: ((35, 1), (35, 1), (2414, 100)), 3: 'E', 4: ((139, 1), (34, 1), (480\n    1, 100)), 5: b'\\x00', 6: (24268, 387), 7: ((6, 1), (42, 1), (900, 100)), 12: 'K'\n    , 13: (0, 1), 16: 'T', 17: (80093, 231), 23: 'T', 24: (80093, 231), 29: '2018:08\n    :22', 31: (10, 1)}    \n###################'''\ndef get_GPS_Data(fpath_Image):\n    \n    dicOf_Exif = get_exif_of_image(fpath_Image)# def get_GPS_Data():\n    \n    gps_Info = dicOf_Exif['GPSInfo']\n    \n    '''###################\n        lat, longi \n    ###################'''\n    txt_Lat = \"%s=\" % (gps_Info[1])\n    txt_Longi = \"%s=\" % (gps_Info[3])\n\n    '''###################\n        values\n    ###################'''\n    vals_Lat = gps_Info[2]\n    \n    txt_Lat += \"%d-%d-%.02f\" % \\\n        (vals_Lat[0][0], vals_Lat[1][0], vals_Lat[2][0] * 1.0 / vals_Lat[2][1])\n#        (vals_Lat[0][0], vals_Lat[1][0])\n\n    vals_Longi = gps_Info[4]\n    \n    txt_Longi += \"%d-%d-%.02f\" % \\\n        (vals_Longi[0][0], vals_Longi[1][0], vals_Longi[2][0] * 1.0 / vals_Longi[2][1])\n\n#    txt_Longi += \"%d-%d-\" % \\\n#        (vals_Longi[0][0], vals_Longi[1][0])\n\n    \n    '''###################\n        tuples\n    ###################'''\n    data_Lat = (gps_Info[1], vals_Lat[0][0], vals_Lat[1][0], vals_Lat[2][0] * 1.0 / vals_Lat[2][1])\n    data_Longi = (gps_Info[3], vals_Longi[0][0], vals_Longi[1][0], vals_Longi[2][0] * 1.0 / vals_Longi[2][1])\n    \n    data_Final = (data_Lat, data_Longi)\n    \n    '''###################\n        report \n    ###################'''\n    print(\"[%s:%d] txt_Lat = %s, txt_Longi = %s\" % \\\n        (os.path.basename(libs.thisfile()), libs.linenum()\n         , txt_Lat, txt_Longi\n         ), file=sys.stderr)\n    \n    '''###################\n        return \n    ###################'''\n    return data_Final\n    \n#/ def get_GPS_Data(fpath_Image):\n    \ndef test_1():\n\n    '''######################################\n        ops \n    ######################################'''\n    dpath_Ops_Images = \"C:\\\\WORKS_2\\\\WS\\\\WS_Cake_IFM11\\\\iphone_to_upload\"\n#    C:\\WORKS_2\\WS\\WS_Cake_IFM11\\iphone_to_upload\n    fname_Ops_Image = \"2018-08-22_15-32-53_000.jpg\"\n#    fname_Ops_Image = \"2018-08-22_15-42-10_000.jpg\"\n    \n    fpath_Ops_Image = os.path.join(dpath_Ops_Images, fname_Ops_Image)\n\n    '''###################\n        get : exif data\n    ###################'''\n#    print(get_exif_of_image(fpath_Ops_Image)) \n#    # print get_exif_of_image(fpath_Ops_Image)\n#    # print get_exif_of_image(\"sample.jpg\")\n    \n    \n    #ref https://stackoverflow.com/questions/15785719/how-to-print-a-dictionary-line-by-line-in-python\n    \n#    for item in dicOf_Exif:\n#    \n#        print(item)\n#    \n#    #/for item in dicOf_Exif:\n\n#    for key in sorted(dicOf_Exif.keys()):\n#    \n#        print(key)\n#    \n#    
#/for key in :\n\n# print(\"[%s:%d] dicOf_Exif['SubjectLocation'] => \" % \\\n# (os.path.basename(libs.thisfile()), libs.linenum()\n# \n# ), file=sys.stderr)\n# print(dicOf_Exif['SubjectLocation'])\n \n '''###################\n gps info\n ###################'''\n result = get_GPS_Data(fpath_Ops_Image)\n #(('N', 35, 35, 24.14), ('E', 139, 34, 48.01))\n \n print()\n print(\"[%s:%d] GPS data =>\" % \\\n (os.path.basename(libs.thisfile()), libs.linenum()\n \n ), file=sys.stderr)\n print(result)\n \n \n \n return\n \n dicOf_Exif = get_exif_of_image(fpath_Ops_Image)\n \n print()\n print(\"[%s:%d] dicOf_Exif['GPSInfo'] => \" % \\\n (os.path.basename(libs.thisfile()), libs.linenum()\n \n ), file=sys.stderr)\n print(dicOf_Exif['GPSInfo'])\n\n '''###################\n gps info\n ###################'''\n gps_Info = dicOf_Exif['GPSInfo']\n \n for item in gps_Info.keys():\n# for item in gps_Info:\n \n print(\"key => %d\" % (item))\n print(gps_Info[item])\n print()\n# print(\"key => %d\" % (item))\n# print(gps_Info[item])\n# print(\"%s => %s\" % (item, gps_Info[item]))\n# print(item, type(item))\n# #1 \n \n #/for item in gps_Info:\n\n \n '''###################\n message\n ###################'''\n print()\n print(\"[%s:%d] test_1 =======================\" % \\\n (os.path.basename(libs.thisfile()), libs.linenum()\n\n ), file=sys.stderr)\n \n \n \n#/ def test_1():\n\ndef exec_prog():\n \n '''###################\n ops \n ###################'''\n test_1()\n \n print(\"[%s:%d] exec_prog() => done\" % \\\n (os.path.basename(libs.thisfile()), libs.linenum()\n \n ), file=sys.stderr)\n \n#def exec_prog()\n\n'''\n\ntest_1.py [-fXXX] #=> frequency\ntest_1.py -f402\n'''\nif __name__ == \"__main__\" :\n\n '''###################\n \tvalidate : help option\t\t\n ###################'''\n\n '''###################\n \tget options\t\t\n ###################'''\n\n '''###################\n \tevecute\t\t\n ###################'''\n exec_prog()\n\n print()\n \n print(\"[%s:%d] done\" % \\\n (os.path.basename(libs.thisfile()), libs.linenum()\n \n ), file=sys.stderr)\n# print \"[%s:%d] done\" % (thisfile(), linenum())\n\n\n'''\nApertureValue\nBrightnessValue\nColorSpace\nComponentsConfiguration\nCustomRendered\nDateTime\nDateTimeDigitized\nDateTimeOriginal\nExifImageHeight\nExifImageWidth\nExifOffset\nExifVersion\nExposureBiasValue\nExposureMode\nExposureProgram\nExposureTime\nFNumber\nFlash\nFlashPixVersion\nFocalLength\nFocalLengthIn35mmFilm\nGPSInfo\nISOSpeedRatings\nLensMake\nLensModel\nLensSpecification\nMake\nMakerNote\nMeteringMode\nModel\nOrientation\nResolutionUnit\nSceneCaptureType\nSceneType\nSensingMethod\nShutterSpeedValue\nSoftware\nSubjectLocation\nSubsecTimeDigitized\nSubsecTimeOriginal\nWhiteBalance\nXResolution\nYCbCrPositioning\nYResolution\n'''","sub_path":"JVEMV6/46_art/VIRTUAL/Admin_Projects/ip/data/ops/2_image-programming/2_projects/3_handle-exif-data/ops_1.py","file_name":"ops_1.py","file_ext":"py","file_size_in_byte":8811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"586092982","text":"import time as clock\nclear = \"\\n\" * 100\nbuild = 1\nbuildDate = \"13/06/2016\"\ntitle = \"text-game\"\n\n\ndef main():\n menu()\n print(\"next\")\n\n\ndef menu():\n title_run = 5\n while True:\n if not title_run:\n break\n else:\n title_info = [\"Welcome to text-game!\"]\n print(clear, title_info[0], \"\\nBuild id:\", build, \"\\nBuild date:\", buildDate)\n print(\"Starting in\", title_run)\n title_run -= 1\n 
clock.sleep(1)\nmain()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"225817176","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\turl(r'index/$',views.index,name = 'index'),\n\turl(r'^display/$',views.display,name='display'),\n\turl(r'^register/$',views.register,name='register')\n#\turl(r'hit_rate_info/$',views.hit_rate_info,name = 'hit_rate_info'),\n\n]","sub_path":"aims/justdo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"127825824","text":"\"\"\"\nNOTE: Experimental code for testing how gate decomposition that\nintroduces ancillas would look in Cirq\n\nFor this the ICM form of circuits is being implemented. As described in\n\n\"Faster manipulation of large quantum circuits using wire label reference\ndiagrams\" https://arxiv.org/abs/1811.06011\n\"\"\"\n\nimport cirq\n\nfrom icm.icm_operation_id import OperationId\n\nclass SplitQubit(cirq.NamedQubit):\n\n    # Static nr_ancilla\n    nr_ancilla = -1\n\n\n    def __init__(self, name):\n        super().__init__(name)\n\n        self.children = (None, None)\n\n        self.threshold = OperationId()\n\n    def get_latest_ref(self, operation_id):\n\n        # this wire has not been split\n        if self.children == (None, None):\n            return self\n\n        n_ref = self\n        while n_ref.children != (None, None):\n            # Decide based on the threshold\n            if n_ref.threshold >= operation_id:\n                n_ref = self.children[0]\n            else:\n                n_ref = self.children[1]\n\n        return n_ref\n\n    def split_this_wire(self, operation_id):\n        # It can happen that the reference is too old\n        current_wire = self.get_latest_ref(operation_id)\n\n        # The wire receives a threshold for later updates\n        current_wire.threshold = operation_id\n\n        # It is a new wire, but keep the old name\n        n_child_0 = SplitQubit(current_wire.name)\n\n        # It is a new wire, that is introduced and gets a new name\n        SplitQubit.nr_ancilla += 1\n        n_child_1 = SplitQubit(\"_anc_{0}\".format(SplitQubit.nr_ancilla))\n\n        # Update the children tuple of this wire\n        current_wire.children = (n_child_0, n_child_1)\n\n        # Return the children as a tuple\n        return current_wire.children\n\n\ndef decomp_to_icm(cirq_operation):\n    # TODO:\n\n    new_op_id = cirq_operation.icm_op_id.add_decomp_level()\n\n    # Assume for the moment that these are only single qubit operations\n    new_wires = cirq_operation.qubits[0].split_this_wire(new_op_id)\n\n    print(\"qubit is \", cirq_operation.qubits[0])\n\n    # Create the cnot\n    cnot = cirq.CNOT(new_wires[0], new_wires[1])\n    # Assign a decomposition id, like [old].1\n    cnot.icm_op_id = new_op_id.advance_decomp()\n\n    # Create the measurement\n    meas = cirq.measure(new_wires[0])\n    # Because this operation follows the CNOT, its ID continues from the\n    # previous one, resulting in something like [oldid].2\n    meas.icm_op_id = cnot.icm_op_id.advance_decomp()\n\n    return [cnot, meas]\n\n\ndef keep_icm(op):\n    # TODO: do this more properly\n    if isinstance(op.gate, (cirq.CNotPowGate, cirq.MeasurementGate)):\n        return True\n\n    return False\n\nimport icm.icm_flag_manipulations as flags\na = SplitQubit(\"a\")\nb = SplitQubit(\"b\")\n\nmycircuit = cirq.Circuit(cirq.T.on(a), cirq.T.on(b), cirq.CNOT.on(a,b), cirq.S.on(a))\nflags.add_op_ids(mycircuit, [cirq.T, cirq.S])\n\nprint(mycircuit)\n\nicm_circuit = cirq.Circuit(cirq.decompose(mycircuit,\n                                          intercepting_decomposer=decomp_to_icm,\n                                          keep 
= keep_icm))\nprint(icm_circuit)","sub_path":"icm/icm_converter.py","file_name":"icm_converter.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"123377343","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed 20 09:40:01 2016\r\n\r\n@author: t817682\r\n\"\"\"\r\n\r\n# Imports & Definitions\r\nfrom afklortools.spark.context import BaseHiveContext\r\n\r\n\r\nclass Reduce(BaseHiveContext):\r\n\r\n    def rdd_reduce(self):\r\n        # def test_pymongo(x):\r\n        #     import pymongo\r\n        #     return (x[1], 1)\r\n        # start by creating a mockup dataset\r\n        nb_airport = self.hc.sql(\"select * from crm_data.airport\").count()\r\n\r\n        l = [(1, 'hello'), (2, 'world'), (3, 'world')]\r\n        self.logger.info(\"Inside the reduce !!!!!!!!!!!!!!!!!!! :\" + str(self.sc.master))\r\n        self.logger.info(\"Number of airports !!!!!!!!!!!!!!!!!!! :\" + str(nb_airport))\r\n        # and create a RDD out of it\r\n        hellordd = self.sc.parallelize(l)\r\n        return hellordd.map(lambda x: (x[1], 1)).reduceByKey(lambda a, b: a + b).collect()","sub_path":"helloworld/wordcount/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"431786506","text":"class Solution(object):\n    def uniquePaths(self, m, n):\n        \"\"\"\n        :type m: int\n        :type n: int\n        :rtype: int\n        \"\"\"\n        # build each row as a distinct list; [[0] * n] * m would alias\n        # every row to the same underlying list\n        dp = [[0] * n for _ in range(m)]\n        for sub_m in range(0, m):\n            for sub_n in range(0, n):\n                if not (sub_m and sub_n):\n                    dp[sub_m][sub_n] = 1\n                else:\n                    dp[sub_m][sub_n] = dp[sub_m - 1][sub_n] + dp[sub_m][sub_n - 1]\n\n        return dp[-1][-1]\n","sub_path":"不同路径/mainv2.py","file_name":"mainv2.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"111339147","text":"'''\nsorted\n'''\nL = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\nL1 = ['bob', 'about', 'Zoo', 'Credit']\n\nprint(sorted(L1))\nprint(sorted(L1, key = str.lower))\n\ndef by_name(t):\n\treturn t[0]\n\nL2 = sorted(L, key = by_name)\nprint(L2)\n\nfrom operator import itemgetter\n\nprint(sorted(L2, key = itemgetter(0)))\nprint(sorted(L2, key = itemgetter(1), reverse = True))\nprint(sorted(L2, key = lambda x : x[1]))\n\ndef lazy_sum(*args):\n\tdef sum():\t\n\t\tax = 0\n\t\tfor n in args:\n\t\t\tax += n\n\t\treturn ax\n\treturn sum\n\t\nf = lazy_sum(1, 2, 4, 5, 6, 8, 9)\nprint(f)\nprint(f())\n\ndef count():\n\tfs = []\n\tfor i in range(1, 4):\n\t\tdef f():\n\t\t\treturn i * i\n\t\tfs.append(f)\n\treturn fs\n\t\nf1, f2, f3 = count()\nprint(f1())\nprint(f2())\nprint(f3())\n\ndef count1():\n\tfs = []\n\tdef f(n):\n\t\tdef j():\n\t\t\treturn n * n\n\t\treturn j\n\tfor i in range(1, 4):\n\t\tfs.append(f(i))\n\treturn fs\n\t\nf1, f2, f3 = count1()\nprint(f1())\nprint(f2())\nprint(f3())\n\nprint(list(map(lambda x : x * x, [1, 2, 3, 4])))\n\ndef log(func):\n\tdef wrapper(*args, **kw):\n\t\tprint('call %s()' % func.__name__)\n\t\treturn func(*args, **kw)\n\treturn wrapper\n\t\n@log\ndef now():\n\tprint('2016-11-10')\n\t\nnow()","sub_path":"eight.py","file_name":"eight.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"28736810","text":"import os\nimport json\nimport requests\nfrom tests_api.config import URL_AUTH, AUTH_PAYLOADS, HEADER, URL_USERS, URL_EVENT\n\n\nclass Header:\n\n    def get_header_auth_admin(self):\n        response_decoded_json = 
requests.post(\n            URL_AUTH['url_login'], data=json.dumps(\n                AUTH_PAYLOADS['payload_admin']), headers=HEADER['header'])\n        h = json.loads(response_decoded_json.content.decode())\n        auth = h[\"token\"]\n        header = {\n            \"accept\": \"application/json\",\n            \"Content-Type\": \"application/json-patch+json\",\n            \"authorization\": \"Bearer \" + auth}\n        return header\n\n    def get_header_auth_user(self):\n        response_decoded_json = requests.post(\n            URL_AUTH['url_login'], data=json.dumps(\n                AUTH_PAYLOADS['payload_user']), headers=HEADER['header'])\n        h = json.loads(response_decoded_json.content.decode())\n        auth = h[\"token\"]\n        header = {\n            \"accept\": \"application/json\",\n            \"Content-Type\": \"application/json-patch+json\",\n            \"authorization\": \"Bearer \" + auth}\n        return header\n\n    def get_header_auth_vasya(self):\n        response_decoded_json = requests.post(\n            URL_AUTH['url_login'], data=json.dumps(\n                AUTH_PAYLOADS['payload_vasya']), headers=HEADER['header'])\n        h = json.loads(response_decoded_json.content.decode())\n        auth = h[\"token\"]\n        header = {\n            \"accept\": \"application/json\",\n            \"Content-Type\": \"application/json-patch+json\",\n            \"authorization\": \"Bearer \" + auth}\n        return header\n\n    def get_token_admin(self):\n        response_decoded_json = requests.post(\n            URL_AUTH['url_login'], data=json.dumps(\n                AUTH_PAYLOADS['payload_admin']), headers=HEADER['header'])\n        h = json.loads(response_decoded_json.content.decode())\n        auth = h[\"token\"]\n        token = \"Bearer \" + auth\n        return token\n\n\nclass Event:\n\n    CURRENT_PATH = os.path.abspath('CH_096_TAQC')\n\n    header = {\n        'Content-Type': 'multipart/form-data',\n        'Authorization': Header().get_token_admin()\n    }\n\n    payload = {\n        'Title': 'Test Event',\n        'Description': 'Event for testing search',\n        'DateFrom': 'Fri Jan 24 2020',\n        'DateTo': 'Mon Feb 10 2020',\n        'User.Id': 'f320932e-aac2-4999-32d3-08d79b47df59',\n        'CityId': '418ad80a-85da-4033-f8df-08d79b47df2b',\n        'Categories[0].Id': '60c56914-a974-4b4c-f461-08d79b47df60'\n    }\n\n    files = {\n        # 'Photo': open(os.path.join(CURRENT_PATH) + 'data\\\\imageAddEvent\\\\testing_img.png','rb')\n    }\n\n    def create(self):\n        response = requests.post(\n            URL_EVENT['url_event_edit'],\n            headers=self.header,\n            data=self.payload,\n            files=self.files)\n        print(\n            URL_EVENT['url_event_edit'],\n            self.header,\n            self.payload,\n            self.files)\n        print(response)\n\n    def get_token_admin(self):\n        response_decoded_json = requests.post(\n            URL_AUTH['url_login'], data=json.dumps(\n                AUTH_PAYLOADS['payload_admin']), headers=HEADER['header'])\n        h = json.loads(response_decoded_json.content.decode())\n        auth = h[\"token\"]\n        token = \"Bearer \" + auth\n        return token\n\n\nclass User:\n    \"\"\"Class to test Users API\"\"\"\n\n    def __init__(\n            self,\n            id=\"e02dfd94-a8a9-4b1a-6cfc-08d7a28d1878\",\n            name='Vasya',\n            gender=0,\n            birthday=\"2000-01-01\"):\n        \"\"\"Init new user data\"\"\"\n        self.id = id\n        self.name = name\n        self.birthday = str(birthday) + \"T00:00:00\"\n        self.gender = gender\n        self.payload_edit_gender = {\n            \"id\": self.id,\n            \"gender\": self.gender}\n        self.payload_edit_birthday = {\n            \"id\": self.id,\n            \"birthday\": self.birthday}\n        self.payload_edit_username = {\n            \"id\": self.id,\n            \"name\": self.name}\n\n    def PAYLOAD_edit_gender(self):\n        \"\"\"Reload PAYLOAD_gender data\"\"\"\n        PAYLOAD_edit_gender = {\n            \"id\": self.id,\n            \"gender\": self.gender}\n        return PAYLOAD_edit_gender\n\n    def PAYLOAD_edit_username(self):\n        \"\"\"Reload PAYLOAD_username data\"\"\"\n        PAYLOAD_edit_username = {\n            \"id\": self.id,\n            \"name\": self.name}\n        return 
PAYLOAD_edit_username\n\n    def PAYLOAD_edit_birthday(self):\n        \"\"\"Reload PAYLOAD_birthday data\"\"\"\n        PAYLOAD_edit_birthday = {\n            \"id\": self.id,\n            \"birthday\": self.birthday}\n        return PAYLOAD_edit_birthday\n\n    def edit_gender(self, gender):\n        \"\"\"Edit user gender\"\"\"\n        self.gender = gender\n        response_decoded_json = requests.post(\n            URL_USERS['edit_gender'],\n            data=json.dumps(\n                self.PAYLOAD_edit_gender()),\n            headers=Header().get_header_auth_vasya())\n        print(\"Gender changed\")\n\n    def edit_username(self, username):\n        \"\"\"Edit user name\"\"\"\n        self.name = username\n        response_decoded_json = requests.post(\n            URL_USERS['edit_username'],\n            data=json.dumps(\n                self.PAYLOAD_edit_username()),\n            headers=Header().get_header_auth_vasya())\n        print(\"Name changed\")\n        return response_decoded_json\n\n    def edit_birthday(self, birthday):\n        \"\"\"Edit user birthday\"\"\"\n        self.birthday = birthday\n        response_decoded_json = requests.post(\n            URL_USERS['edit_birthday'],\n            data=json.dumps(\n                self.PAYLOAD_edit_birthday()),\n            headers=Header().get_header_auth_vasya())\n        print(\"Birthday changed to \", self.birthday[:10])\n        return response_decoded_json\n\n    def set_attitude(self, id, attitude):\n        \"\"\"Set attitude to User from Vasya\"\"\"\n        response_decoded_json = requests.post(\n            URL_USERS['set_attitude'],\n            data=json.dumps({\n                \"userFromId\": self.id,\n                \"userToId\": id,\n                \"attitude\": attitude\n            }),\n            headers=Header().get_header_auth_vasya())\n        return response_decoded_json\n\n    def back_attitude(self, id):\n        \"\"\"Reset attitude to User from Vasya back to neutral\"\"\"\n        response_decoded_json = requests.post(\n            URL_USERS['set_attitude'],\n            data=json.dumps({\n                \"userFromId\": self.id,\n                \"userToId\": id,\n                \"attitude\": 2\n            }),\n            headers=Header().get_header_auth_vasya())\n        return response_decoded_json\n\n    def get_info_by_id(self):\n        \"\"\"Get all user info by user-id\"\"\"\n        url = URL_USERS['user_by_id'] + self.id\n        response_decoded_json = requests.get(\n            url,\n            headers=Header().get_header_auth_vasya())\n        Json = response_decoded_json.content.decode()\n        dictionary = json.loads(Json)\n        return dictionary\n\n    def get_gender(self):\n        \"\"\"Get user gender\"\"\"\n        dictionary = self.get_info_by_id()\n        gender = dictionary[\"gender\"]\n        return gender\n\n    def get_birthday(self):\n        \"\"\"Get user birthday\"\"\"\n        dictionary = self.get_info_by_id()\n        birthday = dictionary[\"birthday\"]\n        return birthday[:10]\n\n    def get_username(self):\n        \"\"\"Get user username\"\"\"\n        dictionary = self.get_info_by_id()\n        name = dictionary[\"name\"]\n        return name\n\n\n\"\"\"\nid = \"e02dfd94-a8a9-4b1a-6cfc-08d7a28d1878\"\nprint(Header().get_token_admin())\nuser = User(id, \"Jesus\", 2, \"2001-06-04\")\nprint(user.get_info_by_id())\nuser.edit_username()\nuser.edit_gender()\nuser.edit_birthday()\nprint(user.get_info_by_id())\nuser.back_username()\nuser.back_gender()\nuser.back_birthday()\nprint(user.get_info_by_id())\n\"\"\"\n\nclass EditUserByAdmin:\n    \"\"\"Class to test User API\"\"\"\n\n    def __init__(self, name):\n        \"\"\"Init new user data\"\"\"\n        self.id = None # \"id\": \"a1d49d6a-f832-4f2a-32d4-08d79b47df59\"\n        self.name = name # UserTest\n        self.header = Header().get_header_auth_admin()\n\n    def block(self):\n        \"\"\"\n        Block user by name\n        :param: username :type: str\n        :param: header :type: instance of Header()\n        :return: instance of response object\n        \"\"\"\n        self.get_user_id()\n        payload_block_user = {\"id\": self.id}\n        print('***********', payload_block_user)\n        response = requests.post(\n            URL_USERS['url_block_user'],\n            
data=json.dumps(\n                payload_block_user),\n            headers=self.header)\n        print(f\"User {self.name} has been blocked\")\n        return response\n\n    def unblock(self):\n        \"\"\"Unblock user by name\n        :param: username :type: str\n        :param: header :type: instance of Header()\n        :return: instance of response object\n        \"\"\"\n        self.get_user_id()\n        payload_block_user = {\"id\": self.id}\n        print('------------', payload_block_user)\n\n        response = requests.post(\n            URL_USERS['url_unblock_user'],\n            data=json.dumps(\n                payload_block_user),\n            headers=self.header)\n        print(f\"User {self.name} has been unblocked\")\n        return response\n\n    def get_user_id(self):\n        \"\"\"\n        API test. This method searches for a user by name\n        :param: username\n        :type: str\n        :param: header\n        :type: str\n        :return: user id\n        :type: str\n        \"\"\"\n        response = requests.get(URL_USERS['url_search_users'],\n                                headers=self.header)\n        resp = response.json()\n        users_count = len(resp['items'])\n\n        for index in range(users_count):\n            if resp['items'][index]['username'] == self.name:\n                self.id = resp['items'][index]['id']\n                print('=========', self.id)\n                return self.id\n\n    def collect_users(self, response):\n        \"\"\"\n        API test. This method collects all usernames\n        :param: header :type: instance of Header()\n        :return: list of all users\n        :type: list of str\n        \"\"\"\n\n        resp = response.json()\n        users_count = len(resp['items'])\n\n        users = []\n        for index in range(users_count):\n            users.append(resp['items'][index]['username'])\n\n        return users\n\n\n#name = 'UserTest'\n#user = EditUserByAdmin(name)\n# print(user.block())\n# print(user.unblock())\n","sub_path":"tests_api/testHelper.py","file_name":"testHelper.py","file_ext":"py","file_size_in_byte":10445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"606883475","text":"import sys\r\nimport os\r\nfrom tkinter import *\r\n\r\nwindow=Tk()\r\n\r\nwindow.title(\"Tracking Bird Migration\")\r\nwindow.geometry('450x250')\r\nwindow.config(bg = 'black')\r\n\r\ndef run():\r\n    exec(open('BirdMigrationLatLong.py').read())\r\n\r\ndef run1():\r\n    exec(open('BirdMigration2DSpeed.py').read())\r\n\r\ndef run2():\r\n    os.system('BirdMigrationDateTime.py')\r\n\r\ndef run3():\r\n    os.system('BirdMigrationDailyMeanSpeed.py')\r\n\r\ndef run4():\r\n    exec(open('BirdMigrationMapView.py').read())\r\n\r\ndef close(): \r\n    window.destroy()\r\n\r\nl = Label(window, text = \"--Tracking Bird Migration--\") \r\nl.config(font =(\"Courier\", 14)) \r\nl.pack()\r\n\r\n\r\nb = Button(window,text = \"1. Latitude and Longitude\", command = run) \r\nb.pack(padx=10, pady=5)\r\n\r\nb = Button(window,text = \"2. 2D Speed Vs Frequency\", command = run1) \r\nb.pack(padx=10, pady=5)\r\n\r\nb = Button(window,text = \"3. Time and Date\", command = run2) \r\nb.pack(padx=10, pady=5)\r\n\r\nb = Button(window,text = \"4. Daily Mean Speed\", command = run3) \r\nb.pack(padx=10, pady=5)\r\n\r\nb = Button(window,text = \"5. Cartographic View\", command = run4) \r\nb.pack(padx=10, pady=5)\r\n\r\nb = Button(window,text = \"6. 
Exit\", command = close) \r\nb.pack(padx=10, pady=5)\r\n\r\n\r\n\r\nwindow.mainloop()\r\n","sub_path":"main(GUI).py","file_name":"main(GUI).py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"240252750","text":"from imutils.video import VideoStream\r\nimport datetime\r\nimport imutils\r\nimport time\r\nfrom cv2 import cv2\r\nimport numpy as np\r\nvideo_path = None\r\nbuffsize = 64\r\nindx = 0\r\nred_range1 = [(0,50,50),(10,255,255)]\r\nred_range2 = [(170,50,50),(180,255,255)]\r\npath = np.zeros((buffsize,2),dtype='int')\r\nvideo_path = None\r\nif video_path is None:\r\n vs = VideoStream().start()\r\n time.sleep(2)\r\nelse:\r\n vs = cv2.VideoCapture(video_path)\r\nwhile True:\r\n frame = vs.read()\r\n frame = frame if video_path is None else frame[1]\r\n if frame is None:\r\n break\r\n frame = imutils.resize(frame,width=500)\r\n blur = cv2.GaussianBlur(frame,(21,21),0)\r\n hsv = cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)\r\n #hsv = cv2.cvtColor(blur,cv2.COLOR_BGR2GRAY)\r\n m1 = cv2.inRange(hsv,red_range1[0],red_range2[1])\r\n m2 = cv2.inRange(hsv,red_range2[0], red_range2[1])\r\n mask = cv2.bitwise_and(m1,m2)\r\n\r\n #mask = cv2.inRange(hsv,red_range1[0],red_range1[1])\r\n mask = cv2.erode(mask,None,iterations=2)\r\n mask = cv2.dilate(mask,None,iterations=2)\r\n cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = imutils.grab_contours(cnts)\r\n if len(cnts) > 0:\r\n cnt = max(cnts,key=cv2.contourArea)\r\n ((x,y),radius) = cv2.minEnclosingCircle(cnt)\r\n\r\n M = cv2.moments(cnt)\r\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\r\n if radius > 10:\r\n cv2.circle(frame,(int(x),int(y)),int(radius),(0,255,255),2)\r\n cv2.circle(frame,center,5,(0,0,255),-1)\r\n if indx < buffsize:\r\n path[indx] = (center[0],center[1])\r\n indx += 1\r\n else:\r\n path[0:indx-1] = path[1:indx]\r\n path[indx-1] = (center[0],center[1])\r\n for i in range(1,len(path)):\r\n if path[i-1] is None or path[i] is None:\r\n continue\r\n thickness = int(np.sqrt(len(path)/float(i+1))*2.5)\r\n cv2.line(frame,(path[i-1][0],path[i-1][1]),(path[i][0],path[i][1]),(0,0,255),thickness)\r\n cv2.putText(frame, \"Room Status: {}\".format(\"HEJSAN\"), (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255),2)\r\n cv2.putText(frame, datetime.datetime.now().strftime(\"%A %d %B %Y %I:%M:%S%p\"),(10, frame.shape[0]-10),cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\r\n\r\n cv2.imshow(\"Camera image\", frame)\r\n cv2.imshow(\"Mask\", mask)\r\n key = cv2.waitKey(1)& 0xFF\r\n if key == ord(\"q\"):\r\n break\r\nvs.stop() if video_path is None else vs.release()\r\ncv2.destroyAllWindows()","sub_path":"detect red object on video stream.py","file_name":"detect red object on video stream.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"349759505","text":"# coding: UTF-8\n\nimport json\n\ndef lambda_handler(event, context):\n print(json.dumps(event))\n\n expect_code = event.get('queryStringParameters').get('expect_code', '200')\n if expect_code == '200':\n return {\n \"message\": \"hello world\"\n }\n elif expect_code == '400':\n raise Exception(\"Bad Request\")\n elif expect_code == '401':\n return {\n \"stackTrace\": [],\n \"errorType\": \"Exception\",\n \"errorMessage\": \"Unauthorixed\"\n }\n elif expect_code == '403':\n raise Exception(\"あなたにはアクセス権がありません\")\n else:\n raise Exception(\"Internal 
Server Error\")\n","sub_path":"functions/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"42620219","text":"#! /usr/bin/env python\n\n# Copyright (C) 2015 Dmitry Rodionov\n# This file is part of my GSoC'15 project for Cuckoo Sandbox:\n# http://www.cuckoosandbox.org\n# This software may be modified and distributed under the terms\n# of the MIT license. See the LICENSE file for details.\n\n# Edits by Federico (phretor) Maggi\n\nimport os\nimport sys\nimport pkgutil\nimport logging\nimport traceback\nfrom datetime import datetime\n\nfrom lib.core.config import Config\nfrom lib.core.startup import create_folders, init_logging\nfrom lib.core.packages import choose_package\n\nfrom lib.common.results import upload_to_host\nfrom lib.common.abstracts import Package, Auxiliary\nfrom lib.common.constants import PATHS\nfrom lib.common.exceptions import CuckooError, CuckooPackageError\n\nfrom modules import auxiliary\n\nlog = logging.getLogger(__name__)\n\nclass Macalyser:\n \"\"\"Cuckoo OS X analyzer.\n \"\"\"\n\n target = \"\"\n target_artefacts = []\n config= []\n\n def prepare(self):\n \"\"\"Prepare env for analysis.\"\"\"\n # Create the folders used for storing the results.\n create_folders()\n\n # Initialize logging.\n init_logging()\n\n # Parse the analysis configuration file generated by the agent.\n self.parse_config(\"analysis.conf\")\n\n # Setup machine time\n self.setup_machine_time()\n\n def run(self):\n \"\"\"Run analysis.\n @return: operation status.\n \"\"\"\n self.prepare()\n\n log.debug(\"Starting analyzer from: %s\", os.getcwd())\n log.debug(\"Storing results at: %s\", PATHS[\"root\"])\n\n # Retrieve analysis package\n package = self.analysis_package_for_current_target()\n\n # Initialize analysis package\n self.initialize_package(package)\n\n # Setup and start auxiliary modules\n aux = self.setup_auxiliary_modules()\n\n # Start analysis package\n results = self.analysis(package)\n\n # Shutdown Auxiliary modules\n self.shutdown_auxiliary_modules(aux)\n\n # TODO: figure out a way to do this cleanly\n # shutdown_spawned_modules(results.procs_still_alive)\n\n # Done!\n self.complete(package)\n\n def complete(self, package):\n self.upload_artefacts(package)\n self.cleanup()\n\n def parse_config(self, config_name=\"analysis.conf\"):\n self.config = Config(cfg=config_name)\n\n def analysis_package_for_current_target(self):\n # If no analysis package was specified at submission, we try to select\n # one automatically.\n if not self.config.package:\n log.debug(\"No analysis package specified, trying to detect \"\n \"it automagically.\")\n\n # If the analysis target is a file, we choose the package according\n # to the file format.\n if self.config.category == \"file\":\n package = choose_package(self.config.file_type,\n self.config.file_name)\n\n if not package:\n raise CuckooError(\"No valid package available for file \"\n \"type: {0}\".format(self.config.file_type))\n\n log.info(\"Automatically selected analysis package \\\"%s\\\"\", package)\n # Otherwise just select the specified package.\n else:\n package = self.config.package\n\n return package\n\n def initialize_package(self, package):\n # Generate the package path.\n package_name = \"modules.packages.%s\" % package\n\n # Try to import the analysis package.\n try:\n __import__(package_name, globals(), locals(), [\"dummy\"], -1)\n # If it fails, we need to abort the analysis.\n except ImportError:\n raise 
CuckooError(\"Unable to import package \\\"{0}\\\", does \"\n \"not exist.\".format(package_name))\n\n # Initialize the package parent abstract.\n Package()\n\n # Enumerate the abstract subclasses.\n try:\n package_class = Package.__subclasses__()[0]\n except IndexError as e:\n raise CuckooError(\"Unable to select package class \"\n \"(package={0}): {1}\".format(package_name, e))\n\n # Initialize the analysis package.\n pack = package_class(self.config.get_options())\n\n def setup_auxiliary_modules(self):\n # Initialize Auxiliary modules\n Auxiliary()\n prefix = auxiliary.__name__ + \".\"\n for loader, name, ispkg in pkgutil.iter_modules(\n auxiliary.__path__, prefix):\n if ispkg:\n continue\n\n # Import the auxiliary module.\n try:\n __import__(name, globals(), locals(), [\"dummy\"], -1)\n except ImportError as e:\n log.warning(\"Unable to import the auxiliary module \"\n \"\\\"%s\\\": %s\", name, e)\n\n # Walk through the available auxiliary modules.\n aux_enabled, aux_avail = [], []\n for module in Auxiliary.__subclasses__():\n # Try to start the auxiliary module.\n try:\n aux = module(self.config.get_options())\n aux_avail.append(aux)\n aux.start()\n except (NotImplementedError, AttributeError):\n log.warning(\"Auxiliary module %s was not implemented\",\n aux.__class__.__name__)\n continue\n except Exception as e:\n log.warning(\"Cannot execute auxiliary module %s: %s\",\n aux.__class__.__name__, e)\n continue\n finally:\n log.debug(\"Started auxiliary module %s\",\n aux.__class__.__name__)\n aux_enabled.append(aux)\n\n def setup_machine_time(self):\n # Set virtual machine clock.\n clock = datetime.strptime(self.config.clock, \"%Y%m%dT%H:%M:%S\")\n\n # Setting date and time.\n # TODO(phretor): check how to set seconds\n os.system(\"date {0}\".format(clock.strftime(\"%m%d%H%M%y\")))\n\n # TODO(phretor): add support for other than \"file\"\n if self.config.category == \"file\":\n self.target = os.path.join(os.environ[\"TEMP\"] + os.sep,\n str(self.config.file_name))\n\n def analysis(self, package):\n try:\n pids = package.start(self.target)\n except NotImplementedError:\n raise CuckooError(\"The package \\\"{0}\\\" doesn't contain a run \"\n \"function.\".format(package))\n except CuckooPackageError as e:\n raise CuckooError(\"The package \\\"{0}\\\" start function raised an \"\n \"error: {1}\".format(package, e))\n except Exception as e:\n raise CuckooError(\"The package \\\"{0}\\\" start function encountered \"\n \"an unhandled exception: \"\n \"{1}\".format(package, e))\n\n # Should we enforce timeout?\n if self.config.enforce_timeout:\n log.info(\"Enabled timeout enforce, running for the full timeout.\")\n time.sleep(self.config.timeout)\n\n try:\n # Before finishing the analysis, the package can perform some\n # final operations through the finish() function.\n package.finish()\n except Exception as e:\n log.warning(\"The package \\\"%s\\\" finish function raised an \"\n \"exception: %s\", package, e)\n\n def shutdown_auxiliary_modules(self, aux):\n pass\n\n def shutdown_spawned_processes(self, procs):\n pass\n\n def upload_artefacts(self, package):\n try:\n # Upload files the package created to package_files in the\n # results folder to host\n package_files = package.package_files()\n if package_files != None:\n for p in package_files:\n upload_to_host(\n p[0], os.path.join(\"package_files\", p[1]));\n except Exception as e:\n log.warning(\"The package \\\"%s\\\" package_files function raised an \"\n \"exception: %s\", package, e)\n\n\n def cleanup(self):\n pass\n\nAnalyzer = 
Macalyser\n\nif __name__ == \"__main__\":\n    success = False\n    error = \"\"\n\n    try:\n        # Initialize the main analyzer class.\n        analyzer = Analyzer()\n\n        # Run it and wait for the response.\n        success = analyzer.run()\n\n    # This is not likely to happen.\n    except KeyboardInterrupt:\n        error = \"Keyboard Interrupt\"\n\n    # If the analysis process encountered a critical error, it will raise a\n    # CuckooError exception, which will force the termination of the analysis.\n    # Notify the agent of the failure. Also catch unexpected exceptions.\n    except Exception as e:\n        # Store the error.\n        error_exc = traceback.format_exc()\n        error = str(e)\n\n        # Just to be paranoid.\n        if len(log.handlers):\n            log.exception(error_exc)\n        else:\n            sys.stderr.write(\"{0}\\n\".format(error_exc))\n\n    # Once the analysis is completed or terminated for any reason, we report\n    # back to the agent, notifying that it can report back to the host.\n    finally:\n        # Establish connection with the agent XMLRPC server.\n        server = xmlrpclib.Server(\"http://127.0.0.1:8000\")\n        server.complete(success, error, PATHS[\"root\"])\n","sub_path":"analyzer/darwin/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":9422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"421596922","text":"import imageio\nimport pandas as pd\nimport os\n\n\ndef create_gif(image_list, gif_name, duration = 1.0):\n    '''\n    :param image_list: list of image paths used to build the animated GIF\n    :param gif_name: string, file name of the generated GIF, with the .gif suffix\n    :param duration: time interval between frames\n    :return:\n    '''\n    frames = []\n    for image_name in image_list:\n        frames.append(imageio.imread(image_name))\n\n    imageio.mimsave(gif_name, frames, 'GIF', duration=duration)\n    return\n\n\nfns = []\nfilelist = os.listdir(r'pic/')\nfor i in filelist:\n    fns.append('pic/'+i)\n\n\n# fns = []\n# for x in list(pd.date_range(start='2021-05-06', end='2021-05-07',freq='H')):\n#     dt = x.strftime('%Y%m%d')\n#     d = x.strftime('%H')\n#     try:\n#         dir = r'NC_H08_{}_{}00_L2ARP030_FLDK.02401_02401.nc'.format(dt,d)\n#         dir1 = r'data/'+dir\n#         if dir in filelist:\n#             fns.append('county_image/{}_{}00.png'.format(dt,d))\n#     except:\n#         pass\n# print(fns)\ncreate_gif(fns,'gif/探空图.gif', duration=1.0)","sub_path":"探空数据/makegif.py","file_name":"makegif.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"27478481","text":"#import the necessary classes\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n#collecting the data from a page\nhtml = urlopen(\"http://pythonscraping.com/pages/page3.html\")\nbsObj = BeautifulSoup(html, \"lxml\")\n\n#processing the data; collecting the children of the 'table' tag\nfor child in bsObj.find(\"table\", {\"id\":\"giftList\"}).children:\n    print(child)\n\n#processing the data; collecting the descendants of the 'table' tag\nfor desc in bsObj.find(\"table\", {\"id\":\"giftList\"}).descendants:\n    print(desc)\n\n\n","sub_path":"tags_filhos_descendentes.py","file_name":"tags_filhos_descendentes.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"257106925","text":"# Motion Detection Example:\n#\n# This example demonstrates using frame differencing with your OpenMV Cam to do\n# motion detection. 
After motion is detected your OpenMV Cam will take a picture.\n\nimport os, pyb, sensor, image, time\n\nif not \"temp\" in os.listdir(): os.mkdir(\"temp\") # Make a temp directory\n\nsensor.reset()\nsensor.set_framesize(sensor.QVGA)\n\nwhile(True):\n    sensor.set_pixformat(sensor.GRAYSCALE) # Grayscale is much faster than RGB.\n\n    # Warm up the cam\n    for i in range(10):\n        sensor.snapshot()\n\n    for i in [5, 4, 3, 2, 1]:\n        print(\"Saving background in... %d\" % i)\n        time.sleep(1000)\n\n    print(\"Saving background...\")\n    sensor.snapshot().save(\"temp/bg.bmp\")\n\n    diff = 30 # wait for 30 snapshots before taking the picture\n    while(diff):\n        img = sensor.snapshot()\n        img.difference(\"temp/bg.bmp\")\n        img.binary([(32, 255)])\n        sum, x, y = img.centroid()\n        if sum > 100: # 100 pixels detected\n            img.draw_cross(x, y, color = 127)\n            diff -= 1\n\n    sensor.set_pixformat(sensor.RGB565)\n    # Warm up the cam\n    for i in range(10):\n        sensor.snapshot()\n    sensor.snapshot().save(\"temp/movement-%d\" % pyb.rng()) # Save movement\n","sub_path":"usr/examples/motion_detection.py","file_name":"motion_detection.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"190731697","text":"def euclids_ggt_algorithm(a,b): #Euclidean GCD algorithm\n    m = a; n = b\n    last = [0,0]\n    while not (n == m):\n        if ( m < n ):\n            temp = m; m = n\n            n = temp\n        r = m - n\n        m = n; n = r\n        if ((last[0] == m and last[1] == n) or (last[0] == n and last[1] == m)): #prevents infinite looping\n            break\n        last[0] = m; last[1] = n #up to here\n    #print(\"[a,b]=[\"+str(a)+\",\"+str(b)+\"];[GGT]=[\"+str(m)+\" == \"+str(n)+\"]\")\n    return m","sub_path":"euclid.py","file_name":"euclid.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"328215596","text":"from blackfox.api.data_set_api import DataSetApi\nfrom blackfox.api.network_api import NetworkApi\nfrom blackfox.api.prediction_api import PredictionApi\nfrom blackfox.api.training_api import TrainingApi\nfrom blackfox.api.optimization_api import OptimizationApi\nfrom blackfox.models.keras_optimization_config import KerasOptimizationConfig\nfrom blackfox.models.range import Range\n\nfrom blackfox.api_client import ApiClient\nfrom blackfox.configuration import Configuration\nfrom blackfox.rest import ApiException\n\nimport hashlib\nimport shutil\nimport time\nfrom datetime import datetime\nimport signal\nimport sys\nimport os\nfrom io import BytesIO\nfrom tempfile import NamedTemporaryFile\n\nBUF_SIZE = 65536 # let's read stuff in 64kb chunks!\n\n\nclass BlackFox:\n\n    def __init__(self, host=\"http://localhost:50476/\"):\n        self.host = host\n        configuration = Configuration()\n        configuration.host = host\n        self.client = ApiClient(configuration)\n        self.data_set_api = DataSetApi(self.client)\n        self.network_api = NetworkApi(self.client)\n        self.prediction_api = PredictionApi(self.client)\n        self.training_api = TrainingApi(self.client)\n        self.optimization_api = OptimizationApi(self.client)\n\n    def log(self, stream, msg):\n        if isinstance(stream, str):\n            with open(stream, mode='a', encoding='utf-8', buffering=1) as f:\n                f.write(msg)\n        else:\n            stream.write(msg)\n            stream.flush()\n\n    def upload_data_set(self, path):\n        id = self.sha1(path)\n        try:\n            self.data_set_api.head(id)\n        except ApiException as e:\n            if e.status == 404:\n                id = self.data_set_api.post(file=path)\n            else:\n                raise e\n        return id\n\n    def download_data_set(self, id, path):\n        
temp_path = self.data_set_api.get(id)\n shutil.move(temp_path, path)\n\n def upload_network(self, path):\n id = self.sha1(path)\n try:\n self.network_api.head(id)\n except ApiException as e:\n if e.status == 404:\n id = self.network_api.post(file=path)\n else:\n raise e\n return id\n\n def download_network(self, id, path=None):\n temp_path = self.network_api.get(id)\n if path is None:\n return open(temp_path, 'rb')\n else:\n shutil.move(temp_path, path)\n\n def train_keras(\n self,\n config,\n data_set_path=None,\n network_path=None\n ):\n \"\"\"\n Train network\n\n :param KerasTrainingConfig config:\n :param str data_set_path:\n :param str nework_path:\n :return: TrainedNetwork\n If data_set_path is not None upload data set \n and sets config.dataset_id to new id.\n If network_path is not None \n download network to given file.\n \"\"\"\n if data_set_path is not None:\n config.dataset_id = self.upload_data_set(data_set_path)\n\n trained_network = self.training_api.post(value=config)\n\n if network_path is not None:\n self.download_network(trained_network.id, network_path)\n\n return trained_network\n\n def predict_from_file_keras(\n self,\n config,\n network_path=None,\n data_set_path=None,\n result_path=None\n ):\n \"\"\"\n Predict values and download results in file\n\n :param PredictionFileConfig config:\n :param str network_path:\n :param str data_set_path:\n :param str result_path:\n :return: str: result data set id\n If network_path is not None upload network,\n and sets config.network_id to new id.\n If data_set_path is not None upload data set,\n and sets config.data_set_id to new id.\n If result_path is not None download results\n to given file.\n \"\"\"\n if network_path is not None:\n config.network_id = self.upload_network(network_path)\n if data_set_path is not None:\n config.data_set_id = self.upload_data_set(data_set_path)\n result_id = self.prediction_api.post_file(config=config)\n if result_path is not None:\n self.download_data_set(result_id, result_path)\n return result_id\n\n def predict_from_array_keras(\n self,\n config,\n network_path=None\n ):\n \"\"\"\n Predict values and return results\n\n :param PredictionArrayConfig config:\n :param str network_path:\n :return: list[list[float]]: \n If network_path is not None upload network,\n and sets config.network_id to new id.\n \"\"\"\n if network_path is not None:\n config.network_id = self.upload_network(network_path)\n results = self.prediction_api.post_array(config=config)\n return results\n\n def get_ranges(self, data_set):\n ranges = []\n for row in data_set:\n for i, d in enumerate(row):\n if len(ranges) <= i or ranges[i] is None:\n ranges.append(Range(d, d))\n else:\n r = ranges[i]\n r.min = min(r.min, d)\n r.max = max(r.max, d)\n return ranges\n\n def optimize_keras_sync(\n self,\n input_set=None,\n output_set=None,\n data_set_path=None,\n config=KerasOptimizationConfig(),\n network_path=None,\n status_interval=5,\n log_file=sys.stdout\n ):\n \"\"\"\n Find optimal network for given problem.\n\n :param KerasOptimizationConfig config:\n :param str input_set:\n :param str output_set:\n :param str data_set_path:\n :param str network_path:\n :param int status_interval:\n :param str log_file:\n :return: BytesIO: byte array from network model\n If data_set_path is not None upload data set,\n and sets config.dataset_id to new id.\n If network_path is not None download network to given file.\n If log_file is not None write to log file \n every 5 seconds(status_interval)\n \"\"\"\n print('Use CTRL + C to stop 
optimization')\n\n if input_set is not None and output_set is not None:\n if type(input_set) is not list:\n input_set = input_set.tolist()\n if type(output_set) is not list:\n output_set = output_set.tolist()\n tmp_file = NamedTemporaryFile(delete=False)\n # input ranges\n config.input_ranges = self.get_ranges(input_set)\n # output ranges\n config.output_ranges = self.get_ranges(output_set)\n data_set = list(map(lambda x, y: (','.join(map(str, x)))+',' +\n (','.join(map(str, y))), input_set, output_set))\n\n column_count = len(config.input_ranges) + len(config.output_ranges)\n column_range = range(0, column_count)\n headers = map(lambda i: 'column_'+str(i), column_range)\n data_set.insert(0, ','.join(headers))\n csv = '\\n'.join(data_set)\n tmp_file.write(csv.encode(\"utf-8\"))\n tmp_file.close()\n if data_set_path is not None:\n self.log(log_file, 'Ignoring data_set_path\\n')\n data_set_path = str(tmp_file.name)\n\n if data_set_path is not None:\n if config.input_ranges is None:\n self.log(log_file, \"config.input_ranges is None\\n\")\n return None\n if config.output_ranges is None:\n self.log(log_file, \"config.output_ranges is None\\n\")\n return None\n if tmp_file is not None:\n self.log(log_file, \"Uploading data set\\n\")\n else:\n self.log(log_file, \"Uploading data set \" + data_set_path + \"\\n\")\n config.dataset_id = self.upload_data_set(data_set_path)\n\n if tmp_file is not None:\n os.remove(tmp_file.name)\n\n self.log(log_file, \"Starting...\\n\")\n id = self.optimization_api.post_async(config=config)\n\n def signal_handler(sig, frame):\n self.log(log_file, \"Stopping optimization : \"+id+\"\\n\")\n print(\"Stopping optimization : \"+id)\n self.stop_optimization_keras(id)\n\n signal.signal(signal.SIGINT, signal_handler)\n\n running = True\n status = None\n while running:\n status = self.optimization_api.get_status_async(id)\n running = (status.state == 'Active')\n if log_file is not None:\n self.log(\n log_file,\n (\"%s -> %s, \"\n \"Generation: %s/%s, \"\n \"Validation set error: %f, \"\n \"Training set error: %f, \"\n \"Epoch: %d, \"\n \"Optimization Id: %s\\n\") %\n (\n datetime.now(),\n status.state,\n status.generation,\n status.total_generations,\n status.validation_set_error,\n status.training_set_error,\n status.epoch,\n id\n )\n )\n time.sleep(status_interval)\n\n if status.state == 'Finished' or status.state == 'Stopped':\n if status.network is not None and status.network.id is not None:\n self.log(log_file,\n \"Downloading network \" +\n status.network.id + \"\\n\")\n network_stream = self.download_network(status.network.id)\n data = network_stream.read()\n if network_path is not None:\n self.log(log_file,\n \"Saving network \" +\n status.network.id + \" to \" + network_path + \"\\n\")\n with open(network_path, 'wb') as f:\n f.write(data)\n return BytesIO(data)\n\n elif status.state == 'Error':\n self.log(log_file, \"Optimization error\\n\")\n return None\n else:\n self.log(log_file, \"Unknown error\\n\")\n return None\n\n def optimize_keras(\n self,\n config,\n data_set_path=None\n ):\n \"\"\"\n Find optimal network for given problem async.\n\n :param KerasOptimizationConfig config:\n :param str data_set_path:\n :return: str: \n If data_set_path is not None upload data set,\n and sets config.dataset_id to new id.\n Return optimization id.\n \"\"\"\n if data_set_path is not None:\n config.dataset_id = self.upload_data_set(data_set_path)\n return self.optimization_api.post_async(config=config)\n\n def get_optimization_status_keras(\n self,\n id,\n network_path=None\n 
):\n \"\"\"\n Get status for optimization.\n\n :param KerasOptimizationConfig config:\n :param str network_path:\n :return: KerasOptimizationStatus: \n If data_set_path is not None upload data set,\n and sets config.dataset_id to new id.\n \"\"\"\n status = self.optimization_api.get_status_async(id)\n if (\n (status.state == 'Finished' or status.state == 'Stopped')\n and (network_path is not None)\n ):\n self.download_network(status.network.id, network_path)\n\n return status\n\n def cancel_optimization_keras(self, id):\n \"\"\"\n Cancel optimization.\n\n :param str id:\n :return: None: \n Call get_optimization_status_keras to get status.\n \"\"\"\n self.optimization_api.post_action_async(id, 'Cancel')\n\n def stop_optimization_keras(self, id, network_path=None):\n \"\"\"\n Stop optimization\n\n :param str id:\n :param str network_path:\n :return: None: \n If network_path is not None download network to given file,\n else call get_optimization_status_keras to get status\n and download network.\n \"\"\"\n self.optimization_api.post_action_async(id, 'Stop')\n if network_path is not None:\n state = 'Active'\n while state == 'Active':\n status = self.get_optimization_status_keras(id, network_path)\n state = status.state\n\n def sha1(self, path):\n sha1 = hashlib.sha1()\n with open(path, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n sha1.update(data)\n return sha1.hexdigest()\n","sub_path":"blackfox/black_fox.py","file_name":"black_fox.py","file_ext":"py","file_size_in_byte":12939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"180518913","text":"from bs4 import BeautifulSoup\nfrom decimal import Decimal\nfrom lxml import html as lxhtml\nfrom urllib import urlopen\n\n__all__ = (\n 'pluralize',\n 'strip_lower', 'zip_and_dict',\n 'convert_text', 'intify_text', 'contains',\n 'get_table', 'table_getter', 'get_row', 'row_getter',\n 'get_fields', 'field_getter', 'column_getter', 'load_data',\n 'SignalData',\n)\n\ndef pluralize(word):\n \"\"\"HORRIBLE WAY TO PLURALIZE A WORD!!!\n\n Will modify (if needed) in future, otherwise it works ok for now.\n \"\"\"\n if word.endswith('s'):\n suffix = 'es'\n else:\n suffix = 's'\n return ''.join((word, suffix))\n\ndef strip_lower(text):\n \"\"\"strip() and lower() text.\"\"\"\n if text is not None:\n try:\n text = text.strip().lower()\n except:\n return None\n return text\n\ndef zip_and_dict(lists, keys):\n \"\"\"Helper to convert value lists to keyed dicts.\n\n Sample: (should this just be a doc test??)\n lists: [['red', 'blue'], [0, 1]]\n keys: ['color', 'status']\n Returns:\n [{'color': 'red', 'status': 0],\n {'color': 'blue', 'status': 1]]\n \"\"\"\n lists = zip(*lists)\n lists = map(lambda list: dict(zip(keys, list)), lists)\n return lists\n\ndef convert_text(elems, func, split=None):\n \"\"\"Convert and return a list of element's .text value with func().\"\"\"\n nums = []\n for elem in elems:\n num = elem.text\n if num is not None:\n num = strip_lower(num)\n if split is not None:\n num = num.split(split, 1)[0]\n try:\n num = func(num)\n except:\n num = None\n nums.append(num)\n return nums\n\ndef intify_text(elems, split=None):\n \"\"\"Wrapper for convert_text with int() as conversion function.\"\"\"\n return convert_text(elems, int, split)\n\ndef contains(text):\n \"\"\"Generate an xpath contains() for text.\"\"\"\n return 'contains(text(), \"{}\")'.format(text)\n\ndef get_table(lxml, header):\n \"\"\"Get a table via header text.\"\"\"\n headers = 
lxml.xpath('//table//*[{}]'.format(contains(header)))\n if headers:\n tables = headers[0].xpath('./ancestor::table')\n if tables:\n # Return \"closest\" table\n return tables[-1]\n\ndef table_getter(header):\n def func(self):\n return get_table(self.lxml, header)\n return func\n\ndef get_row(table, header):\n \"\"\"Get a row via it's header text.\"\"\"\n if table is not None:\n xpath = './tbody/tr/td[{}]'.format(contains(header))\n tds = table.xpath(xpath)\n if tds:\n return tds[0].getparent()\n\ndef row_getter(table, header):\n def func(self):\n return get_row(get_table(self.lxml, table), header)\n return func\n\ndef get_fields(table, header, split=None, convert=None):\n \"\"\"Get a fields for a row specified by their header text.\"\"\"\n row = get_row(table, header)\n if row is not None:\n tds = row.xpath('./td')[1:]\n if convert is not None:\n return convert_text(tds, convert, split)\n else:\n return intify_text(tds, split)\n\ndef field_getter(table, header, split=None, convert=None):\n def func(self):\n t = get_table(self.lxml, table)\n return get_fields(t, header, split, convert)\n return func\n\ndef column_getter(table, fields, min_columns=0):\n def func(self):\n columns = []\n for field in fields:\n method = '_'.join((table, field))\n method = pluralize(method)\n columns.append(getattr(self, method)())\n columns = zip_and_dict(columns, fields)\n for i in range(len(columns), min_columns):\n columns.append({})\n return columns\n return func\n\ndef load_data(source, parser='lxml'):\n if hasattr(source, 'startswith') and source.startswith('http'):\n content = urlopen(source)\n else:\n content = open(source)\n return BeautifulSoup(content.read(), parser)\n\nclass SignalData(object):\n def __init__(self, html):\n self.soup = load_data(html)\n #self.soup = load_data(html, \"lxml\")\n self.lxml = lxhtml.fromstring(str(self.soup).lower())\n\n # This is used to setup class methods in setup_signal_data()\n tables = {\n 'down': {\n 'header': 'downstream',\n 'min_columns': 4,\n 'rows': [\n ('channel', 'channel'),\n ('freq', 'frequency', ' '),\n ('snr', 'signal to noise', ' '),\n ('power', 'power level', ' '),\n ],\n },\n 'up': {\n 'header': 'upstream',\n 'min_columns': 3,\n 'rows': [\n ('channel', 'channel'),\n ('freq', 'frequency', ' '),\n ('service_id', 'service id'),\n ('rate', 'symbol rate', ' ', Decimal),\n ('power', 'power level', ' '),\n ('status', 'ranging status', None, str),\n ],\n },\n 'stats': {\n 'header': 'signal stats (codewords)',\n 'min_columns': 4,\n 'rows': [\n ('channel', 'channel id'),\n ('unerrored', 'total unerrored'),\n ('correctable', 'total correctable'),\n ('uncorrectable','total uncorrectable'),\n ],\n },\n }\n\n\ndef setup_signal_data():\n \"\"\"\n Setup SignalData methods by going through it's tables dict.\n\n This is inside of a function to avoid cluttering module namespace.\n \"\"\"\n cls = SignalData\n for table, info in cls.tables.items():\n table_header = info['header']\n setattr(cls, '{}_table'.format(table), table_getter(table_header))\n\n rows = info.get('rows', [])\n for row in rows:\n name, row_header, sep, convert = (row + (None, None))[:4]\n args = table_header, row_header, sep, convert\n full_name = '_'.join((table, name))\n setattr(cls, '{}_row'.format(full_name), row_getter(*args[:2]))\n setattr(cls, pluralize(full_name), field_getter(*args))\n\n min_columns = info.get('min_columns', 0)\n setattr(cls, '{}_by_column'.format(table),\n column_getter(table, [r[0] for r in rows], 
min_columns))\n\nsetup_signal_data()\n","sub_path":"surfboard/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"395944150","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tifffile as tiff\n\nfrom train_unet import weights_path, get_model, normalize, PATCH_SZ, N_CLASSES\n\ndef rotate_img(img, r):\n # channels in img are last!!!\n # r is a transformation type (an integer from 0 to 7)\n reverse_x = r % 2 == 1 # True for r in [1,3,5,7]\n reverse_y = (r // 2) % 2 == 1 # True for r in [2,3,6,7]\n swap_xy = (r // 4) % 2 == 1 # True for r in [4,5,6,7]\n if reverse_x:\n img = img[::-1, :, :]\n if reverse_y:\n img = img[:, ::-1, :]\n if swap_xy:\n img = img.transpose([1, 0, 2])\n return img\n\n \ndef predict(img, model, patch_sz=160, border_sz=20, n_classes=5, augment=True):\n ## model is a trained CNN \n # border is a place around center in the patch where predictions are usually bad in u-nets\n img_height = img.shape[0]\n img_width = img.shape[1]\n n_channels = img.shape[2]\n\n # make extended img so that it contains integer number of crossed-by-border patches\n center_sz = patch_sz - 2 * border_sz\n npatches_vert = int(math.ceil((img_height - 2*border_sz)/center_sz))\n npatches_horizon = int(math.ceil((img_width - 2*border_sz)/center_sz))\n extended_height = 2*border_sz + center_sz * npatches_vert\n extended_width = 2*border_sz + center_sz * npatches_horizon\n ext_img = np.zeros(shape=(extended_height, extended_width, n_channels), dtype=np.float32)\n\n # fill extended image with mirrors:\n ext_img[:img_height, :img_width, :] = img\n for i in range(img_height, extended_height):\n ext_img[i, :, :] = ext_img[2*img_height - i - 1, :, :]\n for j in range(img_width, extended_width):\n ext_img[:, j, :] = ext_img[:, 2*img_width - j - 1, :]\n\n # now assemble all patches in one array\n patches_list = []\n for i in range(0, npatches_vert):\n for j in range(0, npatches_horizon):\n x0, x1 = i * center_sz, (i + 1) * center_sz + 2 * border_sz\n y0, y1 = j * center_sz, (j + 1) * center_sz + 2 * border_sz\n if augment:\n for r in range(8):\n patches_list.append(rotate_img(ext_img[x0:x1, y0:y1, :], r))\n else:\n patches_list.append(ext_img[x0:x1, y0:y1, :])\n patches_arr = np.asarray(patches_list) # np.transpose(patches_list, (0, 1, 2, 3))\n\n # predictions:\n patches_predict = model.predict(patches_arr, batch_size=4)\n confidence_map_patch = np.full(shape=(patch_sz, patch_sz, n_classes), fill_value=0.1) # low confidence for borders\n confidence_map_patch[border_sz:border_sz+center_sz, border_sz:border_sz+center_sz, :] = 1 # high confidence for center\n confidence_map_img = np.zeros(shape=(extended_height, extended_width, n_classes), dtype=np.float32)\n prd = np.zeros(shape=(extended_height, extended_width, n_classes), dtype=np.float32)\n \n for k in range(0, patches_predict.shape[0]): # for all predicted patches\n if augment:\n r = k % 8 # patch transformation type (0..7)\n i = k // 8 // npatches_horizon # patch x-coordinate\n j = k // 8 % npatches_horizon # patch y-coordinate\n x0, x1 = i * center_sz, (i + 1) * center_sz + 2 * border_sz\n y0, y1 = j * center_sz, (j + 1) * center_sz + 2 * border_sz\n confidence_map_img[x0:x1, y0:y1, :] += confidence_map_patch\n prd[x0:x1, y0:y1, :] += rotate_img(patches_predict[k, :, :, :], r) * confidence_map_patch\n else:\n i = k // npatches_horizon\n j = k % npatches_horizon\n x0, x1 = i * center_sz, (i + 1) * 
center_sz + 2 * border_sz\n y0, y1 = j * center_sz, (j + 1) * center_sz + 2 * border_sz\n confidence_map_img[x0:x1, y0:y1, :] += confidence_map_patch\n prd[x0:x1, y0:y1, :] += patches_predict[k, :, :, :] * confidence_map_patch\n prd /= confidence_map_img\n return prd[:img_height, :img_width, :]\n\n\n\ndef picture_from_mask(mask, threshold=0):\n colors = {\n 0: [150, 150, 150], # Buildings\n 1: [223, 194, 125], # Roads & Tracks\n 2: [27, 120, 55], # Trees\n 3: [166, 219, 160], # Crops\n 4: [116, 173, 209] # Water\n }\n z_order = {\n 1: 3,\n 2: 4,\n 3: 0,\n 4: 1,\n 5: 2\n }\n pict = 255*np.ones(shape=(3, mask.shape[0], mask.shape[1]), dtype=np.uint8)\n \n #for i in range(1, 6):\n # cl = z_order[i]\n\n for ch in range(3):\n pict[ch,:,:][mask[:,:] > threshold] = colors[3][ch]\n\n return pict\n\n\nif __name__ == '__main__':\n model = get_model()\n model.load_weights(weights_path)\n test_id = 'test'\n img = normalize(tiff.imread('data/mband/{}.tif'.format(test_id)).transpose([1,2,0])) # make channels last\n mask = predict(img, model, patch_sz=PATCH_SZ, n_classes=N_CLASSES) #.transpose([2,0,1]) # make channels first\n map = picture_from_mask(mask, 0.5)\n\n tiff.imsave('result.tif', (255*mask).astype('uint8'))\n tiff.imsave('map.tif', map)\n\n\n","sub_path":"satellite-image-segmentation/code/predict3.py","file_name":"predict3.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"528009938","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cupons', '0008_cupons_path'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('statistic', '0008_other_pages_count_shops_count'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cupons_count',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('date', models.DateTimeField(verbose_name='Дата', null=True, auto_now_add=True)),\n ('cupon', models.ForeignKey(verbose_name='КупонВ ', to='cupons.Cupons')),\n ('user', models.ForeignKey(verbose_name='Пользователь', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"statistic/migrations/0009_cupons_count.py","file_name":"0009_cupons_count.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"116628608","text":"import os\nfrom constant.constant import DESKTOP\n\n\ndef set_env(dir_path):\n try:\n env = dir_path + \";\" + os.environ.get(\"PATH\")\n print(env)\n cmd = 'setx PATH \"%s\"' % env\n print(cmd)\n os.popen(cmd)\n print(os.environ[\"PATH\"])\n return True\n except Exception as error:\n return False\n\n\ndef make_dir(dir_path, is_env):\n if os.path.exists(dir_path):\n return False, \"该目录已存在\"\n os.makedirs(dir_path)\n print(\"目录: %s \\n创建成功!\" % dir_path)\n if is_env:\n if set_env(dir_path):\n print(\"环境变量添加成功!\")\n else:\n print(\"环境变量添加失败!\")\n return False, \"添加环境变量失败\"\n return True, \"已完成目录创建!\"\n\n\ndef get_desktop_file():\n files = os.listdir(DESKTOP)\n file_list = []\n dir_list = []\n for file in files:\n file = os.path.join(DESKTOP, file)\n if os.path.isfile(file):\n file_list.append(file)\n else:\n dir_list.append(file)\n return file_list, dir_list\n\n\nif __name__ == '__main__':\n# make_dir(r'E:\\Test', True)\n 
print(get_desktop_file())","sub_path":"utils/dir_utils.py","file_name":"dir_utils.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"93811014","text":"import pandas as pd\r\nimport numpy as np\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import r2_score, roc_auc_score\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n# визуализация\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nsns.set(style=\"darkgrid\")\r\nsns.set(rc={'figure.figsize':(12,8)})\r\nsns.set(font_scale=1.5)\r\nsns.set_color_codes(\"muted\")\r\n\r\nplt.figure(figsize=(10, 8))\r\n\r\n\r\ndef changing_values_to_boolean(data, col):\r\n data[col] = data.apply(lambda x: 1 if x[col] is not None else 0, axis=1)\r\n return data\r\n\r\n\r\ndef clean_work_time(x):\r\n if (x['AGE']-14)*12 >= x['WORK_TIME'] and x['WORK_TIME'] is not None:\r\n return x['WORK_TIME']\r\n else:\r\n return np.nan\r\n\r\n\r\ndef fact_living_term(x):\r\n if x['FACT_LIVING_TERM'] > (x['AGE']+1)*12:\r\n return np.nan\r\n else:\r\n return x['FACT_LIVING_TERM']\r\n\r\n\r\ndef valid(data, col, unique=False):\r\n print (data[col].sort_values())\r\n if unique:\r\n print (data[col].unique())\r\n data[col].hist(bins=40)\r\n plt.plot()\r\n plt.show()\r\n\r\n\r\ndef family_income(x):\r\n str_ = x['FAMILY_INCOME']\r\n if str_.split(' ')[0] == 'до':\r\n y = [0, int(str_.split(' ')[1])]\r\n elif str_.split(' ')[0] == 'свыше':\r\n y = [int(str_.split(' ')[1]), 1000000000]\r\n else:\r\n str_ = str_.split(' ')\r\n y = [int(str_[1]), int(str_[3])]\r\n \r\n if x['PERSONAL_INCOME'] <= y[-1]:\r\n return y[-1]\r\n else:\r\n if x['PERSONAL_INCOME'] < 10000:\r\n return 10000\r\n elif x['PERSONAL_INCOME'] < 20000:\r\n return 20000\r\n elif x['PERSONAL_INCOME'] < 50000:\r\n return 50000\r\n else:\r\n return 100000\r\n \r\n\r\ndef personal_income(x):\r\n N = x['FACT_ADDRESS_PROVINCE'] + x['GEN_TITLE']\r\n if x['PERSONAL_INCOME']>=1000 and x['PERSONAL_INCOME']<300000 and x['GEN_INDUSTRY']==1:\r\n if x['PERSONAL_INCOME']>200000 and N < .6:\r\n return np.nan\r\n elif x['PERSONAL_INCOME']>100000 and x['PERSONAL_INCOME']<=200000 and N < .4:\r\n return np.nan\r\n elif x['PERSONAL_INCOME']>50000 and x['PERSONAL_INCOME']<=100000 and N < .3:\r\n return np.nan\r\n elif x['PERSONAL_INCOME']>=1000 and x['PERSONAL_INCOME']<=50000 and N < .2:\r\n return np.nan\r\n else:\r\n return x['PERSONAL_INCOME']\r\n else:\r\n return np.nan\r\n\r\n\r\ndef gen_industry(x):\r\n if x == 'Пропуск' or x == 'Другие сферы' or x==0:\r\n return 0\r\n else:\r\n return 1\r\n\r\n\r\ndef gen_title(data, col):\r\n m = {'Индивидуальный предприниматель':.6,\r\n 'Партнер':.6,\r\n 'Руководитель высшего звена':.5,\r\n 'Руководитель среднего звена':.5,\r\n 'Руководитель низшего звена':.4,\r\n 'Военнослужащий по контракту':.3,\r\n 'Высококвалифиц. 
специалист':.3,\r\n 'Специалист':.2,\r\n 'Служащий':.1,\r\n 'Рабочий':.1,\r\n 'Работник сферы услуг':.1,\r\n 'Пропуск':np.nan,\r\n 'Другое': np.nan,}\r\n data[col] = data[col].map(m)\r\n return data\r\n\r\n\r\ndef reg(x, m, col, ver=True):\r\n for l in m:\r\n if x[col] == l:\r\n return m[l]\r\n if ver:\r\n return 0\r\n else:\r\n return x[col]\r\n\r\n\r\ndef clean(data):\r\n \r\n data = gen_title(data, 'GEN_TITLE')\r\n data['GEN_TITLE'] = data['GEN_TITLE'].fillna(0)\r\n data['GEN_INDUSTRY'] = data.apply(lambda x: gen_industry(x['GEN_INDUSTRY']), axis=1)\r\n\r\n data['ORG_TP_FCAPITAL'] = data.apply(lambda x: 1 if x['ORG_TP_FCAPITAL'] == 'С участием' else 0, axis=1)\r\n\r\n data['JOB_DIR'] = data['JOB_DIR'].fillna(0)\r\n data['JOB_DIR'] = data.apply(lambda x: gen_industry(x['JOB_DIR']), axis=1)\r\n\r\n data['ORG_TP_STATE'] = data['ORG_TP_STATE'].fillna(0)\r\n data['ORG_TP_STATE'] = data.apply(lambda x: gen_industry(x['ORG_TP_STATE']), axis=1)\r\n\r\n m = {\r\n 'Состою в браке':4,\r\n 'Гражданский брак':3,\r\n 'Разведен(а)':2,\r\n 'Не состоял в браке':1,\r\n 'Вдовец/Вдова':0\r\n }\r\n data[\"MARITAL_STATUS\"] = data[\"MARITAL_STATUS\"].map(m)\r\n\r\n m = {'Неполное среднее':0, 'Среднее':.1, 'Среднее специальное':.2,\r\n 'Неоконченное высшее':.3, 'Высшее':.4, 'Два и более высших образования':.5,\r\n 'Ученая степень':.6}\r\n\r\n data[\"EDUCATION\"] = data[\"EDUCATION\"].map(m)\r\n data[\"EDUCATION\"] = data[\"EDUCATION\"].fillna(0)\r\n\r\n\r\n m = {\r\n 'Агинский Бурятский АО':'Бурятия',\r\n 'Башкирия':'Башкортостан',\r\n 'Коми-Пермяцкий АО':'Коми',\r\n 'Марийская республика':'Марий Эл',\r\n 'Мордовская республика':'Мордовия',\r\n 'Пермская область':'Пермский край',\r\n 'Санкт-Петербург':'Ленинградская область',\r\n 'Усть-Ордынский Бурятский АО':'Бурятия',\r\n 'Читинская область':'Бурятия',\r\n 'Эвенкийский АО':'Красноярский край',\r\n }\r\n for col in ['REG_ADDRESS_PROVINCE', 'FACT_ADDRESS_PROVINCE', 'POSTAL_ADDRESS_PROVINCE', 'TP_PROVINCE']:\r\n data[col] = data.apply(lambda x: reg(x, m, col, False), axis=1)\r\n\r\n # print (data[['REG_ADDRESS_PROVINCE', 'FACT_ADDRESS_PROVINCE', 'POSTAL_ADDRESS_PROVINCE', 'TP_PROVINCE']])\r\n m = {\r\n 'Алтайский край':.1,\r\n 'Башкортостан':.1,\r\n 'Вологодская область':.1,\r\n 'Иркутская область':.1,\r\n 'Кемеровская область':.1,\r\n 'Краснодарский край':.1,\r\n 'Красноярский край':.1,\r\n 'Ленинградская область':.1,\r\n 'Москва':.1,\r\n 'Московская область':.1,\r\n 'Нижегородская область':.1,\r\n 'Новосибирская область':.1,\r\n 'Омская область':.1,\r\n 'Оренбургская область':.1,\r\n 'Пермский край':.1,\r\n 'Ростовская область':.1,\r\n 'Самарская область':.1,\r\n 'Татарстан':.1,\r\n 'Челябинская область':.1,\r\n }\r\n for col in ['REG_ADDRESS_PROVINCE', 'FACT_ADDRESS_PROVINCE', 'POSTAL_ADDRESS_PROVINCE', 'TP_PROVINCE']:\r\n data[col] = data.apply(lambda x: reg(x, m, col), axis=1)\r\n\r\n m = {\r\n 'ЦЕНТРАЛЬНЫЙ 1':.1,\r\n 'ЦЕНТРАЛЬНЫЙ 2':.1,\r\n 'УРАЛЬСКИЙ':.1,\r\n 'ЦЕНТРАЛЬНЫЙ ОФИС':.1\r\n }\r\n data['REGION_NM'] = data.apply(lambda x: reg(x, m, 'REGION_NM'), axis=1)\r\n\r\n\r\n data['FACT_LIVING_TERM'] = data.apply(lambda x: fact_living_term(x), axis=1)\r\n data['FACT_LIVING_TERM'] = data['FACT_LIVING_TERM'].fillna(data['FACT_LIVING_TERM'].mean())\r\n\r\n data['WORK_TIME'] = data.apply(lambda x: clean_work_time(x), axis=1)\r\n data['WORK_TIME'] = data['WORK_TIME'].fillna(data['WORK_TIME'].mean())\r\n\r\n\r\n data['PERSONAL_INCOME'] = data.apply(lambda x: x['PERSONAL_INCOME'] if x['PERSONAL_INCOME']>1000 and x['PERSONAL_INCOME']<300000 else np.nan, 
axis=1)\r\n data['PERSONAL_INCOME'] = data.apply(lambda x: personal_income(x), axis=1)\r\n\r\n # valid(data, 'PERSONAL_INCOME', unique=True)\r\n data['PERSONAL_INCOME'] = data.apply(lambda x: data[data['AGE']==x['AGE']].groupby(['AGE'])['PERSONAL_INCOME'].mean() if x['PERSONAL_INCOME'] is None else x['PERSONAL_INCOME'], axis=1)\r\n\r\n\r\n data['PERSONAL_INCOME'] = data['PERSONAL_INCOME'].fillna(data['PERSONAL_INCOME'].median())\r\n\r\n # valid(data, 'PERSONAL_INCOME', unique=True)\r\n\r\n \r\n data['FAMILY_INCOME'] = data.apply(lambda x: family_income(x), axis=1)\r\n un = sorted(data['FAMILY_INCOME'].unique())\r\n m = dict([(u, i) for i, u in enumerate(un, 0)])\r\n data['FAMILY_INCOME'] = data['FAMILY_INCOME'].map(m)\r\n\r\n\r\n data[\"PREVIOUS_CARD_NUM_UTILIZED\"] = data[\"PREVIOUS_CARD_NUM_UTILIZED\"].fillna(0)\r\n return data\r\n\r\n\r\nif __name__ == '__main__':\r\n name1 = 'Credit_new.csv'\r\n name = 'Credit.csv'\r\n name_out1 = 'Credit_new.clean'\r\n name_out = 'Credit_test.clean'\r\n\r\n\r\n data = pd.read_csv(name1, sep=';', encoding='CP1251', decimal=',')\r\n # data = data.drop([\"TARGET\"], axis=1)\r\n # df = pd.read_csv(name1, sep=';', encoding='CP1251', decimal=',')\r\n # data = pd.concat([data, df])\r\n\r\n # valid(data, 'DL_DOCUMENT_FL', unique=True)\r\n \r\n # del data['AGREEMENT_RK']\r\n\r\n data = clean(data)\r\n\r\n data.to_csv(name_out1, sep='\\t', index=None)\r\n print (data.head())\r\n print ('save')\r\n","sub_path":"my_ml/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":9194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"152680994","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/satella/configuration/sources/from_dict.py\n# Compiled at: 2020-04-14 13:42:23\n# Size of source mod 2**32: 2250 bytes\nimport copy, importlib\nfrom satella.coding.recast_exceptions import rethrow_as\nfrom satella.coding.decorators import for_argument\nfrom satella.configuration import sources\nfrom satella.configuration.sources.base import BaseSource\nfrom satella.exceptions import ConfigurationError\n__all__ = [\n 'load_source_from_dict',\n 'load_source_from_list']\n\ndef handle_import(dct: dict):\n\n def convert(v):\n if 'cast_before' in dct:\n v = EXTRA_TYPES[dct['cast_before']['type']](dct['cast_before'])(v)\n return getattr(importlib.import_module(dct['module']), dct['attribute'])(v)\n\n return convert\n\n\nEXTRA_TYPES = {'binary':lambda dct: dct['value'].encode(dct.get('encoding', 'ascii')), \n 'lambda':lambda dct: eval('lambda x: ' + dct['operation'], globals(), locals()), \n 'import':handle_import}\n\n@rethrow_as(Exception, ConfigurationError)\n@for_argument(copy.copy)\ndef load_source_from_dict(dct: dict) -> BaseSource:\n \"\"\"\n dct has a form of\n\n {\n \"type\": \"BaseSource\",\n \"args\": [] # optional\n ... 
kwargs\n }\n\n :raises ConfigurationError: upon failure to instantiate\n \"\"\"\n type_ = dct.pop('type')\n args = dct.pop('args', [])\n optional = dct.pop('optional', False)\n\n def to_arg(arg):\n if isinstance(arg, dict):\n if 'type' in arg:\n a_type = arg['type']\n if a_type in EXTRA_TYPES:\n return EXTRA_TYPES[a_type](arg)\n if a_type in sources.__dict__:\n return load_source_from_dict(arg)\n raise ValueError('unrecognized argument type %s' % (arg['type'],))\n else:\n return arg\n\n args = map(to_arg, args)\n kwargs = {k:to_arg(v) for k, v in dct.items()}\n s = (sources.__dict__[type_])(*args, **kwargs)\n if optional:\n s = sources.OptionalSource(s)\n return s\n\n\ndef load_source_from_list(obj: list) -> 'sources.MergingSource':\n \"\"\"\n Builds a MergingSource from dict-ed objects\n \"\"\"\n return (sources.MergingSource)(*map(load_source_from_dict, obj))","sub_path":"pycfiles/satella-2.7.10.linux-x86_64.tar/from_dict.cpython-36.py","file_name":"from_dict.cpython-36.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"55475972","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndp_dict = dict()\n\n\ndef distance(p1, p2):\n return np.linalg.norm(p1 - p2, ord=2)\n\n\ndef plot(path):\n for p in path:\n points = test_points[p[0]], test_points[p[1]]\n plt.plot([x[0] for x in points], [x[1] for x in points], \"--b\")\n\n\ndef dp(points):\n n = len(points)\n m = np.zeros([n, n])\n path = list()\n for i in range(n):\n path.append(list())\n for j in range(n):\n path[i].append(list())\n for j in range(n):\n for i in range(n):\n if i == 1 and j == 0:\n m[i, j] = distance(points[0], points[1])\n path[i][j] = [(0, 1)]\n continue\n if j >= i:\n continue\n elif j == i - 1:\n min_v = float(\"inf\")\n p = None\n for k in range(j):\n d = m[j, k] + distance(points[i], points[k])\n if d < min_v:\n min_v = d\n p = (i, k)\n path[i][j] = path[j][p[1]] + [p]\n m[i, j] = min_v\n else:\n m[i, j] = m[i - 1, j] + distance(points[i - 1], points[i])\n path[i][j] = path[i - 1][j] + [(i - 1, i)]\n path = path[n - 1][n - 2] + [(n - 1, n - 2)]\n plot(path)\n return m[n - 1, n - 2] + distance(points[n - 1], points[n - 2])\n\n\nif __name__ == \"__main__\":\n test_points = np.array([(0, 6), (1, 0), (2, 3), (5, 4), (6, 1), (7, 5), (8, 2)])\n print(dp(test_points))\n # print([x[1] for x in points_test])\n plt.plot([x[0] for x in test_points], [x[1] for x in test_points], \"bo\")\n plt.show()\n # print(dp_dict)\n","sub_path":"dp/bitonic_euclidean_traveling_salesman.py","file_name":"bitonic_euclidean_traveling_salesman.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"327059046","text":"from django.conf.urls import url\nfrom .views import (\n RecipeDetailView,\n RecipeCreateView,\n RecipeUpdateView,\n RecipeDeleteView,\n RecipeSearchView,\n update_recipebooks_view,\n ReviewsListView,\n DeriveRecipeView,\n DerivedRecipesListView,\n)\n\nurlpatterns = [\n url(r'new$', RecipeCreateView.as_view(), name='new_recipe'),\n url(r'(?P<pk>[0-9]+)$', RecipeDetailView.as_view(), name='view_recipe'),\n url(r'(?P<pk>[0-9]+)/edit$',\n RecipeUpdateView.as_view(),\n name='edit_recipe'),\n url(r'(?P<pk>[0-9]+)/delete$',\n RecipeDeleteView.as_view(),\n name='delete_recipe'),\n url(r'(?P<pk>[0-9]+)/derive$',\n DeriveRecipeView.as_view(),\n name='derive_recipe'),\n url(r'(?P<pk>[0-9]+)/derivations$',\n DerivedRecipesListView.as_view(),\n 
name='derived_recipes'),\n url(r'search', RecipeSearchView.as_view(), name='recipe_search'),\n url(r'(?P<pk>[0-9]+)/update_recipebooks',\n update_recipebooks_view,\n name='recipe_update_recipebooks'),\n url(r'(?P<pk>[0-9]+)/reviews$',\n ReviewsListView.as_view(),\n name='recipe_reviews'),\n]\n","sub_path":"recipe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"28848951","text":"from ruamel import yaml\nimport regex\n\n\ndef purge_spoon_fed(d_out, current, i, last_i, current_level):\n d_out.setdefault('{:02d}'.format(current_level), list()).append({\n '_meta': {\n 'source': 'Chinese Sentences and audio, spoon fed (https://ankiweb.net/shared/info/867291675)',\n 'order': f'{last_i+1}-{i+1}'\n },\n 'hanzi': ''.join(current)\n })\n\n\ndef read_spoon_fed(current_level=6):\n with open('spoonfed.txt') as f_in, open('generated.yaml', 'r+') as f_out:\n previous = list(set(regex.findall(r'\\p{IsHan}', f_out.read())))\n f_out.seek(0)\n d_out = yaml.safe_load(f_out)\n current = list()\n last_i = 0\n i = 0\n rows = f_in.readlines()\n\n chunk = len(set(h for h in regex.findall(r'\\p{IsHan}', ''.join(rows))\n if h not in previous)) / (50 - current_level + 1) - 1\n print(chunk)\n\n for i, row in enumerate(rows):\n clean_sentence = list(set(h for h in regex.findall(r'\\p{IsHan}', row) if h not in (previous + current)))\n if len(clean_sentence) == 0 and len(current) == 0:\n last_i = i\n continue\n\n current += clean_sentence\n\n if len(current) > chunk:\n purge_spoon_fed(d_out, current, i, last_i, current_level)\n\n previous += current\n current = list()\n current_level += 1\n last_i = i\n\n purge_spoon_fed(d_out, current, i, last_i, current_level)\n\n f_out.seek(0)\n yaml.safe_dump(d_out, f_out, allow_unicode=True)\n\n\nif __name__ == '__main__':\n read_spoon_fed()\n","sub_path":"dev/hanzi/create_from_spoonfed.py","file_name":"create_from_spoonfed.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"497449452","text":"import collections\n\nimport numpy as np\nimport pandas as pd\n\nimport healthcare.algorithms.patterns\n\n\n# noinspection PyUnresolvedReferences,PyProtectedMember\nclass MVBeds:\n\n def __init__(self, uri):\n \"\"\"\n\n :param uri:\n \"\"\"\n\n self.uri = uri\n Dimensions = collections.namedtuple(typename='Dimensions', field_names=['all', 'descriptive'])\n self.dimensions = Dimensions._make((['region', 'code', 'institution'], ['region', 'institution']))\n\n Data = collections.namedtuple(typename='Data', field_names=['sheet_name', 'cells', 'start', 'end'])\n self.data = Data._make(('MV Beds Occupied', 'C:JZ', 24, 515))\n\n FieldNames = collections.namedtuple(typename='FieldNames', field_names=['cells', 'row'])\n self.fieldnames = FieldNames._make(('F:JZ', 13))\n\n self.notes = \"Provider Level Data - Mechanical Ventilation beds - occupied (as at 08:00)\"\n\n def dataset(self) -> pd.DataFrame:\n \"\"\"\n\n :return:\n \"\"\"\n\n try:\n return pd.read_excel(io=self.uri, sheet_name=self.data.sheet_name, header=None,\n skiprows=np.arange(self.data.start - 1), usecols=self.data.cells,\n nrows=(self.data.end - self.data.start + 1))\n except OSError as err:\n raise Exception(err.strerror) from err\n\n def fields(self) -> list:\n \"\"\"\n\n :return:\n \"\"\"\n\n try:\n names = pd.read_excel(\n io=self.uri, sheet_name=self.data.sheet_name, header=None, skiprows=self.fieldnames.row - 1,\n 
usecols=self.fieldnames.cells, nrows=1, parse_dates=True)\n except OSError as err:\n raise Exception(err.strerror) from err\n\n return self.dimensions.all + names.astype(str).values.tolist()[0]\n\n def institutions(self, blob) -> pd.DataFrame:\n \"\"\"\n\n :param blob:\n :return:\n \"\"\"\n\n frame = blob[self.dimensions.all]\n assert frame.shape[0] == frame.drop_duplicates().shape[0]\n frame = healthcare.algorithms.patterns.Patterns().exc(blob=frame.copy())\n\n return frame\n\n def exc(self) -> (pd.DataFrame, pd.DataFrame, str):\n \"\"\"\n\n :return:\n \"\"\"\n\n # Bare minimum checks\n assert self.data.cells.rsplit(':')[1] == self.fieldnames.cells.rsplit(':')[1], \\\n \"The last column of the data cells must be the same as the last column \" + \\\n \"of the fields names\"\n\n # Data\n data = self.dataset()\n data = data.set_axis(labels=self.fields(), axis=1)\n\n # Hence\n series = data[data.columns.drop(labels=self.dimensions.descriptive)].copy()\n series.fillna(0, inplace=True)\n institutions = self.institutions(blob=data)\n\n return series, institutions, self.notes\n","sub_path":"healthcare/cases/mvbeds.py","file_name":"mvbeds.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"475615671","text":"\r\nfrom nintendo.common import util, socketutils\r\nimport contextlib\r\nimport socket\r\nimport anyio\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass UDPSocket:\r\n\tdef __init__(self, sock):\r\n\t\tself.sock = sock\r\n\t\r\n\tasync def __aenter__(self): return self\r\n\tasync def __aexit__(self, typ, val, tr):\r\n\t\tawait self.close()\r\n\t\r\n\tasync def send(self, data, addr):\r\n\t\tawait self.sock.send(data, addr[0], addr[1])\r\n\tasync def recv(self, num=65536):\r\n\t\treturn await self.sock.receive(num)\r\n\t\r\n\tasync def close(self): await self.sock.close()\r\n\tasync def abort(self): await self.sock.close()\r\n\t\r\n\tasync def broadcast(self, data, port):\r\n\t\thost = util.broadcast_address()\r\n\t\tawait self.send(data, (host, port))\r\n\t\r\n\tdef local_address(self):\r\n\t\treturn self.sock.address\r\n\r\n\r\nclass UDPClient:\r\n\tdef __init__(self, sock, addr):\r\n\t\tself.sock = sock\r\n\t\tself.addr = addr\r\n\t\r\n\tasync def __aenter__(self): return self\r\n\tasync def __aexit__(self, typ, val, tr):\r\n\t\tawait self.close()\r\n\t\r\n\tasync def send(self, data):\r\n\t\tawait self.sock.send(data, self.addr[0], self.addr[1])\r\n\tasync def recv(self, num=65536):\r\n\t\treturn (await self.sock.receive(num))[0]\r\n\t\r\n\tasync def close(self): await self.sock.close()\r\n\tasync def abort(self): await self.sock.close()\r\n\t\r\n\tdef local_address(self):\r\n\t\treturn self.sock.address\r\n\tdef remote_address(self):\r\n\t\treturn self.addr\r\n\t\t\r\n\t\t\r\nclass UDPServerClient:\r\n\tdef __init__(self, server, addr):\r\n\t\tself.server = server\r\n\t\tself.addr = addr\r\n\t\t\r\n\t\tself.packets = socketutils.PacketQueue()\r\n\t\tself.closed = False\r\n\t\r\n\tasync def __aenter__(self): return self\r\n\tasync def __aexit__(self, typ, val, tr):\r\n\t\tawait self.close()\r\n\t\r\n\tasync def handle(self, data):\r\n\t\tawait self.packets.put(data)\r\n\t\r\n\tasync def send(self, data):\r\n\t\tif self.closed:\r\n\t\t\traise anyio.exceptions.ClosedResourceError\r\n\t\tawait self.server.send(data, self.addr)\r\n\t\r\n\tasync def recv(self):\r\n\t\treturn await self.packets.get()\r\n\t\r\n\tasync def close(self): await self.abort()\r\n\tasync def 
abort(self):\r\n\t\tif not self.closed:\r\n\t\t\tself.closed = True\r\n\t\t\tself.server.unregister(self.addr)\r\n\t\t\tawait self.packets.close()\r\n\t\r\n\tdef local_address(self): return self.server.local_address()\r\n\tdef remote_address(self): return self.addr\r\n\r\n\r\nclass UDPServer:\r\n\tdef __init__(self, handler, sock, group):\r\n\t\tself.handler = handler\r\n\t\tself.sock = sock\r\n\t\tself.group = group\r\n\t\t\r\n\t\tself.table = {}\r\n\t\r\n\tasync def __aenter__(self): return self\r\n\tasync def __aexit__(self, typ, val, tr):\r\n\t\tawait self.group.cancel_scope.cancel()\r\n\t\tawait self.sock.close()\r\n\t\r\n\tasync def start(self):\r\n\t\tawait self.group.spawn(self.serve)\r\n\t\r\n\tasync def serve(self):\r\n\t\twhile True:\r\n\t\t\tdata, addr = await self.sock.recv()\r\n\t\t\tlogger.debug(\"Received %i bytes from %s\", len(data), addr)\r\n\t\t\tif addr not in self.table:\r\n\t\t\t\tclient = UDPServerClient(self, addr)\r\n\t\t\t\tself.table[addr] = client\r\n\t\t\t\t\r\n\t\t\t\thost, port = client.remote_address()\r\n\t\t\t\tlogger.debug(\"New UDP connection: %s:%i\", host, port)\r\n\t\t\t\t\r\n\t\t\t\tawait self.group.spawn(self.handle, client)\r\n\t\t\tawait self.table[addr].handle(data)\r\n\t\r\n\tasync def handle(self, client):\r\n\t\twith util.catch_all():\r\n\t\t\tasync with client:\r\n\t\t\t\tawait self.handler(client)\r\n\t\r\n\tasync def send(self, data, addr):\r\n\t\tawait self.sock.send(data, addr)\r\n\t\r\n\tdef unregister(self, addr):\r\n\t\tdel self.table[addr]\r\n\r\n\r\n@contextlib.asynccontextmanager\r\nasync def bind(host=\"\", port=0):\r\n\tif not host:\r\n\t\thost = util.local_address()\r\n\t\r\n\tlogger.debug(\"Creating UDP socket at %s:%i\", host, port)\r\n\t\r\n\tsock = await anyio.create_udp_socket(interface=host, port=port)\r\n\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)\r\n\t\r\n\tasync with UDPSocket(sock) as sock:\r\n\t\tyield sock\r\n\t\r\n\tlogger.debug(\"UDP socket is closed\")\r\n\r\n@contextlib.asynccontextmanager\r\nasync def connect(host, port):\r\n\tlogger.debug(\"Connecting UDP client to %s:%i\", host, port)\r\n\tsock = await anyio.create_udp_socket(target_host=host, target_port=port)\r\n\tasync with UDPClient(sock, (host, port)) as client:\r\n\t\tyield client\r\n\tlogger.debug(\"UDP client is closed\")\r\n\r\n@contextlib.asynccontextmanager\r\nasync def serve(handler, host=\"\", port=0):\r\n\tlogger.info(\"Starting UDP server at %s:%i\", host, port)\r\n\tasync with bind(host, port) as sock:\r\n\t\tasync with anyio.create_task_group() as group:\r\n\t\t\tasync with UDPServer(handler, sock, group) as server:\r\n\t\t\t\tawait server.start()\r\n\t\t\t\tyield\r\n\tlogger.info(\"UDP server is closed\")\r\n","sub_path":"nintendo/common/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"580978821","text":"#!/usr/bin/python\n\"\"\"\nplot_results.py\n\"\"\"\nimport matplotlib.pyplot as plt\nimport cPickle as pkl\nimport re\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--pkl-group', required=True, action=\"append\", nargs='+')\n parser.add_argument('--savefig', action='store_true')\n return parser.parse_args()\n\ndef main(args):\n for group in args.pkl_group:\n files = sorted(group, key=lambda s: [int(t) if t.isdigit()\n else t.lower() for t in re.split('(\\d+)', s)])\n figure, (axes1, axes2) = plt.subplots(2,1)\n for file_ in files:\n with 
open(file_) as f:\n data = pkl.load(f)\n axes1.plot(data['times'], data['magnetizations'], label=data['temperature'], alpha=0.5)\n axes2.plot(data['times'], data['energies'], label=data['temperature'], alpha=0.5)\n axes1.set_title('Magnetization')\n axes2.set_title('Energy')\n axes1.legend(loc='upper left')\n axes2.legend(loc='upper left')\n figure.suptitle('%dx%d' % (data['grid_size'], data['grid_size'])) # assumes same grid size for group\n if args.savefig:\n figure.savefig('%dsweep_%dx%d_plot.png' % (data['times'].shape[0] - 1,\n data['grid_size'], data['grid_size']))\n plt.show()\n\nif __name__ == \"__main__\":\n main(parse_args())\n\n","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"584162837","text":"from sympy import Matrix\nclass cbe:\n def getCoefficient(self, List):\n A = dict()\n B = []\n for compound in List:\n elements = list(compound)\n l = ''\n for e in range(len(elements)):\n num = 1\n if (elements[e].isupper()):\n l = elements[e]\n j = e + 1\n if (j == len(elements)):\n A[l] = 1\n while (j < len(elements)):\n if (elements[j].islower()):\n l += elements[j]\n elif (elements[j].isupper()):\n num = 1\n A[l] = num\n l = ''\n break\n elif (elements[j].isnumeric()):\n num = elements[j]\n A[l] = num\n l = ''\n break\n j += 1\n B.append(A)\n A = {}\n return (B)\n\n def numEquations(self, left):\n unique_elements = set()\n for elements in left:\n for e in elements:\n unique_elements.add(e)\n return ([unique_elements, len(unique_elements)])\n\n def equation(self, left, right, n):\n eList, m = self.numEquations(left)\n eList = list(eList)\n A = []\n B = []\n for x in eList:\n l = []\n for u in left:\n\n found = 0\n for t in u:\n if (t == x):\n found = 1\n l.append(int(u[t]))\n break\n if (found == 1):\n continue\n else:\n l.append(0)\n\n A.append(l)\n for x in eList:\n l = []\n for u in right:\n\n found = 0\n for t in u:\n if (t == x):\n found = 1\n l.append(-int(u[t]))\n break\n if (found == 1):\n continue\n else:\n l.append(0)\n\n B.append(l)\n C = []\n for i in range(len(A)):\n C.append(A[i] + B[i])\n return ([C, len(eList)])\n\n def getBalancedEquation(self, lhsList, rhsList, nullspace):\n left_eqn = ''\n\n for i in range(len(lhsList)):\n if (i is len(lhsList) - 1):\n left_eqn += str(nullspace[i]) + lhsList[i]\n else:\n left_eqn += str(nullspace[i]) + lhsList[i] + '+'\n\n right_eqn = ''\n for i in range(len(rhsList)):\n if (i is len(rhsList) - 1):\n right_eqn += str(nullspace[m + i]) + rhsList[i]\n else:\n right_eqn += str(nullspace[m + i]) + rhsList[i] + '+'\n\n balanced = left_eqn + '-->' + right_eqn\n return (balanced)\n\nlhs = 'NaOH+H2SO4'\nrhs = 'Na2SO4+H2O'\nlhsList = lhs.split('+')\nrhsList = rhs.split('+')\nmolecules = lhsList + rhsList\nm = len(lhsList)\n\nchemical=cbe()\nB = chemical.getCoefficient(lhsList)\nC = chemical.getCoefficient(rhsList)\n\nA, n =chemical.equation(B, C, len(B) + len(C))\nnullspace = Matrix(A).nullspace()[0].tolist()\n\nans =chemical.getBalancedEquation(lhsList, rhsList, nullspace)\nprint(ans)\n","sub_path":"cbe.py","file_name":"cbe.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"319479439","text":"import string\nfrom typing import Sequence\n\nimport pytest\n\nfrom homework_02.custom_range import custom_range\n\n\n@pytest.mark.parametrize(\n [\"value\", \"expected_result\"],\n [\n 
((string.ascii_lowercase, \"g\"), [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]),\n ((string.ascii_lowercase, \"d\", \"h\"), [\"d\", \"e\", \"f\", \"g\"]),\n ((string.ascii_lowercase, \"p\", \"g\", -2), [\"p\", \"n\", \"l\", \"j\", \"h\"]),\n (([1, 2, \"ha-ha\", 4, 5, 6, 7, 8, 9, 0], 5), [1, 2, \"ha-ha\", 4]),\n (([1, 2, \"ha-ha\", 4, 5, 6, 7, 8, 9, 0], \"ha-ha\", 5), [\"ha-ha\", 4]),\n (([1, 2, \"ha-ha\", 4, 5, 6, 7, 8, 9, 0], 8, 2, -2), [8, 6, 4]),\n (([set, list, tuple(), tuple, 3, str(), str], tuple), [set, list, tuple()]),\n ],\n)\ndef test_custom_range(value: Sequence, expected_result: Sequence):\n actual_result = custom_range(*value)\n assert actual_result == expected_result\n\n\n@pytest.mark.parametrize(\"value\", [\"abcc\", [1, True], [0, False]])\ndef test_custom_range_assert_non_unique(value):\n with pytest.raises(Exception, match=\"Input consist non-unique values\"):\n custom_range(value, 5)\n\n\ndef test_custom_range_assert_no_elements():\n with pytest.raises(Exception, match=\"Input have no elements\"):\n custom_range([], 0)\n","sub_path":"homework_02/tests/test_custom_range.py","file_name":"test_custom_range.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"532744182","text":"class Stack:\n def __init__(self):\n self.stack = []\n\n def push(self, item):\n self.stack.append(item)\n\n def pop(self):\n if self.stack == []:\n return None\n else:\n return self.stack.pop()\n\n def display(self):\n if self.stack == []:\n return None\n else:\n return self.stack\n\n def is_empty(self):\n return self.stack == []\n\nif __name__ == \"__main__\":\n s = Stack()\n\n while True:\n print(\"Enter 1 to insert elements into the stack \\n\")\n print(\"Enter 2 to pop an element from a stack \\n\")\n print(\"Enter 3 to display the elements \\n\")\n print(\"Enter 4 to stop \\n\")\n choice = input(\"Enter your choice \\n\")\n\n if choice == \"1\":\n item = input(\"Enter the item to be inserted into the stack\")\n s.push(item)\n\n elif choice == \"2\":\n print(s.pop())\n\n elif choice == \"3\":\n print(s.display())\n\n else:\n break\n\n","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"151197031","text":"from room import Room\nfrom player import Player\nfrom item import Item\n\n# Declare all the rooms\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"North of you, the cave mount beckons\"),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"),\n}\n\n\n# Link rooms together\n\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n# Create items\ndagger = Item('dagger', 'Thief dagger made of Mythril')\nprint(dagger.name)\nsword = Item('sword', 'The mighty Ragnarok!')\nprint(sword.name)\n\n# Add items to rooms\nroom['foyer'].add_item(sword.name)\nroom['foyer'].add_item(dagger.name)\nroom['foyer'].print_list()\n\n#\n# Main\n#\n\n# Make a new player object that is currently in the 'outside' room.\n\nplayerOne = Player('Matt', room['outside'])\n\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n\nactive = True\n\n#gamplay loop\nwhile active == True:\n\n print('============================================')\n print(f'Player: {playerOne.name}')\n print(f'Current Room: {playerOne.current_room.name}')\n print(f'Items Available: {playerOne.current_room.list}\\n')\n print(f'{playerOne.current_room.description}')\n print('============================================')\n\n #input\n direction = input('Choose a direction: ').lower()\n\n #movement logic\n if direction == 'n' and playerOne.current_room.n_to:\n playerOne.current_room = playerOne.current_room.n_to\n elif direction == 's' and playerOne.current_room.s_to:\n playerOne.current_room = playerOne.current_room.s_to\n elif direction == 'e' and playerOne.current_room.e_to:\n playerOne.current_room = playerOne.current_room.e_to\n elif direction == 'w' and playerOne.current_room.w_to:\n playerOne.current_room = playerOne.current_room.w_to\n elif direction == 'q':\n active = False\n else:\n print('You cannot move in that direction')\n\n #item logic","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"77174157","text":"#!/usr/bin/env python\nimport re, os, sys, shutil\nfrom math import * \nfrom string import *\nfrom optparse import OptionParser\nimport operator\n\n\ndef main(argv):\n\tparser = OptionParser()\n\tparser.add_option(\"-c\", \"--sample\", action=\"store\", type=\"int\", dest=\"index\", metavar=\"\", help=\"sample label\")\n\tparser.add_option(\"-i\", \"--file1\", action=\"store\", type=\"string\", dest=\"infile\", metavar=\"\", help=\"input file\")\n\tparser.add_option(\"-o\", \"--outfile\", action=\"store\", type=\"string\", dest=\"outfile\", metavar=\"\", help=\"output file name\")\n\n\t(opt, args) = parser.parse_args(argv)\n\tif len(argv) < 4:\n \tparser.print_help()\n \tsys.exit(1)\n\t\n\tf = open(opt.infile, 'r')\n\to = open(opt.outfile, 'w')\n\tfor line in f:\n\t\tif not re.match(\"#\",line):\n\t\t\tline = line.strip()\n\t\t\tsline = line.split()\n\t\t\tassert opt.index <= len(sline)\n\t\t\texpression = atof(sline[opt.index])\n\t\t\tList = []\n\t\t\tfor i in range(1,len(sline)):\n\t\t\t\tList.append(atof(sline[i]))\n\t\t\tList.sort()\n\t\t\tindex = List.index(expression) + 1\n\t\t\to.write(sline[0] + '\\t' + str(index) + 
'\\n')\n\tf.close()\n\to.close()\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)","sub_path":"expression_celltype_specific_gene_list.py","file_name":"expression_celltype_specific_gene_list.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"260189207","text":"import bs4\nimport requests\n\n\nsite = requests.get(\"https://www.empireonline.com/movies/features/best-movies-2/\").text\nsoup = bs4.BeautifulSoup(site, \"html.parser\")\n\ntitles = soup.findAll(\"img\")\ntitles = [title.get(\"alt\") for title in titles]\ntitles = list(filter(lambda x: x, titles))\n\nprint(titles)\n","sub_path":"day_045/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158834535","text":"import streamlit as st\nimport pandas as pd\nimport datetime\n\nfrom funcoes_auxiliares.mobility_graphs import *\n\ndef mobilidade():\n mob = pd.read_csv('Base_de_dados/mobilidade.csv', sep=';')\n mob = mob.drop(['Unnamed: 0'], axis=1)\n mob['date'] = pd.to_datetime(mob['date'])\n\n dataAnalise = [datetime.datetime(2020, 3, 1), mob['date'].max()]\n dataAnalise[0] = st.sidebar.date_input('Data de inicio', dataAnalise[0], datetime.datetime(2020, 3, 1), mob['date'].max())\n dataAnalise[1] = st.sidebar.date_input('Data de termino', dataAnalise[1], dataAnalise[0], mob['date'].max())\n dataAnalise = pd.to_datetime(dataAnalise, errors = 'coerce')\n \n filtroDt = (mob.date >= dataAnalise[0]) & (mob.date <= dataAnalise[1])\n df4 = mob[filtroDt]\n\n st.markdown('# Analise de Mobilidade')\n mob_graphs(df4)","sub_path":"mobilidade.py","file_name":"mobilidade.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"98941477","text":"# -*- coding:utf-8 -*-\n\n\n# There is an m by n grid with a ball. Given the start coordinate (i,j) of the ball, you can move the ball to adjacent cell or cross the grid boundary in four directions (up, down, left, right). However, you can at most move N times. Find out the number of paths to move the ball out of grid boundary. 
The answer may be very large, return it after mod 10^9 + 7.\r\n#\n# Example 1:\r\n#\n# Input:m = 2, n = 2, N = 2, i = 0, j = 0\r\n# Output: 6\r\n# Explanation:\r\n#\n#\n#\n#\n# Example 2:\r\n#\n# Input:m = 1, n = 3, N = 3, i = 0, j = 1\r\n# Output: 12\r\n# Explanation:\r\n#\n#\n#\n#\n# Note:\r\n#\n# Once you move the ball out of boundary, you cannot move it back.\r\n# The length and height of the grid is in range [1,50].\r\n# N is in range [0,50].\r\n#\n#\n\n\nclass Solution(object):\n def findPaths(self, m, n, N, i, j):\n \"\"\"\n :type m: int\n :type n: int\n :type N: int\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n \n large = 10**9+7\n \n if N==0:\n return 0\n \n dirs = [(-1,0),(1,0),(0,-1),(0,1)]\n current_step = [[0]*n for c in range(m)]\n current_step[i][j] = 1\n \n out = 0\n \n for z in range(N):\n next_step = [[0]*n for c in range(m)]\n for a in range(m):\n for b in range(n):\n for d in dirs:\n x = a + d[0]\n y = b + d[1]\n if x>=0 and x=0 and y 1 and dp[i-1][j+1] then dp[i][j]=True\n\n# time complexity: O(n^2), n is the length of `s`\n# space complexity: O(n^2), n is the length of `s`\n\n\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n slen = len(s)\n dp = [[False for x in range(slen)] for x in range(slen)]\n left = right = 0\n for j in range(slen):\n for i in range(j+1):\n if s[i] == s[j]:\n if j-i > 1:\n dp[i][j] = (i+1 < slen and j-1 >= 0 and dp[i+1][j-1])\n else:\n dp[i][j] = True\n if dp[i][j] and j-i+1 > right - left + 1:\n left = i\n right = j\n return s[left: right+1]\n","sub_path":"000/longest_palindromic_substring.py","file_name":"longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"119418798","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nProduce an n-digit number whose digits are all the same.\r\nRead in two positive integers A and B, 1<=A<=9, 1<=B<=10, and produce the number AA...A, made up of B copies of A.\r\n\r\nInput format:\r\nEnter A and B on a single line.\r\nOutput format:\r\nOutput the integer AA...A (B copies of A) on a single line.\r\n\r\nSample input 1:\r\nAn example input is given here. For example: 1, 5\r\nSample output 1:\r\nThe corresponding output is given here. For example: 11111\r\n\r\nSample input 2:\r\nAn example input is given here. For example: 3 ,4\r\nSample output 2:\r\nThe corresponding output is given here. For example: 3333\r\n\"\"\"\r\n\r\n\r\na, b = input().split(',')\r\na=a.strip()\r\nb=int(b.strip())\r\nprint(int(a*b))\r\n\r\n","sub_path":"ZJU-Python/CH2/ch2-6.py","file_name":"ch2-6.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"111914977","text":"\n\nimport pygame\nimport random\nimport includes.helpers as helpers\nimport math\n\nclass Wall(pygame.sprite.Sprite):\n def __init__(self, pic, x, sides):\n pygame.sprite.Sprite.__init__(self)\n self.screen = pygame.display.get_surface()\n self.image, self.rect = helpers.load_image(pic, -1)\n new_size = (40, 40)\n self.image = pygame.transform.scale(self.image, (40, 40))\n self.rect = self.image.get_rect()\n self.x = x[0]\n self.y = x[1] \n self.rect.topleft = (self.x, self.y)\n self.speedx = 0\n self.speedy = 0\n self.curr_ticks = pygame.time.get_ticks()\n self.wallList = []\n self.side = sides[0]\n self.side2 = sides[1]\n\n def update(self):\n self.rect.topleft = (self.x, self.y)\n\n\nclass Blocker():\n def __init__(self):\n self.wList = []\n self.lWalls = []\n\n def blitWalls(self):\n inx = 0\n iny = 0\n side = -1\n side2 = -1\n#-----\n# top = 0\n# bot = 1\n# lef = 2\n# rgt = 3\n#----\n \n for i in range(9): #T\n side = 0\n self.wList.append(((inx, iny), (side, side2)))\n inx = inx + 40\n inx = 0\n iny = 560\n for i in range(9): #B\n side = 1\n self.wList.append(((inx, iny), (side, side2)))\n inx = inx + 
40\n inx = 760\n iny = 560\n for i in range(9): #B\n side = 1\n self.wList.append(((inx, iny), (side, side2)))\n inx = inx - 40\n inx = 760\n iny = 0\n for i in range(9): #T\n side = 0\n self.wList.append(((inx, iny), (side, side2)))\n inx = inx - 40\n inx = 0\n iny = 40\n for i in range(6): #L\n side = 2\n self.wList.append(((inx, iny), (side, side2)))\n iny = iny + 40\n inx = 760\n iny = 40\n for i in range(6): #R\n side = 3\n self.wList.append(((inx, iny), (side, side2)))\n iny = iny + 40\n inx = 760\n iny = 520\n for i in range(6): #R\n side = 3\n self.wList.append(((inx, iny), (side, side2)))\n iny = iny - 40\n inx = 0\n iny = 520\n for i in range(6): #L\n side = 2\n self.wList.append(((inx, iny), (side, side2)))\n iny = iny - 40\n #self.wList.append(((0, 380), (0,2)))\n #self.wList.append(((0, 440), (0,3)))\n #self.wList.append(((240, 760), (3, 0)))\n #self.wList.append(((320, 760), (3, 1)))\n #self.wList.append(((560, 440), (1, 3)))\n #self.wList.append(((560, 320), (1, 2)))\n #self.wList.append(((320, 0), (3, 1)))\n #self.wList.append(((240, 0), (3, 0)))\n\n #print len(self.wList)\n#-------Above correctly returns 60\n for x in self.wList:\n #print x\n\t#print \"blah\"\n wll = Wall(\"wall.jpg\", x[0], x[1])\n self.lWalls.append(wll)\n #for y in wall.wallList:\n\t#print y\n #print len(self.lWalls)\n return self.lWalls\n\n\nif __name__ == \"__main__\":\n\n b = Blocker()\n b.blitWalls()\n","sub_path":"includes/blockers.py","file_name":"blockers.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"491841451","text":"#!/usr/bin/env python\n\nimport serial\nimport time\nimport sys\nimport os\nimport yaml\nfrom pynput.keyboard import Key, Listener\n\nspeedFactor=1.0\ntraj_folder = \"traj_built\"\ncurr_flag_file = os.path.join(\"traj_built\",\"last_sent.txt\")\n\nrestartFlag = False\n\n# Read in data from the pressure controller (this seems not to work yet)\ndef serialRead(ser):\n while ser.in_waiting: # Or: while ser.inWaiting():\n print (ser.readline())\n\n\n\n\n\nclass TrajSend:\n def __init__(self, devname,baudrate):\n self.s = serial.Serial(devname,baudrate)\n self.traj_folder = traj_folder\n self.speedFactor = speedFactor\n\n time.sleep(1)\n\n self.s.write(\"echo;0\"+'\\n')\n self.s.write(\"load\"+'\\n')\n #self.s.write(\"load\"+'\\n')\n self.s.write(\"set;0\"+'\\n')\n self.s.write(\"mode;2\"+'\\n')\n #s.write('on')\n\n time.sleep(0.5)\n\n # Read in the trajectory and store it in a list of arrays\n def getTraj(self,filename):\n self.filename = filename\n # Read in the setpoint file\n inFile=os.path.join(traj_folder,filename+\".traj\")\n with open(inFile,'r') as f:\n # use safe_load instead of load\n trajIn = yaml.safe_load(f)\n f.close()\n\n\n # Get data from the file\n #self.settings = trajIn.get(\"settings\")\n self.traj = trajIn.get(\"setpoints\")\n self.wrap = trajIn.get(\"wrap\",False)\n \n\n\n\n def sendTraj(self):\n lastTime = 0.0\n configstring = \"trajconfig;%d;%d;%d\" %(0,len(self.traj),self.wrap)\n print(configstring)\n self.s.write(configstring+'\\n')\n for idx, entry in enumerate(self.traj):\n # Send a string to the pressure controller\n string=\"trajset;%d;%0.3f;%0.3f;%0.3f;%0.3f;%0.3f\" %(\n idx,\n self.speedFactor*entry[0],\n entry[1],\n entry[2],\n entry[3],\n entry[4])\n print(string)\n self.s.write(string+'\\n')\n \n time.sleep(0.05)\n\n # Sleep for a short time before the next send action\n #time.sleep(entry[0]-lastTime)\n #lastTime=entry[0]\n \n\n def shutdown(self):\n 
self.s.write(\"mode;3\"+'\\n')\n self.s.write(\"set;0\"+'\\n')\n self.s.close()\n\n\n dirname = os.path.dirname(curr_flag_file)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.out_file = open(curr_flag_file, \"w+\")\n self.out_file.write(self.filename)\n self.out_file.close()\n \n \n def readStuff(self):\n if self.s.in_waiting: # Or: while ser.inWaiting():\n line = self.s.readline().strip()\n print(line)\n\n\n \n\n\n\n\n \n\n\nif __name__ == '__main__':\n if 2<= len(sys.argv)<=3:\n\n if len(sys.argv)==3:\n speedFact = 1.0/float(sys.argv[2])\n else:\n speedFact= 1.0\n \n try:\n # Get the serial object to use\n inFile=os.path.join(\"config\",\"comms\",\"serial_config.yaml\")\n with open(inFile) as f:\n # use safe_load instead of load\n serial_set = yaml.safe_load(f)\n f.close()\n\n # Create a pressure controller object\n pres=TrajSend(serial_set.get(\"devname\"), serial_set.get(\"baudrate\"))\n pres.getTraj(sys.argv[1])\n\n\n pres.speedFactor = speedFact\n\n # Upload the trajectory and start it\n pres.sendTraj()\n \n pres.readStuff()\n pres.shutdown()\n \n \n except KeyboardInterrupt:\n pres.shutdown()\n \n else:\n print(\"Please include the filename as the input argument\")","sub_path":"pressure_control_run/send_pre_built.py","file_name":"send_pre_built.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"214833830","text":"#!/usr/bin/env python\n\nfrom __future__ import division\nimport numpy as np\nimport time\nimport rospy\nimport dispenvlib as dl\nimport random\n\nfrom taskgen.msg import Task\nfrom std_msgs.msg import String\n\n# mct -> most cumulative time selection\n\n\nclass HeurDisp():\n\n def __init__(self):\n # node init\n rospy.init_node('heur_disp_node')\n rospy.Subscriber('task', Task, self.taskgen_callback)\n self.pub_msg = rospy.Publisher('/disp_msg', String, queue_size=10)\n self.pub_test_info = rospy.Publisher('/test_info', Task, queue_size=10)\n # class init\n self.no_robots = 4\n self.new_msg = None\n self.prev_msg = None\n self.incoming_buffer = dl.TaskBuffer()\n self.robot_buffers = dl.TaskBuffers(self.no_robots)\n self.dltools = dl.DispatcherTools(self.no_robots)\n self.check_interval = 1.0\n self.prev_occ = np.zeros(self.no_robots)\n self.active_tasks = np.zeros(self.no_robots)\n self.cumulative_times = np.zeros(self.no_robots)\n self.time_recorded = np.zeros(self.no_robots) # 0 if not, 1 if is\n self.start_times = np.zeros(self.no_robots)\n self.end_times = np.zeros(self.no_robots)\n time.sleep(1.0)\n\n def publish_info(self, task_id, stage, robot, task_level):\n # for publishing test info\n info = Task() # task is repurposed for sharing test info\n info.task_id = task_id\n info.x_position = stage\n # stages:\n # 0 -> task is placed in robots' buffer\n # 1 -> task is started\n # 2 -> task is completed\n info.y_position = robot\n info.z_orientation = task_level\n info.deadline = float(rospy.Time.now().to_sec())\n self.pub_test_info.publish(info)\n rospy.sleep(0.3)\n\n def taskgen_callback(self, msg):\n self.new_msg = msg\n if self.new_msg != self.prev_msg:\n task = np.array([msg.task_id, msg.x_position, msg.y_position, msg.z_orientation, msg.deadline])\n self.incoming_buffer.add_task(task)\n self.prev_msg = self.new_msg\n else:\n pass\n\n def execute_tasks(self):\n curr_occ = self.dltools.get_robot_occupancy()\n for robot in range(self.no_robots):\n if self.robot_buffers.is_buffer_empty_for_robot(robot) == False:\n if curr_occ[robot] == 0 and 
self.prev_occ[robot] == 0:\n task = self.robot_buffers.check_last_task(robot)\n self.dltools.set_goal(robot, task, self.pub_msg)\n self.publish_info(int(task[0]), 1, robot, task[-1]) # stage 1\n self.active_tasks[robot] = task[0]\n self.prev_occ[robot] = 1\n curr_occ[robot] = 1\n elif curr_occ[robot] == 0 and self.prev_occ[robot] == 1:\n task_id = self.active_tasks[robot]\n self.publish_info(int(task_id), 2, robot, 0) # stage 2\n self.robot_buffers.delete_task_by_id(robot, task_id)\n self.prev_occ[robot] = 0\n msg_str = \"Goal achieved for robot \" + str(robot) + \". Task id: \" + str(int(task_id)) + \". Time: %s\" % rospy.Time.now().to_sec()\n self.pub_msg.publish(msg_str)\n else:\n pass\n else:\n pass\n\n def add_waiting_times(self):\n curr_occ = self.dltools.get_robot_occupancy()\n min_time = np.min(self.cumulative_times)\n self.cumulative_times -= min_time # so that numbers don't add to infinity\n for robot in range(self.no_robots):\n if curr_occ[robot] == 0 and self.time_recorded[robot] == 0:\n # robot is waiting\n # start recording\n self.start_times[robot] = rospy.Time.now().to_sec()\n self.time_recorded[robot] = 1\n elif curr_occ[robot] == 1 and self.time_recorded[robot] == 1:\n # robot got the task and is no longer waiting\n self.time_recorded[robot] = 0\n elif curr_occ[robot] == 0 and self.time_recorded[robot] == 1:\n end_time = rospy.Time.now().to_sec()\n delta_time = end_time - self.start_times[robot]\n self.cumulative_times[robot] += delta_time\n self.start_times[robot] = end_time\n else:\n pass\n\n def mct_selection(self):\n start_time = rospy.Time.now().to_sec()\n while(1):\n self.add_waiting_times()\n curr_time = rospy.Time.now().to_sec()\n if curr_time - start_time >= self.check_interval:\n start_time = curr_time\n self.execute_tasks()\n self.robot_buffers.print_all_buffers()\n print(\"cumulative times:\")\n print(self.cumulative_times)\n # core of the node\n if self.incoming_buffer.is_buffer_empty() == False:\n # there are tasks waiting\n # robot_id = np.random.randint(self.no_robots)\n # task = self.incoming_buffer.get_task()\n # self.robot_buffers.add_task(robot_id, task)\n robot_ids = np.where(self.cumulative_times == np.max(self.cumulative_times))\n # robot_id = robot_ids[0][0]\n robot_id = random.choice(robot_ids[0])\n print(\"robot ids, robot id\", robot_ids, robot_id)\n task = self.incoming_buffer.get_task()\n self.robot_buffers.add_task(robot_id, task)\n self.publish_info(int(task[0]), 0, robot_id, task[-1]) # stage 0\n\n print(\"_____________________________________________________________\")\n print(\"Robot \" + str(robot_id) + \" gets task \" + str(int(task[0])) + \"!\")\n print(\"_____________________________________________________________\")\n\n\ndef main():\n random_dispatcher = HeurDisp()\n random_dispatcher.mct_selection()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"disp2_ws/src/heuristic_dispatcher/scripts/mctdisp.py","file_name":"mctdisp.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"439280527","text":"import eHive\nimport os\n\nclass SeedVCFIntegration(eHive.BaseRunnable):\n \"\"\"Class for seeding the VCFIntegration pipeline\"\"\"\n\n def run(self):\n\n self.warning('Analysing file: %s'% self.param_required('filepath'))\n \n filepath=self.param_required('filepath')\n \n flist=[] # will store the list of tuples\n with open(filepath) as f:\n for line in f:\n line=line.rstrip('\\n')\n flist.append((line.split('\\t')[0],line.split('\\t')[1]))\n 
\n self.param('flist', flist)\n\n def write_output(self):\n self.warning('Work is done!')\n self.dataflow( { 'flist' : self.param('flist') }, 1)\n\n","sub_path":"VARIANT_CALLING/PyHive/Seed/SeedVCFIntegration.py","file_name":"SeedVCFIntegration.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"13301678","text":"from flask import Flask, request, jsonify, Response, abort, send_file\nfrom time import time\nfrom binascii import b2a_hex\nimport os\nimport shutil\nfrom helpers import *\n\n\nSPACES_DIR = '/Users/tandav/Documents/spaces'\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PATCH, DELETE')\n return response\n\n\n@app.route('/space/<space>', methods=['GET'])\ndef get_space_notes_and_files(space):\n \n space_dir = f'{SPACES_DIR}/{space}'\n \n if os.path.isdir(space_dir):\n \n if space == 'all':\n update_spaces_stats(SPACES_DIR)\n \n return jsonify({\n 'notes': get_notes(space_dir),\n 'files': get_files(space_dir),\n })\n else:\n abort(404)\n\n\n\n@app.route('/<space>/note', methods=['POST'])\ndef create_note(space):\n hash = b2a_hex(os.urandom(8)).decode('utf-8')\n\n note_dir = f'{SPACES_DIR}/{space}/notes/{hash}'\n\n os.makedirs(note_dir)\n\n new_html = f'{note_dir}/{hash}.html'\n open(new_html, 'x').close() # x mode: if file exist, raise an error\n\n edited_html_string = edit_in_text_editor(new_html)\n \n return jsonify({\n 'hash': hash, \n 'html': edited_html_string,\n })\n\n\n@app.route('/<space>/empty_note', methods=['POST'])\ndef create_empty_note(space):\n hash = b2a_hex(os.urandom(8)).decode('utf-8')\n\n note_dir = f'{SPACES_DIR}/{space}/notes/{hash}'\n\n os.makedirs(note_dir)\n\n new_html = f'{note_dir}/{hash}.html'\n open(new_html, 'x').close() # x mode: if file exist, raise an error\n\n return jsonify({\n 'hash': hash, \n 'html': '',\n })\n\n\n\n@app.route('/<space>/<note>', methods=['PATCH'])\ndef edit_note(space, note):\n edited_html_string = edit_in_text_editor(f'{SPACES_DIR}/{space}/notes/{note}/{note}.html')\n return Response(edited_html_string, mimetype='text/xml')\n\n\n@app.route('/<space>/<note_hash>', methods=['DELETE'])\ndef delete_note(space, note_hash):\n shutil.rmtree(f'{SPACES_DIR}/{space}/notes/{note_hash}')\n return Response(None, 200)\n\n@app.route('/<space_name>', methods=['POST'])\ndef new_space(space_name):\n os.makedirs(f'{SPACES_DIR}/{space_name}/notes')\n return Response(None, 200)\n\n@app.route('/space/<space>', methods=['DELETE'])\ndef delete_space(space):\n shutil.rmtree(f'{SPACES_DIR}/{space}')\n return Response(None, 200)\n\n\n@app.route('/space/<space>/<new_space_name>', methods=['POST'])\ndef new_link_note(space, new_space_name):\n link_note_hash = b2a_hex(os.urandom(8)).decode('utf-8')\n\n link_note_dir = f'{SPACES_DIR}/{space}/notes/{link_note_hash}'\n\n os.makedirs(link_note_dir)\n\n link_note_html = f'[{new_space_name}]'\n with open(f'{link_note_dir}/{link_note_hash}.html', 'x') as link_note:\n link_note.write(link_note_html)\n\n os.makedirs(f'{SPACES_DIR}/{new_space_name}/notes')\n\n return jsonify({\n 'hash': link_note_hash, \n 'html': link_note_html,\n })\n\n@app.route('/eval', methods=['POST'])\ndef eval():\n script = request.get_json()['script']\n\n if request.remote_addr == '127.0.0.1':\n os.system(script)\n return Response(None, 200)\n else:\n 
log.write(str(403) + '\\n')\n abort(403)\n\n@app.route('/space/<space>/eval', methods=['POST'])\ndef eval_from_space(space):\n script = request.get_json()['script']\n\n if request.remote_addr == '127.0.0.1':\n os.chdir(f'{SPACES_DIR}/{space}')\n os.system(script)\n return Response(None, 200)\n else:\n log.write(str(403) + '\\n')\n abort(403)\n\n@app.route('/space/<space>/finder', methods=['GET'])\ndef open_space_in_finder(space):\n os.system(f'open {SPACES_DIR}/{space}')\n return Response(None, 200)\n \n@app.route('/space/<space>/terminal', methods=['GET'])\ndef open_space_in_terminal(space):\n os.system(f'open -a iTerm {SPACES_DIR}/{space}')\n return Response(None, 200)\n\n@app.route('/space/<space>/sublime', methods=['GET'])\ndef open_space_in_sublime(space):\n os.system(f'/Applications/Sublime\\\\ Text.app/Contents/SharedSupport/bin/subl {SPACES_DIR}/{space}')\n return Response(None, 200)\n\n@app.route('/space/<space>/note/<note>/finder', methods=['GET'])\ndef open_note_in_finder(space, note):\n os.system(f'open {SPACES_DIR}/{space}/notes/{note}')\n return Response(None, 200)\n\n@app.route('/space/<space>/eval/