\\w+)', api.confirm_staff)\n]\n","sub_path":"staff/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"219713108","text":"# -*- coding: UTF-8 -*-\n# type of the change: Created\n# Comments: Creacion de generacion de codigo para clientes y proveedores (depends for res_partner)\n\n\n\nfrom odoo import fields, models, api,exceptions\nimport re\n\nclass RespartnerRif(models.Model):\n _inherit = 'res.partner'\n\n def write(self, vals):\n res = {}\n if vals.get('vat'):\n res = self.validate_rif_er(vals.get('vat', False))\n if not res:\n raise exceptions.except_orm(('Advertencia!'), (\n 'El rif tiene el formato incorrecto. Ej: V-012345678, E-012345678, J-012345678 o G-012345678. Por favor intente de nuevo'))\n if not self.validate_rif_duplicate(vals.get('vat', False)):\n raise exceptions.except_orm(('Advertencia!'),\n (\n u'El cliente o proveedor ya se encuentra registrado con el rif: %s y se encuentra activo') % (\n vals.get('vat', False)))\n if vals.get('email'):\n res = self.validate_email_addrs(vals.get('email'), 'email')\n if not res:\n raise exceptions.except_orm(('Advertencia!'), (\n 'El email es incorrecto. Ej: cuenta@dominio.xxx. Por favor intente de nuevo'))\n res = super(RespartnerRif, self).write(vals)\n return res\n\n\n\n @api.model\n def create(self, vals):\n res = {}\n if vals.get('vat'):\n res = self.validate_rif_er(vals.get('vat'))\n if not res:\n raise exceptions.except_orm(('Advertencia!'), (\n 'El rif tiene el formato incorrecto. Ej: V-012345678, E-012345678, J-012345678 o G-012345678. Por favor intente de nuevo'))\n if not self.validate_rif_duplicate(vals.get('vat', False), True):\n raise exceptions.except_orm(('Advertencia!'),\n (\n u'El cliente o proveedor ya se encuentra registrado con el rif: %s y se encuentra activo') % (\n vals.get('vat', False)))\n if vals.get('email'):\n res = self.validate_email_addrs(vals.get('email'), 'email')\n if not res:\n raise exceptions.except_orm(('Advertencia!'), (\n 'El email es incorrecto. Ej: cuenta@dominio.xxx. Por favor intente de nuevo'))\n res = super(RespartnerRif, self).create(vals)\n return res\n\n\n def validate_rif_er(self, field_value):\n res = {}\n\n rif_obj = re.compile(r\"^[V|E|J|G]+[-][\\d]{9}\", re.X)\n if rif_obj.search(field_value.upper()):\n res = {\n 'vat':field_value\n }\n return res\n\n\n def validate_rif_duplicate(self, valor, create=False):\n found = True\n partner = self.search([('vat', '=', valor)])\n for partner_ids in partner:\n if create:\n if partner_ids and (partner_ids.customer_rank or partner_ids.supplier_rank):\n found = False\n elif partner_ids and (partner_ids.customer_rank or partner_ids.supplier_rank):\n found = False\n return found\n\n def validate_email_addrs(self, email, field):\n res = {}\n\n mail_obj = re.compile(r\"\"\"\n \\b # comienzo de delimitador de palabra\n [\\w.%+-] # usuario: Cualquier caracter alfanumerico mas los signos (.%+-)\n +@ # seguido de @\n [\\w.-] # dominio: Cualquier caracter alfanumerico mas los signos (.-)\n +\\. # seguido de .\n [a-zA-Z]{2,3} # dominio de alto nivel: 2 a 6 letras en minúsculas o mayúsculas.\n \\b # fin de delimitador de palabra\n \"\"\", re.X) # bandera de compilacion X: habilita la modo verborrágico, el cual permite organizar\n # el patrón de búsqueda de una forma que sea más sencilla de entender y leer.\n if mail_obj.search(email):\n res = {\n field:email\n }\n return res\n","sub_path":"l10n_ve_validation_rif_res_company/models/res_partner_validation_3mit.py","file_name":"res_partner_validation_3mit.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"628094591","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom typing import Union\n\nimport aiohttp\nfrom uonet_request_signer_hebe import get_signature_values\nfrom yarl import URL\n\nfrom ._api_helper import ApiHelper\nfrom ._exceptions import (\n ExpiredTokenException,\n InvalidPINException,\n InvalidSignatureValuesException,\n InvalidSymbolException,\n InvalidTokenException,\n UnauthorizedCertificateException,\n VulcanAPIException,\n)\nfrom ._keystore import Keystore\nfrom ._utils import (\n APP_NAME,\n APP_OS,\n APP_USER_AGENT,\n APP_VERSION,\n log,\n millis,\n now_datetime,\n now_gmt,\n now_iso,\n urlencode,\n uuid,\n)\nfrom .model import Period, Student\n\n\nclass Api:\n \"\"\"The API service class.\n\n Provides methods for sending GET/POST requests on a higher\n level, automatically generating the required headers\n and other values.\n\n :var `~vulcan._api_helper.ApiHelper` ~.helper: a wrapper for getting\n most data objects more easily\n \"\"\"\n\n def __init__(self, keystore: Keystore, account=None, session=None):\n self._session = session or aiohttp.ClientSession()\n self._keystore = keystore\n if account:\n self._account = account\n self._rest_url = account.rest_url\n self._student = None\n self._period = None\n self.helper = ApiHelper(self)\n\n def _build_payload(self, envelope: dict) -> dict:\n return {\n \"AppName\": APP_NAME,\n \"AppVersion\": APP_VERSION,\n \"CertificateId\": self._keystore.fingerprint,\n \"Envelope\": envelope,\n \"FirebaseToken\": self._keystore.firebase_token,\n \"API\": 1,\n \"RequestId\": uuid(),\n \"Timestamp\": millis(),\n \"TimestampFormatted\": now_iso(),\n }\n\n def _build_headers(self, full_url: str, payload: str) -> dict:\n dt = now_datetime()\n digest, canonical_url, signature = get_signature_values(\n self._keystore.fingerprint,\n self._keystore.private_key,\n payload,\n full_url,\n dt,\n )\n\n headers = {\n \"User-Agent\": APP_USER_AGENT,\n \"vOS\": APP_OS,\n \"vDeviceModel\": self._keystore.device_model,\n \"vAPI\": \"1\",\n \"vDate\": now_gmt(dt),\n \"vCanonicalUrl\": canonical_url,\n \"Signature\": signature,\n }\n\n if digest:\n headers[\"Digest\"] = digest\n headers[\"Content-Type\"] = \"application/json\"\n\n return headers\n\n async def _request(\n self, method: str, url: str, body: dict = None, **kwargs\n ) -> Union[dict, list]:\n if self._session.closed:\n raise RuntimeError(\"The AioHttp session is already closed.\")\n\n full_url = (\n url\n if url.startswith(\"http\")\n else self._rest_url + url\n if self._rest_url\n else None\n )\n\n if not full_url:\n raise ValueError(\"Relative URL specified but no account loaded\")\n\n payload = self._build_payload(body) if body and method == \"POST\" else None\n payload = json.dumps(payload) if payload else None\n headers = self._build_headers(full_url, payload)\n\n log.debug(f\" > {method} to {full_url}\")\n\n # a workaround for aiohttp incorrectly re-encoding the full URL\n full_url = URL(full_url, encoded=True)\n async with self._session.request(\n method, full_url, data=payload, headers=headers, **kwargs\n ) as r:\n try:\n response = await r.json()\n status = response[\"Status\"]\n envelope = response[\"Envelope\"]\n\n # check for the presence of a b64 string preceded with ': '\n if status[\"Code\"] == 100 and \": \" in status[\"Message\"]:\n raise InvalidSignatureValuesException()\n elif status[\"Code\"] == 108:\n log.debug(f\" ! {status}\")\n raise UnauthorizedCertificateException()\n elif status[\"Code\"] == 200:\n log.debug(f\" ! 
{status}\")\n raise InvalidTokenException()\n elif status[\"Code\"] == 203:\n log.debug(f\" ! {status}\")\n raise InvalidPINException()\n elif status[\"Code\"] == 204:\n log.debug(f\" ! {status}\")\n raise ExpiredTokenException()\n elif status[\"Code\"] == -1:\n log.debug(f\" ! {status}\")\n raise InvalidSymbolException()\n elif status[\"Code\"] != 0:\n log.debug(f\" ! {status}\")\n raise VulcanAPIException(status[\"Message\"])\n\n log.debug(f\" < {str(envelope)}\")\n return envelope\n except ValueError as e:\n raise VulcanAPIException(\"An unexpected exception occurred.\") from e\n\n async def get(self, url: str, query: dict = None, **kwargs) -> Union[dict, list]:\n query = \"&\".join(f\"{x}={urlencode(query[x])}\" for x in query) if query else None\n\n if query:\n url += f\"?{query}\"\n return await self._request(\"GET\", url, body=None, **kwargs)\n\n async def post(self, url: str, body: dict, **kwargs) -> Union[dict, list]:\n return await self._request(\"POST\", url, body, **kwargs)\n\n async def open(self):\n if self._session.closed:\n self._session = aiohttp.ClientSession()\n\n async def close(self):\n await self._session.close()\n\n @property\n def account(self):\n return self._account\n\n @property\n def student(self) -> Student:\n return self._student\n\n @student.setter\n def student(self, student: Student):\n if not self._account:\n raise AttributeError(\"Load an Account first!\")\n self._rest_url = self._account.rest_url + student.unit.code + \"/\"\n self._student = student\n self.period = student.current_period\n\n @property\n def period(self) -> Period:\n return self._period\n\n @period.setter\n def period(self, period: Period):\n self._period = period\n","sub_path":"vulcan/_api.py","file_name":"_api.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"152454054","text":"import math\nimport re\nfrom typing import List, Tuple, Callable, Any\n\n\nclass Monkey:\n \"\"\"\n A monkey carries a list of items and can inspect and throw them\n \"\"\"\n\n def __init__(self,\n items: List[int],\n test_divisor: int,\n inspect_function: Callable[[int], int],\n true_target: int,\n false_target: int\n ) -> None:\n # The only property an item has is its worry level\n self.items = items\n # The factor used to determine target\n self.test_divisor = test_divisor\n # The function used to adjust worry on inspection\n self.inspect_function = inspect_function\n # Target for if test is true\n self.true_target = true_target\n # Target for if test is false\n self.false_target = false_target\n # Count how many items I have inspected\n self.inspections = 0\n\n def __str__(self):\n return f\"Monkey\\n\" \\\n f\"Items: {self.items}\\n\" \\\n f\"Test: Divide by {self.test_divisor}\\n\" \\\n f\"True Target: {self.true_target}\\n\" \\\n f\"False Target: {self.false_target}\\n\" \\\n f\"Inspections: {self.inspections}\\n\"\n\n @staticmethod\n def construct_from_string(input_string: str) -> \"Monkey\":\n \"\"\"\n Create a monkey from an input string\n \"\"\"\n input_string = input_string.strip()\n input_lines = input_string.split(\"\\n\")\n\n items_line, operation_line, test_line, true_line, false_line = input_lines[1:6]\n\n # items line\n items_string = items_line.split(\":\")[1]\n items = items_string.split(\",\")\n items = [int(item) for item in items]\n\n # operation line\n regex = re.compile(\".* new = .* (.) (.*)\")\n op, right = regex.findall(operation_line)[0]\n inspect_function = Monkey.create_inspect_function(op, right)\n test_divisor = int(test_line.split(\"divisible by \")[1])\n true_target = int(true_line.split(\"throw to monkey \")[1])\n false_target = int(false_line.split(\"throw to monkey \")[1])\n\n return Monkey(items, test_divisor, inspect_function, true_target, false_target)\n\n @staticmethod\n def create_inspect_function(op: str, arg: str) -> Callable[[int], int]:\n \"\"\"\n Create the operation for altering worry level\n \"\"\"\n if arg == \"old\":\n return lambda x: Monkey.times_or_plus(op)(x, x)\n else:\n param = int(arg)\n return lambda x: Monkey.times_or_plus(op)(x, param)\n\n @staticmethod\n def times_or_plus(op: str) -> Callable[[int, int], int]:\n if op == \"*\":\n return lambda x, y: x * y\n if op == \"+\":\n return lambda x, y: x + y\n raise Exception(f\"{op} is not an operator\")\n\n def inspect_all(self):\n \"\"\"\n Inspect all my items\n \"\"\"\n self.items = [self.inspect_function(item) for item in self.items]\n self.items = [math.floor(item / 3.0) for item in self.items]\n self.inspections += len(self.items)\n\n def throw_all(self) -> List[Tuple[int, int]]:\n \"\"\"\n Declare all throws I intend to make\n \"\"\"\n passes = [self.throw(item) for item in self.items]\n self.items = []\n return passes\n\n def throw(self, item):\n \"\"\"\n Declare a throw for one of my items. 
I do not remove the item from my inventory because\n I do that in throw_all.\n \"\"\"\n if item % self.test_divisor == 0:\n target = self.true_target\n else:\n target = self.false_target\n return target, item\n\n def catch(self, item):\n \"\"\"\n Receive an item into my inventory\n \"\"\"\n self.items.append(item)\n\n\nclass Barrel:\n \"\"\"\n A collection of monkeys is called a barrel.\n It deals with throwing and comparisons between monkeys.\n \"\"\"\n\n def __init__(self, monkeys: List[Monkey]) -> None:\n self.monkeys = monkeys\n\n @staticmethod\n def construct_from_string(input_string: str) -> \"Barrel\":\n \"\"\"\n Create a new barrel using an input string\n \"\"\"\n monkey_inputs = input_string.split(\"\\n\\n\")\n monkeys = [Monkey.construct_from_string(monkey_input) for monkey_input in monkey_inputs]\n return Barrel(monkeys)\n\n def round(self):\n \"\"\"\n Do all passes for one round\n \"\"\"\n for monkey in self.monkeys:\n monkey.inspect_all()\n passes = monkey.throw_all()\n\n for target, item in passes:\n self.monkeys[target].catch(item)\n\n def n_rounds(self, n):\n \"\"\"\n Do n rounds\n \"\"\"\n for _ in range(n):\n self.round()\n\n def monkey_business(self):\n \"\"\"\n Compute the amount of monkey business\n \"\"\"\n activity = [monkey.inspections for monkey in self.monkeys]\n one, two = sorted(activity)[-2:]\n return one * two\n\n\ndef main():\n with open(\"../input.txt\", \"r\") as file:\n input_file = file.read()\n barrel = Barrel.construct_from_string(input_file)\n barrel.n_rounds(20)\n print(barrel.monkey_business())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2022/day_11/Python/monkeys.py","file_name":"monkeys.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"204618206","text":"def canSum(targetSum, numbers, memo = None):\n\tif memo == None: memo = {}\n\t\n\tif targetSum in memo: return memo[targetSum]\n\tif targetSum == 0: return True\n\tif targetSum < 0: return False\n\n\tfor num in numbers:\n\t\trem = targetSum - num\n\t\tif (canSum(rem, numbers, memo) == True):\n\t\t\tmemo[targetSum] = True\n\t\t\treturn True\n\n\tmemo[targetSum] = False\n\treturn False\n\nprint(canSum(7, [2, 3]))\nprint(canSum(7, [5, 3, 4, 7]))\nprint(canSum(7, [2, 4]))\nprint(canSum(8, [2, 3, 5]))\nprint(canSum(300, [7, 14]))\n\n","sub_path":"canSum.py","file_name":"canSum.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"11426628","text":"import os\nimport re\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom medicine.models.compound import Compound\nfrom mlxtend.preprocessing import TransactionEncoder\nfrom sklearn import cluster\nfrom sklearn.externals import joblib\nfrom medicine import config\n\nFile_Dir = config.Path['models_dir']\nFile_Name = 'kmeans_{}.pkl'\n\n\ndef create_model(transform_df, n_clusters=8):\n model = cluster.KMeans(n_clusters=n_clusters).fit(transform_df)\n return model\n\n\ndef get_data_from_compound():\n id_list = []\n name_list = []\n medicine_list = []\n for compound in Compound.query.all():\n id_list.append(compound.id)\n name_list.append(compound.name)\n\n medicines = []\n for medicine in compound.medicine:\n medicines.append(medicine.name)\n medicine_list.append(medicines)\n return id_list, name_list, medicine_list\n\n\ndef get_cut_list(datas):\n cut_list = []\n for data in datas:\n data = re.sub('(\\s|KT)', '', data) # 清除 KT 及 空白\n sentences = re.split('\\W', data) # 依據非字元符號split成句子\n\n word_list = []\n for sentence in sentences:\n word_list.extend([word for word in sentence]) # 1字\n word_list.extend([sentence[i:i + 2] for i in range(len(sentence) - 1)]) # 2字\n cut_list.append(word_list)\n return cut_list\n\n\ndef get_transform_df(cut_list):\n te = TransactionEncoder()\n te_ary = te.fit(cut_list).transform(cut_list)\n return pd.DataFrame(te_ary, columns=te.columns_)\n\n\nclass Cluster(object):\n def __init__(self, n_clusters=8):\n self.compound_df = self.get_compound_df()\n self.fit = None\n self.get_KMeans_fit(n_clusters)\n\n def get_compound_df(self):\n compound_df = None\n df_file_name = 'compound_df'\n df_file = Path(os.path.join(File_Dir, df_file_name))\n\n if df_file.is_file():\n compound_df = pd.read_pickle(df_file)\n else:\n compound_ids, compound_names, compound_medicines = get_data_from_compound()\n compound_df = pd.DataFrame(data={'id': compound_ids,\n 'name': compound_names,\n 'medicine': compound_medicines})\n compound_df = compound_df.set_index('id')\n compound_df.to_pickle(df_file)\n return compound_df\n\n def get_KMeans_fit(self, n_clusters=8):\n model_file = Path(os.path.join(File_Dir, File_Name.format(n_clusters)))\n if model_file.is_file():\n self.load_KMeans_model(model_file)\n if self.fit.n_clusters != n_clusters:\n self.fit = None\n if self.fit is None:\n compound_names = self.compound_df['name']\n cut_list = get_cut_list(compound_names)\n transform_df = get_transform_df(cut_list)\n self.fit = create_model(transform_df, n_clusters)\n self.save_KMeans_model(model_file)\n\n def save_KMeans_model(self, path=os.path.join(File_Dir, File_Name)):\n joblib.dump(self.fit, path)\n\n def load_KMeans_model(self, path=os.path.join(File_Dir, File_Name)):\n self.fit = joblib.load(path)\n\n def get_cluster_dict(self):\n cluster_dict = {i: np.where(self.fit.labels_ == i)[0]+1 for i in range(self.fit.n_clusters)}\n return cluster_dict\n\n\nif __name__ == '__main__':\n c = Cluster()\n print(c.fit)\n","sub_path":"medicine/medicine/analysis/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"633261880","text":"from influxdb import InfluxDBClient\r\nimport constants as CS\r\nimport time\r\nimport random\r\n\r\n\r\nif __name__ == \"__main__\":\r\n client = InfluxDBClient(host='localhost', port=8086, database=\"name\")\r\n fields = {x: random.randint(0, 100) for x in CS.VARS_LIST}\r\n while True:\r\n client.write_points([\r\n {\r\n \"measurement\": \"mes\",\r\n \"fields\": fields\r\n }\r\n ])\r\n for a in fields:\r\n fields[a] += -1 + random.randint(0, 1)*2\r\n time.sleep(1)","sub_path":"publ.py","file_name":"publ.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"430835813","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 02 14:01:04 2018\n\n@author: z003umpb\n\"\"\"\nimport os\n\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt, QFile, QIODevice, pyqtSignal\nfrom PyQt5.QtWidgets import QTreeWidgetItem, QFileSystemModel\nfrom PyQt5.QtXml import QDomDocument\n\n#from PyQt5.QtCore import pyqtRemoveInputHook\nfrom rdfnavigatorobjectsdommodel import RDFNavigatorObjectsDomModel\n\nfrom projectstructure_ui import Ui_ProjectStructureWidget\n\nclass RDFNavigatorProjectStructure(QWidget, Ui_ProjectStructureWidget):\n open_file_request = pyqtSignal(str, int)\n def __init__(self, parent=None,):\n super(RDFNavigatorProjectStructure, self).__init__(parent)\n self.setupUi(self)\n self.basicStructureWidget.itemDoubleClicked.connect(self.createOpenFileRequest)\n self.childType = None\n self.objectsStructureModel = RDFNavigatorObjectsDomModel(QDomDocument(), self)\n self.objectsStructureView.setModel(self.objectsStructureModel)\n\n def createProjectTree(self, fileName, graph, childType):\n def createProjectTreeHelper(fileName, graph, root):\n for i in graph[fileName]:\n item = QTreeWidgetItem(root)\n filePath = os.path.join(os.path.dirname(fileName), i)\n item.setText(0, i)\n item.setData(0, Qt.UserRole, filePath)\n item.setIcon(0, QIcon(':/images/xsd.png'))\n createProjectTreeHelper(filePath, graph, item)\n self.childType =childType\n root = QTreeWidgetItem(self.basicStructureWidget)\n root.setText(0, os.path.basename(fileName))\n root.setData(0, Qt.UserRole, fileName)\n root.setIcon(0, QIcon(':/images/xsd.png'))\n createProjectTreeHelper(fileName, graph, root)\n\n def createOpenFileRequest(self, item, column):\n self.open_file_request.emit(item.data(0, Qt.UserRole), self.childType)\n\n def createObjectsTree(self, filePath):\n if os.path.exists(filePath):\n f = QFile(filePath)\n if f.open(QIODevice.ReadOnly):\n document = QDomDocument()\n if document.setContent(f):\n newModel = RDFNavigatorObjectsDomModel(document, self)\n self.objectsStructureView.setModel(newModel)\n self.objectsStructureModel = newModel\n f.close()\n\n def createFileSystemTree(self, filePath):\n self.filesystemModel = QFileSystemModel(self)\n self.filesystemModel.setRootPath(os.path.dirname(filePath))\n self.filesystemTreeView.setModel(self.filesystemModel)\n map(self.filesystemTreeView.hideColumn, range(1, 4))\n\n def on_filesystemTreeView_doubleClicked(self, index):\n file_path = self.filesystemModel.filePath(index)\n self.open_file_request.emit(file_path, self.childType)","sub_path":"rdfnavigatorprojectstructure.py","file_name":"rdfnavigatorprojectstructure.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"423204378","text":"# Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n def cloneGraph(self, node):\n if not node: return None\n mem = {}\n def dfs(cur):\n if cur.label not in mem:\n mem[cur.label] = UndirectedGraphNode(cur.label)\n for child in cur.neighbors:\n if child.label in mem:\n mem[cur.label].neighbors += mem[child.label],\n else:\n mem[cur.label].neighbors += dfs(child),\n \n return mem[cur.label]\n return dfs(node)\n def cloneGraph(self, node):\n if not node: return None\n mem = {}\n q = [node]\n while q:\n cur = q.pop(0)\n if cur.label not in mem:\n mem[cur.label] = UndirectedGraphNode(cur.label)\n for child in cur.neighbors:\n if child.label not in mem:\n mem[child.label] = UndirectedGraphNode(child.label)\n q += child,\n mem[cur.label].neighbors += mem[child.label],\n return mem[node.label]\n ","sub_path":"python/leetcode/graph/133_Clone_Graph.py","file_name":"133_Clone_Graph.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"248277120","text":"\"\"\"MultiThread_vis URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom logical_vis import views, test_diagrams, error_handling\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.home, name='index'),\n url(r'^trace_vis', views.trace_vis, name='index'),\n url(r'^home', views.home, name='home'),\n url(r'logical_comp', views.logical_comp, name='logical_comp'),\n url(r'cy', test_diagrams.cy, name='cy'),\n url(r'draw2d', test_diagrams.draw2d, name='draw2d'),\n url(r'no_canvas', test_diagrams.no_canvas, name='no_canvas'),\n url(r'gojs', test_diagrams.gojs, name='gojs'),\n url(r'mxGraph', test_diagrams.mxgraph, name='mxGraph'),\n url(r'Logical_Data_L0', views.logical_data_l0, name='technical_data_L0'),\n url(r'RawVis', views.raw_tech_vis, name='raw_tech_vis'),\n url(r'Logical_Data_L1', views.logical_data_l1, name='Logical_Data_L1'),\n url(r'Logical_Data_L3', views.logical_data_l3, name='structural_view_L3'),\n url(r'LD_L2_unG', views.logical_data_l2_ungrouped, name='logical_data_l2_ungrouped'),\n url(r'exe_path_L2', views.ld_exe_path_l2, name='logical_data_l2_exe_path'),\n url(r'time_line_view', views.time_line_view, name='time_line_view'),\n url(r'operation_functions_L2', views.functions_ld_l2, name='logical_data_l2_functions'),\n url(r'logical_decision_L2', views.logical_decision_ld_l2, name='logical_data_l2_decision'),\n\n]\n# handler404 = error_handling.error_404_view\n\n# This should be here only during development\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"MultiThread_vis/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"329381618","text":"# -*- coding: utf-8 -*-\n\n# =============================================================================\n# =======概述\n# 创建日期:2018-05-16\n# 编码人员:王学良\n# 简述:主窗口类\n#\n# =======使用说明\n# 。。。\n#\n# =======日志\n# 1.2018-05-16 王学良创建文件\n# =============================================================================\n\n\n# =============================================================================\n# Qt imports\n# =============================================================================\nfrom PyQt5.QtCore import (QSize, QRect, Qt, QMetaObject, QCoreApplication,\n pyqtSignal)\nfrom PyQt5.QtWidgets import (QWidget, QMainWindow, QMenuBar, QMessageBox,\n QFileDialog, QMenu, QToolBar, QAction, QStatusBar,\n QHBoxLayout)\n\n# =============================================================================\n# Package views imports\n# =============================================================================\nfrom stacked_widget import StackedWidget\nfrom paralist_dock import ParalistDock\nfrom new_project_dialog import NewProjectDialog\n\n# =============================================================================\n# Main Window\n# =============================================================================\nclass MainWindow(QMainWindow):\n\n #此处定义常量\n \n #此处定义信号\n \n def __init__(self):\n QMainWindow.__init__(self)\n \n def setup(self):\n# 定义主窗口\n self.setEnabled(True)\n self.resize(800, 600)\n self.setMinimumSize(QSize(800, 600))\n\n# 创建堆叠窗口 \n self.mw_stacked_widget = StackedWidget()\n self.mw_stacked_widget.setup()\n\n# 创建参数列表窗口\n self.mw_paralist_dock = ParalistDock()\n self.mw_paralist_dock.setup()\n self.addDockWidget(Qt.DockWidgetArea(1), self.mw_paralist_dock)\n\n# 设置主窗口布局\n self.mainwindow_layout = QWidget(self)\n self.mainwindow_layout.setObjectName(\"mainwindow_layout\") \n self.horizontalLayout = QHBoxLayout(self.mainwindow_layout)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setSpacing(0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.horizontalLayout.addWidget(self.mw_stacked_widget)\n self.setCentralWidget(self.mainwindow_layout)\n \n# 创建菜单栏\n self.menubar = QMenuBar(self)\n self.menubar.setGeometry(QRect(0, 0, 800, 23))\n self.menubar.setObjectName(\"menubar\")\n self.menu_file = QMenu(self.menubar)\n self.menu_file.setObjectName(\"menu_file\")\n self.menu_import = QMenu(self.menu_file)\n self.menu_import.setObjectName(\"menu_import\")\n self.menu_export = QMenu(self.menu_file)\n self.menu_export.setObjectName(\"menu_export\")\n self.menu_edit = QMenu(self.menubar)\n self.menu_edit.setObjectName(\"menu_edit\")\n self.menu_view = QMenu(self.menubar)\n self.menu_view.setObjectName(\"menu_view\")\n self.menu_tools = QMenu(self.menubar)\n self.menu_tools.setObjectName(\"menu_tools\")\n self.menu_window = QMenu(self.menubar)\n self.menu_window.setObjectName(\"menu_window\")\n self.menu_help = QMenu(self.menubar)\n self.menu_help.setObjectName(\"menu_help\")\n self.menu_analysis = QMenu(self.menubar)\n self.menu_analysis.setObjectName(\"menu_analysis\")\n self.menu_mathematics = QMenu(self.menu_analysis)\n self.menu_mathematics.setObjectName(\"menu_mathematics\")\n self.menu_data_manipulation = QMenu(self.menu_analysis)\n self.menu_data_manipulation.setObjectName(\"menu_data_manipulation\")\n self.menu_data_manage = QMenu(self.menu_analysis)\n self.menu_data_manage.setObjectName(\"menu_data_manage\")\n self.menu_plot = QMenu(self.menubar)\n self.menu_plot.setObjectName(\"menu_plot\")\n 
self.setMenuBar(self.menubar)\n \n# 创建状态栏\n self.statusbar = QStatusBar(self)\n self.statusbar.setObjectName(\"statusbar\")\n self.setStatusBar(self.statusbar)\n \n# 创建工具栏\n self.toolbar = QToolBar(self)\n self.toolbar.setObjectName(\"toolbar\")\n self.addToolBar(Qt.TopToolBarArea, self.toolbar)\n \n# 创建动作\n self.action_new = QAction(self)\n self.action_new.setObjectName(\"action_new\")\n self.action_open = QAction(self)\n self.action_open.setObjectName(\"action_open\")\n self.action_import_normal_datafile = QAction(self)\n self.action_import_normal_datafile.setObjectName(\"action_import_normal_datafile\")\n self.action_export_data = QAction(self)\n self.action_export_data.setObjectName(\"action_export_data\")\n self.action_exit = QAction(self)\n self.action_exit.setObjectName(\"action_exit\")\n self.action_simple_math = QAction(self)\n self.action_simple_math.setObjectName(\"action_simple_math\")\n self.action_testpoint_manage = QAction(self)\n self.action_testpoint_manage.setObjectName(\"action_testpoint_manage\")\n self.action_synchronization = QAction(self)\n self.action_synchronization.setObjectName(\"action_synchronization\")\n self.action_tuning = QAction(self)\n self.action_tuning.setObjectName(\"action_tuning\")\n self.action_para_manage = QAction(self)\n self.action_para_manage.setObjectName(\"action_para_manage\")\n self.action_temp_manage = QAction(self)\n self.action_temp_manage.setObjectName(\"action_temp_manage\")\n self.action_options = QAction(self)\n self.action_options.setObjectName(\"action_options\")\n self.action_about = QAction(self)\n self.action_about.setObjectName(\"action_about\")\n self.action_quick_plot = QAction(self)\n self.action_quick_plot.setObjectName(\"action_quick_plot\")\n self.action_custom_defined_plot = QAction(self)\n self.action_custom_defined_plot.setObjectName(\"action_custom_defined_plot\")\n self.action_multi_source_plot = QAction(self)\n self.action_multi_source_plot.setObjectName(\"action_multi_source_plot\")\n self.action_paralist_dock_isclosed = QAction(self)\n self.action_paralist_dock_isclosed.setCheckable(True)\n self.action_paralist_dock_isclosed.setChecked(True)\n self.action_paralist_dock_isclosed.setObjectName(\"action_paralist_dock_isclosed\")\n \n# 将动作添加到对应的菜单下\n self.menu_import.addAction(self.action_import_normal_datafile)\n self.menu_export.addAction(self.action_export_data)\n self.menu_file.addAction(self.action_new)\n self.menu_file.addAction(self.action_open)\n self.menu_file.addSeparator()\n self.menu_file.addAction(self.menu_import.menuAction())\n self.menu_file.addAction(self.menu_export.menuAction())\n self.menu_file.addSeparator()\n self.menu_file.addAction(self.action_exit)\n self.menu_view.addAction(self.action_paralist_dock_isclosed)\n self.menu_tools.addAction(self.action_options)\n self.menu_help.addAction(self.action_about)\n self.menu_mathematics.addAction(self.action_simple_math)\n self.menu_data_manipulation.addAction(self.action_testpoint_manage)\n self.menu_data_manipulation.addAction(self.action_synchronization)\n self.menu_data_manipulation.addAction(self.action_tuning)\n self.menu_data_manage.addAction(self.action_para_manage)\n self.menu_data_manage.addAction(self.action_temp_manage)\n self.menu_analysis.addAction(self.menu_mathematics.menuAction())\n self.menu_analysis.addAction(self.menu_data_manipulation.menuAction())\n self.menu_analysis.addAction(self.menu_data_manage.menuAction())\n self.menu_plot.addAction(self.action_quick_plot)\n self.menu_plot.addAction(self.action_custom_defined_plot)\n 
self.menu_plot.addAction(self.action_multi_source_plot)\n self.menubar.addAction(self.menu_file.menuAction())\n self.menubar.addAction(self.menu_edit.menuAction())\n self.menubar.addAction(self.menu_view.menuAction())\n self.menubar.addAction(self.menu_analysis.menuAction())\n self.menubar.addAction(self.menu_plot.menuAction())\n self.menubar.addAction(self.menu_tools.menuAction())\n self.menubar.addAction(self.menu_window.menuAction())\n self.menubar.addAction(self.menu_help.menuAction())\n self.toolbar.addAction(self.action_import_normal_datafile)\n\n self.retranslate()\n# QMetaObject.connectSlotsByName(self)\n \n# =======连接信号与槽\n self.mw_paralist_dock.signal_close.connect(self.slot_paralist_dock_close)\n\n\n# =============================================================================\n# Views\n# =============================================================================\n\n# =============================================================================\n# 与新建项目有关的显示\n def view_new(self):\n return NewProjectDialog().get_project_info()\n\n def view_set_window_title(self, title):\n if title:\n self.setWindowTitle(title + ' - Demo')\n# =============================================================================\n# 与打开项目有关的显示\n \n# 响应打开项目的指令,函数返回一个str型的文件路径\n def view_open(self):\n sel_pro = QFileDialog.getExistingDirectory(self, 'Open Program')\n if sel_pro:\n sel_pro = sel_pro.replace('/','\\\\')\n return sel_pro\n else:\n return None\n \n def view_open_status(self, status, pro_name):\n if status:\n \n tipDialog = QMessageBox(self)\n tipDialog.resize(300,100)\n tipDialog.setWindowTitle(\"Information\")\n tipDialog.setText(\"Open a project successfully!\")\n tipDialog.exec_()\n else:\n tipDialog = QMessageBox(self)\n tipDialog.resize(300,100)\n tipDialog.setWindowTitle(\"Caution\")\n tipDialog.setText(\"Unsuccessfully, It's not a project!\")\n tipDialog.exec_() \n\n# =============================================================================\n# 与关于数据导入有关的显示\n \n# \n\n# =============================================================================\n# 与关于信息显示有关的显示\n \n# 显示About信息\n def view_about(self):\n QMessageBox.about(self,\n QCoreApplication.translate(\"MainWindow\", \"关于演示程序\"),\n QCoreApplication.translate(\"MainWindow\", \"\"\"演示程序\n
试飞数据绘图软件\n
Copyright © FTCC\n 由试飞中心试飞工程部绘图软件开发团队开发维护\n \"\"\"))\n\n# =============================================================================\n# 与参数窗口有关的显示\n \n# 响应参数窗口显示动作\n def control_paralist_dock_isclosed(self):\n if self.mw_paralist_dock.isHidden():\n self.mw_paralist_dock.setHidden(False)\n else:\n self.mw_paralist_dock.setHidden(True)\n\n\n# =============================================================================\n# Slots \n# =============================================================================\n\n# 参数窗口关闭后需要把视图下的勾选去掉\n def slot_paralist_dock_close(self):\n self.action_paralist_dock_isclosed.setChecked(False)\n\n\n# =============================================================================\n# 汉化\n# =============================================================================\n def retranslate(self):\n _translate = QCoreApplication.translate\n self.setWindowTitle(_translate(\"MainWindow\", \"演示程序\"))\n self.menu_file.setTitle(_translate(\"MainWindow\", \"文件\"))\n self.menu_import.setTitle(_translate(\"MainWindow\", \"导入\"))\n self.menu_export.setTitle(_translate(\"MainWindow\", \"导出\"))\n self.menu_edit.setTitle(_translate(\"MainWindow\", \"编辑\"))\n self.menu_view.setTitle(_translate(\"MainWindow\", \"视图\"))\n self.menu_tools.setTitle(_translate(\"MainWindow\", \"工具\"))\n self.menu_window.setTitle(_translate(\"MainWindow\", \"窗口\"))\n self.menu_help.setTitle(_translate(\"MainWindow\", \"帮助\"))\n self.menu_analysis.setTitle(_translate(\"MainWindow\", \"分析\"))\n self.menu_mathematics.setTitle(_translate(\"MainWindow\", \"数学计算\"))\n self.menu_data_manipulation.setTitle(_translate(\"MainWindow\", \"数据操作\"))\n self.menu_data_manage.setTitle(_translate(\"MainWindow\", \"数据管理\"))\n self.menu_plot.setTitle(_translate(\"MainWindow\", \"绘图\"))\n self.mw_paralist_dock.setWindowTitle(_translate(\"MainWindow\", \"参数窗口\"))\n self.mw_paralist_dock.line_edit_search_para.setPlaceholderText(_translate(\"MainWindow\", \"过滤器\"))\n self.toolbar.setWindowTitle(_translate(\"MainWindow\", \"工具栏\"))\n self.action_new.setText(_translate(\"MainWindow\", \"新建\"))\n self.action_open.setText(_translate(\"MainWindow\", \"打开\"))\n self.action_import_normal_datafile.setText(_translate(\"MainWindow\", \"通用数据\"))\n self.action_export_data.setText(_translate(\"MainWindow\", \"数据文件\"))\n self.action_export_data.setToolTip(_translate(\"MainWindow\", \"数据文件\"))\n self.action_exit.setText(_translate(\"MainWindow\", \"退出\"))\n self.action_simple_math.setText(_translate(\"MainWindow\", \"简单计算\"))\n self.action_testpoint_manage.setText(_translate(\"MainWindow\", \"试验点\"))\n self.action_synchronization.setText(_translate(\"MainWindow\", \"时间同步\"))\n self.action_tuning.setText(_translate(\"MainWindow\", \"调频\"))\n self.action_para_manage.setText(_translate(\"MainWindow\", \"参数\"))\n self.action_temp_manage.setText(_translate(\"MainWindow\", \"模板\"))\n self.action_options.setText(_translate(\"MainWindow\", \"选项\"))\n self.action_about.setText(_translate(\"MainWindow\", \"关于\"))\n self.action_quick_plot.setText(_translate(\"MainWindow\", \"快速绘图\"))\n self.action_custom_defined_plot.setText(_translate(\"MainWindow\", \"自定义绘图\"))\n self.action_multi_source_plot.setText(_translate(\"MainWindow\", \"并行绘图\"))\n self.action_paralist_dock_isclosed.setText(_translate(\"MainWindow\", \"参数窗口\"))\n \n# =============================================================================\n# def retranslate(self):\n# _translate = QCoreApplication.translate\n# self.setWindowTitle(_translate(\"MainWindow\", \"演示\"))\n# 
self.menu_file.setTitle(_translate(\"MainWindow\", \"File\"))\n# self.menu_import.setTitle(_translate(\"MainWindow\", \"Import\"))\n# self.menu_export.setTitle(_translate(\"MainWindow\", \"Export\"))\n# self.menu_edit.setTitle(_translate(\"MainWindow\", \"Edit\"))\n# self.menu_view.setTitle(_translate(\"MainWindow\", \"View\"))\n# self.menu_tools.setTitle(_translate(\"MainWindow\", \"Tools\"))\n# self.menu_window.setTitle(_translate(\"MainWindow\", \"Window\"))\n# self.menu_help.setTitle(_translate(\"MainWindow\", \"Help\"))\n# self.menu_analysis.setTitle(_translate(\"MainWindow\", \"Analysis\"))\n# self.menu_mathematics.setTitle(_translate(\"MainWindow\", \"Mathematics\"))\n# self.menu_data_manipulation.setTitle(_translate(\"MainWindow\", \"Data Manipulation\"))\n# self.menu_data_manage.setTitle(_translate(\"MainWindow\", \"Data Manage\"))\n# self.menu_plot.setTitle(_translate(\"MainWindow\", \"Plot\"))\n# self.mw_paralist_dock.setWindowTitle(_translate(\"MainWindow\", \"Parameters\"))\n# self.mw_paralist_dock.line_edit_search_para.setPlaceholderText(_translate(\"MainWindow\", \"Filter\"))\n# self.toolbar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\"))\n# self.action_new.setText(_translate(\"MainWindow\", \"New\"))\n# self.action_open.setText(_translate(\"MainWindow\", \"Open\"))\n# self.action_import_normal_datafile.setText(_translate(\"MainWindow\", \"Normal Datafile\"))\n# self.action_export_data.setText(_translate(\"MainWindow\", \"Data File\"))\n# self.action_export_data.setToolTip(_translate(\"MainWindow\", \"Data File\"))\n# self.action_exit.setText(_translate(\"MainWindow\", \"Exit\"))\n# self.action_simple_math.setText(_translate(\"MainWindow\", \"Simple Math...\"))\n# self.action_testpoint_manage.setText(_translate(\"MainWindow\", \"Test Point\"))\n# self.action_synchronization.setText(_translate(\"MainWindow\", \"Synchronization\"))\n# self.action_tuning.setText(_translate(\"MainWindow\", \"Tuning\"))\n# self.action_para_manage.setText(_translate(\"MainWindow\", \"Parameters\"))\n# self.action_temp_manage.setText(_translate(\"MainWindow\", \"Templates\"))\n# self.action_options.setText(_translate(\"MainWindow\", \"Options\"))\n# self.action_about.setText(_translate(\"MainWindow\", \"About Demo\"))\n# self.action_quick_plot.setText(_translate(\"MainWindow\", \"Quick Plot\"))\n# self.action_custom_defined_plot.setText(_translate(\"MainWindow\", \"Custom Defined Plot\"))\n# self.action_multi_source_plot.setText(_translate(\"MainWindow\", \"Multi-source Plot\"))\n# self.action_paralist_dock_isclosed.setText(_translate(\"MainWindow\", \"Parameters Dock\"))\n# =============================================================================\n","sub_path":"lib/views/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":17735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"325780173","text":"import os\r\nimport csv\r\nimport shutil\r\nfrom pathlib import Path\r\nimport re\r\n\r\nfrom datetime import datetime\r\n\r\ndataIndex = 0\r\n\r\nclass ImFolderSearch():\r\n \"\"\"\r\n 指定ディレクトリの配下にあるディレクトリやファイルのパスを生成する\r\n @params:path 対象ディレクトリ\r\n @return:fileList\r\n \"\"\"\r\n \r\n\r\n def __init__(self, folderPath, ReadSysfile=False):\r\n self.folderPath = folderPath\r\n self.ReadSysfile = ReadSysfile\r\n self.resultList = []\r\n \r\n\r\n def __gene(self, folderPath):\r\n \"\"\"\r\n 再帰的にフォルダ内を検索する。\r\n @params フォルダパス\r\n @return 更新があったファイル名を返す。\r\n \"\"\"\r\n for dirpath, dirnames, filenames in os.walk(folderPath, topdown=True):\r\n\r\n print(\"データ処理中:\" + datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\r\n print(\"_dirpath:\", dirpath)\r\n\r\n #更新されてないフォルダリストを作成。\r\n deleteDirnames = self.__judgeUpdateFolder(dirnames, dirpath)\r\n\r\n #検索対象外フォルダを除外\r\n for deleteName in deleteDirnames:\r\n try:\r\n dirnames.remove(deleteName)\r\n except:\r\n print(\"exception!\")\r\n\r\n #ファイル処理\r\n for name in filenames:\r\n if self.__judgeUpdateFile(os.path.join(dirpath, name)):\r\n fullPath = os.path.join(dirpath, name)\r\n yield self.__getCsvValue(fullPath)\r\n\r\n\r\n def __judgeUpdateFile(self, filePath):\r\n \"\"\"\r\n ファイル更新日が今日のファイルかを判断(csvのみ)\r\n @param:ファイルのフルパス\r\n @return:更新日が今日の場合にはtrue\r\n \"\"\"\r\n #拡張子.csv以外はスルー\r\n fileName, ext = os.path.splitext(filePath)\r\n if ext != '.csv':\r\n return False\r\n \r\n #今日の日付、更新日を取得する\r\n today = datetime.now().date()\r\n lastModified = os.stat(filePath).st_mtime\r\n lastModifiedDt = datetime.fromtimestamp(lastModified).date()\r\n\r\n #ファイル更新日チェック\r\n if today == lastModifiedDt:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def __judgeUpdateFolder(self, dirnames, dirpath):\r\n \"\"\"\r\n フォルダの更新日、ファイル名:QRを調べる\r\n @params:フォルダ名のリスト、フォルダパス\r\n @return:更新日以外のフォルダ名リスト\r\n \"\"\"\r\n removeNameList = []\r\n today = datetime.now().date()\r\n\r\n for strdirname in dirnames:\r\n ftime = os.path.getmtime(os.path.join(dirpath, strdirname))\r\n modFolderTime = datetime.fromtimestamp(ftime).date()\r\n #print(today, \"_\", modFolderTime)\r\n #更新日以外のファイルを取得\r\n if today != modFolderTime:\r\n removeNameList.append(strdirname)\r\n continue\r\n #QR以外を取得\r\n if \"QR\" not in strdirname:\r\n removeNameList.append(strdirname)\r\n\r\n return removeNameList\r\n\r\n\r\n def __getCsvValue(self, fullPath):\r\n \"\"\"\r\n 測定器のcsvファイルを読み込んで、測定日・ロット・品番などをListで取得する。\r\n @params:ファイルのフルパス\r\n @return:ファイル内容のList\r\n \"\"\"\r\n fileContensList = []\r\n global dataIndex\r\n\r\n #ディレクトリの場合、対象外\r\n if os.path.isdir(fullPath):\r\n return fileContensList\r\n\r\n #データ読取り用TEMPファイルを作る\r\n tempBaseName = \"_\" + os.path.basename(fullPath)\r\n tempFullFileName = os.path.join(os.path.dirname(fullPath), tempBaseName)\r\n shutil.copy(fullPath, tempFullFileName)\r\n\r\n #親ディレクトリを取得する。\r\n p = str(Path(fullPath).parent)\r\n p = p.split(\"\\\\\")\r\n dirName = p[-1]\r\n \r\n #ファイルを開く\r\n with open(tempFullFileName, \"r\") as csv_file:\r\n #リスト形式で取得\r\n f = csv.reader(csv_file)\r\n\r\n for index, row in enumerate(f): \r\n # ヘッダーを読み飛ばし\r\n if index < 4: \r\n continue\r\n #内容を辞書に保存\r\n contents = {}\r\n contents['index'] = '{0:04d}'.format(dataIndex) \r\n contents['upDate'] = row[1]\r\n contents['lotNo'] = row[2]\r\n contents['dirName'] = dirName\r\n contents['fileName'] = os.path.basename(fullPath)\r\n contents['fullPath'] = fullPath\r\n dataIndex += 1\r\n \r\n fileContensList.append(contents)\r\n\r\n #一時ファイルを消去\r\n if 
os.path.exists(tempFullFileName):\r\n os.remove(tempFullFileName)\r\n\r\n return fileContensList\r\n\r\n\r\n def ImFolderSearch(self):\r\n \"\"\"\r\n 測定器フォルダ内の更新ありファイルを検索してリストを返す。\r\n \"\"\"\r\n \r\n # リストにする\r\n self.resultList = [file for file in self.__gene(self.folderPath)]\r\n\r\n return(self.resultList)\r\n\r\n\r\n def __listCheck(self, lotNoList):\r\n \"\"\"\r\n ロットNoをチェック\r\n @params:ロットNoのlist(品番毎)\r\n @return:OKの場合trueを返す。NGの場合はfalseを返す。\r\n \"\"\"\r\n\r\n #リストを集合に変換\r\n setNoList = set(lotNoList)\r\n\r\n #check1:記入されていない\r\n if \"\" in lotNoList:\r\n return False\r\n\r\n #check2:サンプル総数が3の倍数か?\r\n if (len(lotNoList) % 3) != 0:\r\n return False\r\n \r\n #check3:ロット毎に違うロットNoが記入されているか?\r\n if (len(lotNoList) / 3) != len(setNoList):\r\n return False\r\n \r\n #check4:ロット番号の打ち間違いを正規表現で検索(例:20180405-01 or 20180405-1はOK)\r\n for i in lotNoList:\r\n result = re.search('20[0-9]{2}[0-1][0-9][0-3][0-9]-[0-9]{1,4}$', i)\r\n if result is None:\r\n return False\r\n \r\n return True\r\n\r\n\r\n def isCheckContents(self):\r\n \"\"\"\r\n 内容に重複、間違いがあるかをチェックする。\r\n @params:\r\n @return:検査結果NGの品番\r\n \"\"\"\r\n\r\n errorList = []\r\n\r\n #dirNameとLotNoを取得\r\n for innerContent in self.resultList:\r\n\r\n #品番ごとに処理\r\n lotNoList = []\r\n #lotNoのリスト作成\r\n for content in innerContent:\r\n lotNoList.append(content['lotNo'])\r\n productName = content['dirName']\r\n #エラー検査\r\n if self.__listCheck(lotNoList) == False:\r\n errorList.append(productName) \r\n\r\n return (errorList)\r\n\r\n\r\n\r\n","sub_path":"ImFolderSearch.py","file_name":"ImFolderSearch.py","file_ext":"py","file_size_in_byte":7165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"203772509","text":"import json\n\nfrom django.db import models\nfrom rest_framework import viewsets, filters, serializers, status\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\nfrom simulador.pagination import BasePagination\nfrom simulador.resources.account import Account, AccountDetailSerializer\nfrom simulador.resources.program_practice import get_results_by_user\nfrom simulador.resources.results import Results, ResultsSerializer, ResultsDetailSerializer\nfrom simulador.resources.results_zone import ResultsZone, ResultsZoneSerializer\n\n\nclass CustomPractices(models.Model):\n practicing = models.ForeignKey(Account)\n results = models.ManyToManyField(Results, help_text='''\n [\n {\n \"lesson\":1,\n \"type_of_fire\":1,\n \"position\":2,\n \"results_zone\":[\n {\n \"zone\": 10,\n \"time\": 3000,\n \"score\": 10\n },\n {\n \"zone\": 3,\n \"time\": 3000,\n \"score\": 3\n }\n ]\n },\n {\n \"lesson\":1,\n \"type_of_fire\":2,\n \"position\":2,\n \"results_zone\":[\n {\n \"zone\": 5,\n \"time\": 3000,\n \"score\": 10\n },\n {\n \"zone\": 5,\n \"time\": 3000,\n \"score\": 5\n }\n ]\n }\n ]\n ''')\n date_practice = models.DateTimeField(auto_now_add=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __unicode__(self):\n return \"%s\" % self.id\n\n class Meta:\n ordering = ['created_at']\n\n\nclass CustomPracticesSerializer(serializers.ModelSerializer):\n # results = serializers.CharField(\n # help_text='''[{\"lesson\":1,\"type_of_fire\":1,\"position\":2,\"results_zone\":[{\"zone\": \"10\",\"time\": 3000,\"score\": 10},{\"zone\": \"3\",\"time\": 3000,\"score\": 3}]},{\"lesson\":1,\"type_of_fire\":2,\"position\":2,\"results_zone\":[{\"zone\": \"5\",\"time\": 3000,\"score\": 10},{\"zone\": \"5\",\"time\": 3000,\"score\": 5}]}]''')\n class Meta:\n model = CustomPractices\n fields = (\n 'id', 'practicing', 'results', 'date_practice', 'created_at', 'updated_at')\n\n\nclass CustomPracticesDetailSerializer(serializers.ModelSerializer):\n practicing = AccountDetailSerializer(read_only=True)\n results = ResultsDetailSerializer(read_only=True, many=True)\n\n class Meta:\n model = CustomPractices\n fields = (\n 'id', 'practicing', 'results', 'date_practice', 'created_at', 'updated_at')\n\n\nclass CustomPracticesViewSet(viewsets.ModelViewSet):\n queryset = CustomPractices.objects.all()\n serializer_class = CustomPracticesSerializer\n pagination_class = BasePagination\n filter_backends = (filters.DjangoFilterBackend, filters.SearchFilter,)\n filter_fields = ('id', 'practicing', 'date_practice',)\n search_fields = ('$id', 'practicing', 'date_practice',)\n\n @detail_route()\n def results(self, request, pk):\n query_params = self.request.query_params\n practices_serial_data = CustomPracticesSerializer(CustomPractices.objects.filter(id=pk), many=True).data\n response = {}\n response_status = status.HTTP_200_OK\n if len(practices_serial_data) > 0:\n list_result_practice = []\n if 'practicing' in query_params:\n response = get_results_by_user(query_params['practicing'], pk)\n else:\n for practice_result in practices_serial_data:\n list_result_practice.append(get_results_by_user(practice_result['practicing'], pk))\n response = {'result': list_result_practice}\n else:\n response['detail'] = 'No se encontraron resultados'\n response_status = status.HTTP_204_NO_CONTENT\n return Response(response, response_status)\n\n def get_serializer_class(self):\n query_params = 
self.request.query_params\n if 'is_complete_serializer' in query_params and query_params['is_complete_serializer'] == '1':\n return CustomPracticesDetailSerializer\n else:\n return CustomPracticesSerializer\n\n def create(self, request, *args, **kwargs):\n data = self.request.data\n response = {}\n is_complete = False\n if 'results' in data:\n d = json.dumps(data['results'], ensure_ascii=False, encoding='utf8')\n # noinspection PyBroadException\n try:\n results = json.loads(d, encoding='utf-8')\n if type(results) in (tuple, list):\n list_serialized = []\n is_all_valid = True\n last_serialized = \"\"\n try:\n for result in results:\n # add zones\n list_result_zone = []\n list_result_zone_valid = True\n for result_zone in result['results_zone']:\n serial_result_zone = ResultsZoneSerializer(data=result_zone)\n list_result_zone.append(serial_result_zone)\n if not serial_result_zone.is_valid():\n list_result_zone_valid = False\n if list_result_zone_valid:\n for result_zone in list_result_zone:\n result_zone.save()\n\n result[\"results_zone\"] = []\n [result[\"results_zone\"].append(x.data['id']) for x in list_result_zone]\n list_id_zones = result[\"results_zone\"]\n serialized = ResultsSerializer(data=result)\n if serialized.is_valid():\n list_serialized.append(serialized)\n else:\n last_serialized = serialized\n is_all_valid = False\n list_id = []\n if is_all_valid is True:\n for serialized in list_serialized:\n serialized.save()\n list_id.append(serialized.data['id'])\n is_complete = True\n else:\n response = last_serialized.errors\n for id_result_zone in list_id_zones:\n ResultsZone.objects.filter(id=id_result_zone).delete()\n\n if is_complete:\n data_practice = {'results': list_id, 'practicing': request.data['practicing']}\n practice_serial = CustomPracticesSerializer(data=data_practice)\n if practice_serial.is_valid():\n practice_serial.save()\n response = practice_serial.data\n else:\n for id_result in list_id:\n Results.objects.filter(id=id_result).delete()\n response = practice_serial.errors\n else:\n for id_result in list_id:\n Results.objects.filter(id=id_result).delete()\n\n except StandardError as err:\n response = {\"results_zone\": \"El formato Json debe ser array\", \"fd\": repr(err)}\n else:\n response = {\"result\": \"El formato Json debe ser array\"}\n except:\n response = {\"result\": \"Formato Json no valido\"}\n return Response(response)\n","sub_path":"simulador/resources/custom_practices.py","file_name":"custom_practices.py","file_ext":"py","file_size_in_byte":9333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"354767947","text":"#!/usr/bin/env python3\n\nimport asyncio,json\nfrom collections import deque\nimport time,random,itertools\n\n'''\nMessages passed between nodes are simply Python dictionaries that are\nserialized to JSON.\n\nEvery RPC request contains the following fields:\n\n rpc: string, possible values are \"PING\", \"FIND_NODE\", \"FIND_VALUE\", \"STORE\"\n type: \"REQ\"\n echo: random 160 bit value encoded as hex string, needs to be echoed back in reply\n src: the requester's id\n\n Additional fields for PING: None\n Additional fields for FIND_NODE: id: string, 160 bit node id as hex string\n Additional fields for FIND_VALUE: key: string, 160 bit key as hex string\n Additional fields for STORE: key: string, 160 bit key as hex string\n val: string, 160 bit key as hex string\n\n {\"rpc\": \"PING\", \"type\": \"REQ\", \"echo\": \"f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0\", \"id\": \"70c07ec18ef89c5309bbb0937f3a6342411e1fdd\"}\n\nEvery RPC reply contains the following fields:\n\n rpc and echo as above\n\n type: \"REP\"\n\n src: the replier's id\n\n Additional fields for PING: None\n Additional fields for FIND_NODE: nodes: list of tuples with (\"address\",port) pairs\n\n Additional fields for FIND_VALUE: if key/val not on this node:\n nodes: list of tuples with (\"address\",port) pairs\n else:\n value: the stored value\n\n Additional fields for STORE: None\n'''\n\n# this sublass implements the transmission of RPC requsts to other Kademlia nodes\nclass KademliaRPCProtocol(asyncio.DatagramProtocol):\n def __init__(self,rpc):\n self.rpc = rpc\n\n def connection_made(self,transport):\n try:\n tmp = json.dumps(self.rpc).encode()\n except:\n print(\"Error serializing RPC request\")\n else:\n transport.sendto(tmp)\n finally:\n transport.close()\n\n# this subclass is used for listening for RPCs and replies from other Kademlia nodes\nclass KademliaListenProtocol(asyncio.DatagramProtocol):\n def __init__(self,message_queue):\n # message queue will have the pending requests appended to it\n self.message_queue = message_queue\n\n def connection_made(self,transport):\n self.transport = transport\n print(\"Connected to socket\")\n\n def datagram_received(self,data,addr):\n print(f\"{data} received from {addr}\")\n\n try:\n req = json.loads(data.decode())\n except:\n print(\"Error deserializing received data\")\n else:\n tmp = {\"addr\": addr}\n self.message_queue.append({**tmp, **req})\n\n# class that stores peer information such as ID, address and alive status\n# which is important for updating the routing table\nclass KademliaPeer:\n def __init__(self,id,peer_addr,own_id,event_loop,alive=True,timeout=5):\n self.id = id\n self.peer_addr = peer_addr\n self.own_id = own_id\n self.event_loop = event_loop\n self.alive = alive\n # timeout after which the peer is considered dead after sending a request\n self.timeout = timeout\n # this property is set by the KademliaNode class when it finds a response\n # from this peer in the incoming message queue\n self.response = None\n # the expected random echo value\n self.expected_echo = None\n\n # wait for a reply to come in\n async def wait_response(self):\n while self.response == None:\n await asyncio.sleep(0.01)\n tmp = self.response\n self.response = None\n return tmp\n\n # launch a generic request\n async def generic_request(self,req):\n # make sure the response field is none otherwise we might be getting\n # in trouble later\n self.response = None\n\n conn = self.event_loop.create_datagram_endpoint(\n lambda: KademliaRPCProtocol(req),\n 
remote_addr=self.peer_addr,\n reuse_port=True)\n\n # not sure about this yet yep -->\n # conn is a task therefore a future therefore can be awaited\n await self.event_loop.create_task(conn)\n\n try:\n rep = await asyncio.wait_for(self.wait_response(),self.timeout)\n if rep[\"echo\"] != self.expected_echo:\n rep = None\n except asyncio.TimeoutError:\n rep = None\n\n return rep\n\n # ping this node to see if it is still alive\n async def ping(self):\n self.expected_echo = hex(random.randint(0,2**160-1))\n\n req = { \"rpc\": \"PING\", \"type\": \"REQ\", \"src\": self.own_id,\n \"echo\": self.expected_echo}\n\n rep = await self.generic_request(req)\n\n # if the reply is a None object then the node timed out or responded\n # with an incorrect echo\n self.alive = rep != None\n\n # send a find_node rpc to this node\n async def find_node(self,id):\n self.expected_echo = hex(random.randint(0,2**160-1))\n\n req = { \"rpc\": \"FIND_NODE\", \"type\": \"REQ\", \"src\": self.own_id,\n \"echo\": self.expected_echo,\n \"id\": id}\n\n rep = await self.generic_request(req)\n\n if rep != None:\n return rep[\"nodes\"]\n else:\n return None\n\n # send a find_value rpc to this node\n async def find_value(self,key):\n self.expected_echo = hex(random.randint(0,2**160-1))\n\n req = { \"rpc\": \"FIND_VALUE\", \"type\": \"REQ\", \"src\": self.own_id,\n \"echo\": self.expected_echo,\n \"key\": key}\n\n rep = await self.generic_request(req)\n\n if rep != None:\n try:\n tmp = rep[\"value\"]\n except KeyError:\n tmp = rep[\"nodes\"]\n return tmp\n else:\n return None\n\n # send a store RPC to this node\n async def store(self,key,value):\n self.expected_echo = hex(random.randint(0,2**160-1))\n\n req = { \"rpc\": \"STORE\", \"type\": \"REQ\", \"src\": self.own_id,\n \"echo\": self.expected_echo,\n \"key\": key,\n \"value\": value}\n\n rep = await self.generic_request(req)\n\n self.response = None\n\n # reply to this node\n async def reply(self,rep):\n conn = self.event_loop.create_datagram_endpoint(\n lambda: KademliaRPCProtocol(rep),\n remote_addr=self.peer_addr,\n reuse_address=True,\n reuse_port=True)\n\n await self.event_loop.create_task(conn)\n\nclass KademliaNode:\n def __init__(self,id,addr,event_loop,bucket_size=20,concurrency=3):\n self.id = id\n self.addr = addr\n self.event_loop = event_loop\n # queues messages received from other Kademlia nodes\n self.message_queue = deque()\n # concurrency parameter\n self.concurrency = concurrency\n\n # storage area\n self.storage = {}\n\n # size of one k-bucket in the routing table\n self.bucket_size = bucket_size\n # initialize k buckets\n self.kbuckets = [deque(maxlen=self.bucket_size) for i in range(160)]\n # remember when we last performed a node lookup in the ith bucket\n self.lookup_times = [0 for i in range(160)]\n\n # add the listener task to the event queue\n listener = self.event_loop.create_datagram_endpoint(\n lambda: KademliaListenProtocol(self.message_queue),\n local_addr=self.addr,\n reuse_address=True,\n reuse_port=True)\n\n self.event_loop.create_task(listener)\n\n # initially we're not bootstrapped, user has to kick off the\n # bootstrap procedure by registering that task with the event loop\n self.bootstrapped = False\n\n # kick off the bootstrap process of joining the network\n async def bootstrap(self):\n\n\n self.event_loop.create_task(self.process_message_queue())\n\n # work on the incoming message queue\n async def process_message_queue(self):\n while True:\n # retrieve first message\n try:\n current_message = self.message_queue.popleft()\n except 
IndexError: # nothing in the queue\n                pass\n            else: \n                if current_message[\"type\"] == \"REQ\":\n                    await self.serve_request(current_message)\n                elif current_message[\"type\"] == \"REP\":\n                    # find the replying node in the Kbuckets and deliver the message\n                    # (replies carry the replier's id in the \"src\" field)\n                    peer = self.find_peer(current_message[\"src\"])\n                    if peer is not None:\n                        peer.response = current_message\n\n                        # update the routing table\n                        await self.update_kbucket(peer)\n\n            await asyncio.sleep(0.01)\n\n    # compose a reply to the passed in request\n    async def serve_request(self,req):\n\n        # requests carry the requester's id in \"src\" (\"id\" is the FIND_NODE target)\n        peer = KademliaPeer(id=req[\"src\"],\n                            peer_addr=req[\"addr\"],\n                            own_id=self.id,\n                            event_loop=self.event_loop)\n        \n        rep = None\n\n        if req[\"rpc\"] == \"PING\":\n            rep = {\"src\": self.id, \"type\": \"REP\", \"echo\": req[\"echo\"]}\n\n        elif req[\"rpc\"] == \"STORE\":\n            tmp = {req[\"key\"]: req[\"val\"]}\n            self.storage = {**self.storage,**tmp} \n            rep = {\"src\": self.id, \"type\": \"REP\", \"echo\": req[\"echo\"]}\n\n        elif req[\"rpc\"] == \"FIND_NODE\":\n            nodes = self.find_closest_nodes(req[\"id\"])\n\n            rep = {\"src\": self.id, \"type\": \"REP\", \"echo\": req[\"echo\"],\n                   \"nodes\": nodes}\n\n        elif req[\"rpc\"] == \"FIND_VALUE\":\n            try:\n                val = self.storage[req[\"key\"]]\n                rep = {\"src\": self.id, \"type\": \"REP\", \n                       \"echo\": req[\"echo\"], \"value\": val}\n            except KeyError:\n                nodes = self.find_closest_nodes(req[\"key\"]) \n                rep = {\"src\": self.id, \"type\": \"REP\", \"echo\": req[\"echo\"],\n                       \"nodes\": nodes}\n        \n        await peer.reply(rep)\n        \n        # update the routing table\n        await self.update_kbucket(peer)\n\n\n    # find the k closest nodes to the given id and return a list of (address, port, id) triples\n    def find_closest_nodes(self,id):\n        # get the initial k-bucket index which contains the closest known\n        # nodes to the given ID; the distance is an int, id_to_bucket expects hex\n        initial_kbucket = self.id_to_bucket(hex(self.distance(self.id,id)))\n        # reply with k nodes\n        tmp = deque(self.kbuckets)\n        tmp.reverse()\n        tmp.rotate(initial_kbucket+1)\n        it = itertools.chain.from_iterable(tmp)\n\n        # islice stops early if fewer than k peers are known\n        peers = itertools.islice(it, self.bucket_size)\n        nodes = [(p.peer_addr[0],p.peer_addr[1],p.id) for p in peers] \n\n        return nodes\n\n    # update the appropriate kbucket using the given KademliaPeer object\n    async def update_kbucket(self,peer):\n        bucket_index = self.id_to_bucket(peer.id)\n\n        # membership is checked by node id rather than object identity,\n        # since serve_request builds a fresh KademliaPeer for every request\n        existing = self.find_peer(peer.id)\n        if existing is not None:\n            # move the peer to the most recently seen end of the bucket\n            self.kbuckets[bucket_index].remove(existing)\n            self.kbuckets[bucket_index].append(existing)\n        elif len(self.kbuckets[bucket_index]) < self.bucket_size:\n            self.kbuckets[bucket_index].append(peer)\n        else:\n            # ping the least recently seen node == first node in the deque\n            lrs_peer = self.kbuckets[bucket_index].popleft()\n            await lrs_peer.ping()\n\n            if lrs_peer.alive:\n                self.kbuckets[bucket_index].append(lrs_peer)\n            else:\n                self.kbuckets[bucket_index].append(peer)\n\n    # calculate the Kademlia distance metric given two id strings\n    def distance(self,id1,id2):\n        return int(id1,16) ^ int(id2,16)\n\n    # calculate the index in the kbucket list which is the position of\n    # the most significant bit in the binary representation of the id\n    # this relies on the bin() function not returning leading zeros\n    def id_to_bucket(self,id):\n        return len(bin(int(id,16))[2:])-1\n\n    # find the KademliaPeer object with the given id in the k buckets\n    def find_peer(self,id):\n        bucket_index = self.id_to_bucket(id)\n\n        try:\n            peer = [p for p in self.kbuckets[bucket_index] if p.id == id][0]\n        except IndexError:\n            return None\n        else:\n            return peer\n\nif __name__ == \"__main__\":\n\n    loop = asyncio.get_event_loop()\n\n    try:\n        kn = KademliaNode(id='2ef7bde608ce5404e97d5f042f95f89f1c232871',\n                          
addr=('127.0.0.1',5000),\n event_loop=loop)\n loop.create_task(kn.bootstrap())\n loop.run_forever()\n\n finally:\n loop.stop()\n","sub_path":"kademlia/kademlia.py","file_name":"kademlia.py","file_ext":"py","file_size_in_byte":12554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
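The `distance`/`id_to_bucket` pair in the record above is the heart of the routing table. A standalone sketch of the XOR metric with two made-up 160-bit hex ids taken from the record itself; `bit_length()` is equivalent to the `len(bin(...)[2:])-1` trick used there:

```python
# Kademlia XOR distance and bucket index, standalone sketch
def distance(id1: str, id2: str) -> int:
    # distance is the XOR of the two ids interpreted as 160-bit integers
    return int(id1, 16) ^ int(id2, 16)

def id_to_bucket(d: int) -> int:
    # index of the most significant set bit; bucket i holds peers
    # whose distance lies in [2**i, 2**(i+1))
    return d.bit_length() - 1

a = "2ef7bde608ce5404e97d5f042f95f89f1c232871"
b = "70c07ec18ef89c5309bbb0937f3a6342411e1fdd"
print(id_to_bucket(distance(a, b)))  # 158: these ids differ near the top bits
```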
+{"seq_id":"650836621","text":"\nfrom django.core.management import BaseCommand\nfrom api.models.models import supplier_hotels, sn_hotel_map, hotel_listing\nimport pandas as pd\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n\n main_data = pd.read_csv(r\"C:\\Users\\tony\\Downloads\\with_iceportal.csv\")\n print(main_data.head())\n stars = ['3', '0', '4', '2', '6', '1', '5', '3.5', '2.5', '4.5', '1.5', '5.5']\n main_data[\"stars\"] = main_data.stars.str.replace(\"+\", \".5\")\n main_data[\"stars\"] = main_data.stars.str.replace(\" \", \"\")\n main_data[\"stars\"] = main_data.stars.str.replace(\"ANDAHALF\", \".5\")\n main_data[\"stars\"] = main_data.stars.str.replace(\"star\", \"\")\n main_data[\"stars\"] = main_data.stars.str.replace(\"LUXURY\", \"\")\n main_data[\"stars\"] = main_data.stars.str.replace(\" \", \"0\")\n\n # main_data = main_data[main_data.isin(stars)]\n # main_data[\"stars\"] = main_data[\"stars\"].astype(\"float64\")\n\n print(main_data[\"stars\"].value_counts())\n # print(main_data[\"stars\"].value_counts().index)\n for i in range(0, len(main_data)):\n if i in [100, 1000, 20000, 100000, 200000, 400000]:\n print(i)\n try:\n stars = int(main_data.iloc[i][\"stars\"])\n except:\n stars = 0\n hotel_listing.objects.update_or_create(\n provider=main_data.iloc[i][\"provider\"],\n simplenight_id=main_data.iloc[i][\"sn_id\"],\n address=main_data.iloc[i][\"address\"],\n city=main_data.iloc[i][\"city_names\"],\n hotelid=main_data.iloc[i][\"hotelid\"],\n zipcode=main_data.iloc[i][\"zipcode\"],\n stars=stars,\n countrycode=main_data.iloc[i][\"countrycode\"]\n )\n # except:\n # print(main_data.iloc[i])\n # for x in range(0, len(hotels)):\n # try:\n # if \"HALF\" in (str(hotels.iloc[x]['Category Name']).replace(\"STARS\", \"\")):\n # rating = int(\n # str(str(hotels.iloc[x]['Category Name']).replace(\"STARS\", \"\").replace(\"AND A HALF\", \"\")))+.5\n # else:\n # rating = int(\n # str(hotels.iloc[x]['Category Name']).replace(\"STARS\", \"\"))\n # except:\n # pass\n # try:\n # c = hotels.iloc[x]['City'].strip()\n # add = hotels.iloc[x]['Address'].strip()\n # country = hotels.iloc[x]['Country Name'].strip()\n # print(c.lower())\n # print(add.lower())\n # print(country.lower())\n # supplier_hotels.objects.update_or_create(\n # provider_id=0,\n # hotel_codes=hotels.iloc[x]['Hotel Code'],\n # hotel_name=hotels.iloc[x]['Hotel Name'].strip(),\n # rating=rating,\n # chain_name=hotels.iloc[x]['Chain Name'].strip(),\n # country_name=country.lower(),\n # destination_name=hotels.iloc[x]['Destination Name'].strip(\n # ),\n # address=add.lower(),\n # postal_code=hotels.iloc[x]['Postal Code'],\n # city=c.lower()\n # )\n # except:\n # print(\"something went wrong with {}\".format(\n # hotels.iloc[x]['Hotel Code']))\n\n # LIST OF HOTELS FROM PROVIDER\n","sub_path":"api/management/commands/hotelimports.py","file_name":"hotelimports.py","file_ext":"py","file_size_in_byte":3507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"58507021","text":"#1 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]\n\n def dft(self, starting_vertex):\n \"\"\"\n Print each vertex in depth-first order\n beginning from starting_vertex.\n \"\"\"\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)\n \n# def get_neighbors_list(list_node):\n# list_neighbors = []\n# gg = Graph()\n# for node in list_node:\n# list_neighbors.append(gg.get_neighbors(node))\n# return list_neighbors\ndef earliest_ancestor(ancestors, starting_node):\n # hierarchy = {}\n # level = 0 \n gg = Graph()\n # list_neighbors = []\n for parent, child in ancestors:\n gg.add_vertex(parent)\n gg.add_vertex(child)\n for parent, child in ancestors:\n gg.add_edge(child,parent)\n \n # hierarchy[level] = [starting_node]\n # list_neighbors = gg.get_neighbors(starting_node)\n # while list_neighbors is not None:\n # new_list = []\n # print(\"---- \",hierarchy)\n # level += 1 \n # for node in list_neighbors:\n # print(\"node ---- \",node)\n # if (gg.get_neighbors(node) != set()):\n # new_list.append(list(gg.get_neighbors(node)))\n # hierarchy[level] = new_list\n # list_neighbors = new_list\n # print(\"---- \",hierarchy)\n\n\n # print(gg.get_neighbors(2))\n # gg.get_neighbors(hierarchy[level])\n # ll = []\n # for next_vert in gg.get_neighbors(hierarchy[level]):\n # ll.append(next_vert)\n # hierarchy[level] = ll\n # print(hierarchy)\n\n # else:\n # if starting_vertex == gg.get_neighbors(hierarchy[level]):\n # return -1\n\n return gg.dft(starting_node)\n\n\nif __name__ == \"__main__\":\n gg = Graph()\n ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\n # ancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8)]\n print(earliest_ancestor(ancestors,9))\n # print(gg.vertices)\n # print(gg.dft(9))\n\n\n \n \n \n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":6141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"410891712","text":"#!/usr/bin/env python3\n\n\nfrom ev3dev.ev3 import (\n Motor, LargeMotor, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C,\n TouchSensor, ColorSensor, InfraredSensor, RemoteControl, INPUT_1, INPUT_3, INPUT_4\n)\n\nfrom threading import Thread\n\n\nclass Kraz33Hors3:\n def __init__(\n self,\n back_foot_motor_port: str = OUTPUT_C, front_foot_motor_port: str = OUTPUT_B,\n gear_motor_port: str = OUTPUT_A,\n touch_sensor_port: str = INPUT_1, color_sensor_port: str = INPUT_3,\n ir_sensor_port: str = INPUT_4, ir_beacon_channel: int = 1):\n self.front_foot_motor = LargeMotor(address=front_foot_motor_port)\n self.back_foot_motor = LargeMotor(address=back_foot_motor_port)\n\n self.gear_motor = MediumMotor(address=gear_motor_port)\n\n self.touch_sensor = TouchSensor(address=touch_sensor_port)\n self.color_sensor = ColorSensor(address=color_sensor_port)\n\n self.ir_sensor = InfraredSensor(address=ir_sensor_port)\n self.remote_control = RemoteControl(sensor=self.ir_sensor,\n channel=ir_beacon_channel)\n\n\n def drive_once_by_ir_beacon(\n self,\n speed: float = 1000 # deg/s\n ):\n # forward\n if self.remote_control.red_up and self.remote_control.blue_up:\n self.front_foot_motor.run_timed(\n speed_sp=speed,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.back_foot_motor.run_timed(\n speed_sp=-speed,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.front_foot_motor.wait_while(Motor.STATE_RUNNING)\n self.back_foot_motor.wait_while(Motor.STATE_RUNNING)\n \n # backward\n elif self.remote_control.red_down and self.remote_control.blue_down:\n self.front_foot_motor.run_timed(\n speed_sp=-speed,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.back_foot_motor.run_timed(\n speed_sp=speed,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.front_foot_motor.wait_while(Motor.STATE_RUNNING)\n self.back_foot_motor.wait_while(Motor.STATE_RUNNING)\n \n # move crazily\n elif self.remote_control.beacon:\n self.gear_motor.run_forever(speed_sp=speed)\n\n self.front_foot_motor.run_timed(\n speed_sp=speed / 3,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.back_foot_motor.run_timed(\n speed_sp=speed / 3,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.front_foot_motor.wait_while(Motor.STATE_RUNNING)\n self.back_foot_motor.wait_while(Motor.STATE_RUNNING)\n\n else:\n self.gear_motor.stop(stop_action=Motor.STOP_ACTION_COAST)\n \n def keep_driving_by_ir_beacon(\n self,\n speed: float = 1000 # deg/s\n ):\n while True: \n self.drive_once_by_ir_beacon(speed=speed)\n\n \n def back_whenever_touched(\n self,\n speed: float = 1000 # deg/s\n ):\n while True:\n if self.touch_sensor.is_pressed:\n self.front_foot_motor.run_timed(\n speed_sp=-speed,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST)\n self.back_foot_motor.run_timed(\n speed_sp=speed,\n time_sp=1000, # ms\n stop_action=Motor.STOP_ACTION_COAST) \n self.front_foot_motor.wait_while(Motor.STATE_RUNNING)\n self.back_foot_motor.wait_while(Motor.STATE_RUNNING)\n \n\n def main(self,\n speed: float = 1000 # deg/s\n ):\n Thread(target=self.back_whenever_touched).start()\n \n self.keep_driving_by_ir_beacon(speed=speed)\n\n\nif __name__ == '__main__':\n KRAZ33_HORS3 = Kraz33Hors3()\n \n 
KRAZ33_HORS3.main()\n","sub_path":"Computing-Platforms/EV3/LuongPham-Bots/Kraz33-Hors3/Kraz33-Hors3.EV3Dev1.Threading.py","file_name":"Kraz33-Hors3.EV3Dev1.Threading.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
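`main()` above runs the touch-sensor reflex on a background thread while the IR-beacon loop occupies the main thread. A hardware-free sketch of that threading pattern; the sensor and drive calls are stand-ins, not the ev3dev API:

```python
import time
from threading import Thread

def back_whenever_touched():
    # stand-in for polling touch_sensor.is_pressed and backing up
    for _ in range(3):
        time.sleep(0.1)

# daemon=True so the watcher dies together with the main thread
Thread(target=back_whenever_touched, daemon=True).start()

for _ in range(3):
    # stand-in for drive_once_by_ir_beacon(speed=1000)
    time.sleep(0.1)
```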
+{"seq_id":"7879901","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nfrom future import standard_library\n\nfrom nzbhydra import config\n\n#standard_library.install_aliases()\nfrom builtins import *\nimport calendar\nimport datetime\nimport email\nimport logging\nimport re\nimport time\nimport xml.etree.ElementTree as ET\nimport arrow\nfrom furl import furl\nimport requests\nimport concurrent\nfrom requests.exceptions import RequestException, HTTPError\nfrom nzbhydra.nzb_search_result import NzbSearchResult\nfrom nzbhydra.datestuff import now\nfrom nzbhydra import infos\nfrom nzbhydra.exceptions import IndexerAuthException, IndexerAccessException, IndexerResultParsingException\nfrom nzbhydra.search_module import SearchModule, IndexerProcessingResult\n\nlogger = logging.getLogger('root')\n\ncategories_to_newznab = {\n # Used to map sabnzbd categories to our categories. newznab results always return a general category and optionally a more specific one, for example 2000,2030. In that case we know it's an SD movie. \n # If it would return 2000,2010 (=foreign) we could still map it to ourt general movies category \n 'All': [],\n 'Movies': [2000],\n 'Movies HD': [2040, 2050, 2060],\n 'Movies SD': [2030],\n 'TV': [5000],\n 'TV SD': [5030],\n 'TV HD': [5040],\n 'Audio': [3000],\n 'Audio FLAC': [3040],\n 'Audio MP3': [3010],\n 'Audiobook': [3030],\n 'Console': [1000],\n 'PC': [4000],\n 'XXX': [6000],\n 'Other': [7000],\n 'Ebook': [7020, 8010]\n}\n\n\ndef get_age_from_pubdate(pubdate):\n timepub = datetime.datetime.fromtimestamp(email.utils.mktime_tz(email.utils.parsedate_tz(pubdate)))\n timenow = now()\n dt = timenow - timepub\n epoch = calendar.timegm(time.gmtime(email.utils.mktime_tz(email.utils.parsedate_tz(pubdate))))\n pubdate_utc = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(email.utils.mktime_tz(email.utils.parsedate_tz(pubdate))))\n age_days = int(dt.days)\n return epoch, pubdate_utc, int(age_days)\n\n\ndef map_category(category):\n # This is somewhat hack, will need to fix this later (or never)\n # We check if the category string looks like a typical newznab string (e.g. \"2030,2040\") and if yes just return it. 
If not we map it because it probably/hopefully came from us\n\n if category is None:\n return []\n catparts = category.split(\",\")\n try:\n cats = []\n for cat in catparts:\n intcat = int(cat)\n cats.append(intcat)\n return cats\n except ValueError:\n # Apparently no newznab category string\n # If we know this category we return a list of newznab categories\n if category in categories_to_newznab.keys():\n return categories_to_newznab[category]\n else:\n # If not we return an empty list so that we search in all categories\n return []\n\n\ndef check_auth(body, indexer):\n if ' 30:\n logger.info(\"%d%% wrong results, this indexer probably doesn't support %s\" % (percentWrong, idkey))\n return False, t\n logger.info(\"%d%% wrong results, this indexer probably supports %s\" % (percentWrong, idkey))\n\n return True, t\n\n\ndef checkCapsBruteForce(supportedTypes, toCheck, host, apikey):\n supportedIds = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=len(toCheck)) as executor:\n futures_to_ids = {executor.submit(_testId, host, apikey, x[\"t\"], x[\"id\"], x[\"key\"], x[\"expected\"]): x[\"id\"] for x in toCheck}\n for future in concurrent.futures.as_completed(futures_to_ids):\n id = futures_to_ids[future]\n try:\n supported, t = future.result()\n if supported:\n supportedIds.append(id)\n supportedTypes.append(t)\n except Exception as e:\n logger.error(\"An error occurred while trying to test the caps of host %s: %s\" % (host, e))\n raise IndexerResultParsingException(\"Unable to check caps: %s\" % str(e), None)\n return sorted(list(set(supportedIds))), sorted(list(set(supportedTypes)))\n\n\ndef check_caps(host, apikey, userAgent=None, timeout=None):\n toCheck = [\n {\"t\": \"tvsearch\",\n \"id\": \"tvdbid\",\n \"key\": \"121361\",\n \"expected\": \"Thrones\"\n },\n {\"t\": \"movie\",\n \"id\": \"imdbid\",\n \"key\": \"0848228\",\n \"expected\": \"Avengers\"\n },\n {\"t\": \"tvsearch\",\n \"id\": \"rid\",\n \"key\": \"24493\",\n \"expected\": \"Thrones\"\n },\n {\"t\": \"tvsearch\",\n \"id\": \"tvmazeid\",\n \"key\": \"82\",\n \"expected\": \"Thrones\"\n },\n {\"t\": \"tvsearch\",\n \"id\": \"traktid\",\n \"key\": \"1390\",\n \"expected\": \"Thrones\"\n },\n {\"t\": \"tvsearch\",\n \"id\": \"tmdbid\",\n \"key\": \"1399\",\n \"expected\": \"Thrones\"\n }\n\n ]\n supportedIds = []\n supportedTypes = []\n #Try to find out from caps first\n try:\n url = _build_base_url(host, apikey, \"caps\", None)\n headers = {\n 'User-Agent': userAgent if userAgent is not None else config.settings.searching.userAgent\n }\n logger.debug(\"Requesting %s\" % url)\n r = requests.get(url, verify=False, timeout=timeout if timeout is not None else config.settings.searching.timeout, headers=headers)\n r.raise_for_status()\n \n tree = ET.fromstring(r.content)\n searching = tree.find(\"searching\")\n doBruteForce = False\n if searching is not None:\n tvsearch = searching.find(\"tv-search\")\n if tvsearch is not None and tvsearch.attrib[\"available\"] == \"yes\":\n supportedTypes.append(\"tvsearch\")\n logger.debug(\"Found supported TV search\")\n if \"supportedParams\" in tvsearch.attrib:\n params = tvsearch.attrib[\"supportedParams\"]\n params = params.split(\",\")\n for x in [\"q\", \"season\", \"ep\"]:\n if x in params:\n params.remove(x)\n supportedIds.extend(params)\n logger.debug(\"Found supported TV IDs: %s\" % params)\n else:\n doBruteForce = True\n movie_search = searching.find(\"movie-search\")\n if movie_search is not None and movie_search.attrib[\"available\"] == \"yes\":\n 
supportedTypes.append(\"movie\")\n logger.debug(\"Found supported movie search\")\n if \"supportedParams\" in movie_search.attrib:\n params = movie_search.attrib[\"supportedParams\"]\n params = params.split(\",\")\n for x in [\"q\", \"genre\"]:\n if x in params:\n params.remove(x) \n supportedIds.extend(params)\n logger.debug(\"Found supported movie IDs: %s\" % params)\n else:\n doBruteForce = True\n book_search = searching.find(\"book-search\")\n if book_search is not None and book_search.attrib[\"available\"] == \"yes\":\n supportedTypes.append(\"movie\")\n logger.debug(\"Found supported book search\")\n \n can_handle = [y[\"id\"] for y in toCheck]\n supportedIds = [x for x in supportedIds if x in can_handle] #Only use those we can handle\n supportedIds = set(supportedIds) # Return a set because IMDB might be included for TV and movie search, for example\n \n if doBruteForce:\n logger.info(\"Unable to read supported params from caps. Will continue with brute force\")\n return checkCapsBruteForce(supportedTypes, toCheck, host, apikey)\n return sorted(list(set(supportedIds))), sorted(list(set(supportedTypes)))\n \n except HTTPError as e:\n logger.error(\"Error while trying to determine caps: %s\" % e)\n raise IndexerResultParsingException(\"Unable to check caps: %s\" % str(e), None)\n except Exception as e:\n logger.error(\"Error getting or parsing caps XML. Will continue with brute force. Error message: %s\" % e)\n return checkCapsBruteForce(supportedTypes, toCheck, host, apikey)\n \n\n\ndef _build_base_url(host, apikey, action, category, limit=None, offset=0):\n f = furl(host)\n f.path.add(\"api\")\n f.query.add({\"apikey\": apikey, \"extended\": 1, \"t\": action, \"offset\": offset})\n if limit is not None:\n f.query.add({\"limit\": limit})\n \n if category is not None:\n categories = map_category(category)\n if len(categories) > 0:\n f.query.add({\"cat\": \",\".join(str(x) for x in categories)})\n return f\n\n\nclass NewzNab(SearchModule):\n grouppattern = re.compile(r\"Group: ?([\\w\\.]+)
\")\n guidpattern = re.compile(r\"(.*/)?([a-zA-Z0-9@\\.]+)\")\n \n # todo feature: read caps from server on first run and store them in the config/database\n def __init__(self, settings):\n super(NewzNab, self).__init__(settings)\n self.settings = settings # Already done by super.__init__ but this way PyCharm knows the correct type\n self.module = \"newznab\"\n self.category_search = True\n self.supportedFilters = [\"maxage\"]\n self.supportsNot = True\n \n\n def build_base_url(self, action, category, offset=0):\n url = _build_base_url(self.settings.host, self.settings.apikey, action, category, self.limit, offset)\n if config.settings.searching.ignorePassworded:\n url.query.add({\"password\": \"0\"})\n return url\n\n def get_search_urls(self, search_request, search_type=\"search\"):\n f = self.build_base_url(search_type, search_request.category, offset=search_request.offset)\n query = search_request.query\n if query:\n query = self.addExcludedWords(query, search_request)\n f = f.add({\"q\": query})\n if search_request.maxage:\n f = f.add({\"maxage\": search_request.maxage})\n \n return [f.url]\n\n def addExcludedWords(self, query, search_request):\n if \"nzbgeek\" in self.settings.host and search_request.ignoreWords: # NZBGeek isn't newznab but sticks to its standards in most ways but not in this. Instead of adding a new search module just for this small part I added this small POC here\n query += \" --\" + \" \".join([x for x in search_request.ignoreWords if not (\" \" in x or \"-\" in x or \".\" in x)])\n else:\n for word in search_request.ignoreWords:\n if \" \" in word or \"-\" in word or \".\" in word:\n logger.debug('Not using ignored word \"%s\" in query because it contains a space, dash or dot which is not supported by newznab queries' % word)\n continue\n query += \" --\" + word\n return query\n\n def get_showsearch_urls(self, search_request):\n if search_request.category is None:\n search_request.category = \"TV\"\n\n url = self.build_base_url(\"tvsearch\", search_request.category, offset=search_request.offset)\n if search_request.identifier_key:\n canBeConverted, toType, id = infos.convertIdToAny(search_request.identifier_key, self.search_ids, search_request.identifier_value)\n if canBeConverted:\n search_request.identifier_key = toType.replace(\"tvrage\", \"rid\").replace(\"tvdb\", \"tvdbid\")\n search_request.identifier_value = id\n else:\n self.info(\"Unable to search using ID type %s\" % search_request.identifier_key)\n return []\n\n url.add({search_request.identifier_key: search_request.identifier_value})\n if search_request.episode:\n url.add({\"ep\": search_request.episode})\n if search_request.season:\n url.add({\"season\": search_request.season})\n if search_request.query:\n url.add({\"q\": search_request.query})\n\n return [url.url]\n\n def get_moviesearch_urls(self, search_request):\n if search_request.category is None:\n search_request.category = \"Movies\"\n \n #A lot of indexers seem to disregard the \"q\" parameter for \"movie\" search, so if we have a query use regular search instead \n if search_request.query:\n url = self.build_base_url(\"search\", search_request.category, offset=search_request.offset)\n url.add({\"q\": search_request.query})\n else:\n url = self.build_base_url(\"movie\", search_request.category, offset=search_request.offset)\n if search_request.identifier_key:\n canBeConverted, toType, id = infos.convertIdToAny(search_request.identifier_key, self.search_ids, search_request.identifier_value)\n if canBeConverted:\n search_request.identifier_key 
= toType.replace(\"tvrage\", \"rid\").replace(\"tvdb\", \"tvdbid\").replace(\"imdb\", \"imdbid\")\n search_request.identifier_value = id\n else:\n self.info(\"Unable to search using ID type %s\" % search_request.identifier_key)\n return []\n \n url.add({search_request.identifier_key: search_request.identifier_value})\n\n return [url.url]\n\n def get_ebook_urls(self, search_request):\n if not search_request.category:\n search_request.category = \"Ebook\"\n if search_request.author or search_request.title:\n if \"book\" in self.searchTypes:\n #API search\n url = self.build_base_url(\"book\", search_request.category, offset=search_request.offset)\n if search_request.author:\n url.add({\"author\": search_request.author})\n if search_request.title:\n url.add({\"title\": search_request.title})\n return [url.url]\n else:\n search_request.query = \"%s %s\" % (search_request.author if search_request.author else \"\", search_request.title if search_request.title else \"\")\n return self.get_search_urls(search_request)\n else:\n #internal search\n return self.get_search_urls(search_request)\n \n \n\n def get_audiobook_urls(self, search_request):\n if not search_request.category:\n search_request.category = \"Audiobook\"\n return self.get_search_urls(search_request)\n\n def get_details_link(self, guid):\n f = furl(self.settings.host)\n f.path.add(\"details\")\n f.path.add(guid)\n return f.url\n\n def get_entry_by_id(self, guid, title):\n url = furl(self.settings.host)\n url.path.add(\"api\")\n url.add({\"apikey\": self.settings.apikey, \"t\": \"details\", \"o\": \"xml\", \"id\": guid})\n\n response, papiaccess, _ = self.get_url_with_papi_access(url, \"nfo\")\n if response is None:\n return None\n try:\n tree = ET.fromstring(response.content)\n item = tree.find(\"channel\").find(\"item\")\n return self.parseItem(item)\n except ET.ParseError:\n self.error(\"Error parsing response for GUID %s\" % guid)\n return None\n \n \n\n def process_query_result(self, xml_response, searchRequest, maxResults=None):\n self.debug(\"Started processing results\")\n\n entries = []\n countRejected = 0\n\n try:\n tree = ET.fromstring(xml_response)\n except Exception:\n self.exception(\"Error parsing XML: %s...\" % xml_response[:500])\n raise IndexerResultParsingException(\"Error parsing XML\", self)\n for item in tree.find(\"channel\").findall(\"item\"):\n entry = self.parseItem(item)\n\n accepted, reason = self.accept_result(entry, searchRequest, self.supportedFilters)\n if accepted:\n entries.append(entry)\n else:\n countRejected += 1\n self.debug(\"Rejected search result. Reason: %s\" % reason)\n if maxResults is not None and len(entries) == maxResults:\n break\n\n response_total_offset = tree.find(\"./channel[1]/newznab:response\", {\"newznab\": \"http://www.newznab.com/DTD/2010/feeds/attributes/\"})\n if response_total_offset is None or response_total_offset.attrib[\"total\"] == \"\" or response_total_offset.attrib[\"offset\"] == \"\":\n self.warn(\"Indexer returned a result page without total results and offset. Shame! 
*rings bell*\")\n offset = 0\n total = len(entries)\n else:\n total = int(response_total_offset.attrib[\"total\"])\n offset = int(response_total_offset.attrib[\"offset\"])\n if total == 0 or len(entries) == 0:\n self.info(\"Query returned no results\")\n return IndexerProcessingResult(entries=entries, queries=[], total=0, total_known=True, has_more=False, rejected=0)\n\n return IndexerProcessingResult(entries=entries, queries=[], total=total, total_known=True, has_more=offset + len(entries) < total, rejected=countRejected)\n\n def parseItem(self, item):\n usenetdate = None\n entry = self.create_nzb_search_result()\n # These are the values that absolutely must be contained in the response\n entry.title = item.find(\"title\").text\n entry.link = item.find(\"link\").text\n entry.attributes = []\n entry.pubDate = item.find(\"pubDate\").text\n entry.indexerguid = item.find(\"guid\").text\n entry.has_nfo = NzbSearchResult.HAS_NFO_MAYBE\n m = self.guidpattern.search(entry.indexerguid)\n if m:\n entry.indexerguid = m.group(2)\n description = item.find(\"description\")\n if description is not None:\n description = description.text\n if description is not None and \"Group:\" in description: # DogNZB has the group in its description\n m = self.grouppattern.search(description)\n if m and m.group(1) != \"not available\":\n entry.group = m.group(1)\n categories = []\n for i in item.findall(\"./newznab:attr\", {\"newznab\": \"http://www.newznab.com/DTD/2010/feeds/attributes/\"}):\n attribute_name = i.attrib[\"name\"]\n attribute_value = i.attrib[\"value\"]\n if attribute_name == \"size\":\n entry.size = int(attribute_value)\n elif attribute_name == \"guid\":\n entry.indexerguid = attribute_value\n elif attribute_name == \"category\" and attribute_value != \"\":\n try:\n categories.append(int(attribute_value))\n except ValueError:\n self.error(\"Unable to parse category %s\" % attribute_value)\n elif attribute_name == \"poster\":\n entry.poster = attribute_value\n elif attribute_name == \"info\":\n entry.details_link = attribute_value\n elif attribute_name == \"password\" and attribute_value != \"0\":\n entry.passworded = True\n elif attribute_name == \"group\" and attribute_value != \"not available\":\n entry.group = attribute_value\n elif attribute_name == \"usenetdate\":\n usenetdate = arrow.get(attribute_value, 'ddd, DD MMM YYYY HH:mm:ss Z')\n # Store all the extra attributes, we will return them later for external apis\n entry.attributes.append({\"name\": attribute_name, \"value\": attribute_value})\n if entry.details_link is None:\n entry.details_link = self.get_details_link(entry.indexerguid)\n if usenetdate is None:\n # Not provided by attributes, use pubDate instead\n usenetdate = arrow.get(entry.pubDate, 'ddd, DD MMM YYYY HH:mm:ss Z')\n entry.epoch = usenetdate.timestamp\n entry.pubdate_utc = str(usenetdate)\n entry.age_days = (arrow.utcnow() - usenetdate).days\n entry.precise_date = True\n # Map category. Try to find the most specific category (like 2040), then the more general one (like 2000)\n categories = sorted(categories, reverse=True) # Sort to make the most specific category appear first\n if len(categories) > 0:\n for k, v in categories_to_newznab.items():\n for c in categories:\n if c in v:\n entry.category = k\n break\n return entry\n\n def check_auth(self, body):\n return check_auth(body, self)\n\n def get_nfo(self, guid):\n # try to get raw nfo. 
if it is xml the indexer doesn't actually return raw nfos (I'm looking at you, DOGNzb)\n        url = furl(self.settings.host)\n        url.path.add(\"api\")\n        url.add({\"apikey\": self.settings.apikey, \"t\": \"getnfo\", \"o\": \"xml\", \"id\": guid, \"raw\": \"1\"})\n\n        response, papiaccess, _ = self.get_url_with_papi_access(url, \"nfo\")\n        if response is None:\n            return False, None, \"Unable to access indexer\"\n\n        nfo = response.content\n        if \"\n#pprint(ratings_data.take(10))\n#pprint(\"------------------\")\n\n# Obtain the two RDDs from the movies, plus the titles RDD, passing as parameters the SparkContext and the number of partitions the RDD will be split into\nmovies_data, movie_titles=ObtainRDD_Movies(sc,n_partitions)\n# We will have tuples of the form \n#pprint(movies_data.take(400))\n\n\n# RDD containing, for each movie, the number of associated ratings. reduceByKey is used to reduce the shuffle\nIDRatings = ratings_data.map(lambda row: (int(row[1]), 1)).reduceByKey(add)\n#pprint(IDRatings.takeOrdered(25, key=lambda x: x[1]))\n#pprint(IDRatings.take(10))\n\n\n\n# To apply collaborative filtering, a new user has to be created in order to obtain recommendations\n# for new movies based on interests shared with other users\nuser_ratings, user_ID, list_user_ratings =DefNewUser(sc)\n#print(list_user_ratings)\n\n#{\n    # To make the RDD obtained from the user's ratings part of the dataset with the other ratings,\n    # we use union\n    #final_ratings_RDD= ratings_data.union(user_ratings)\n\n    # Now that the RDD also includes the user's data, we split it into a training set and a test set\n    #training_final, test_final=final_ratings_RDD.randomSplit([8,2], seed=42)\n    #test_final_map=test_final.map(lambda x: (x[0], x[1]))\n#}\n\n# New part\n\ntraining_final, test_final=ratings_data.randomSplit([8,2], seed=42)\ntraining_userRatings,test_userRatings=user_ratings.randomSplit([5,5], seed=42)\n\nfinal_test=test_final.union(test_userRatings)\nfinal_training=training_final.union(training_userRatings)\n# Now we retrain the model with the new ratings and the default parameters\nnew_ratings_model = ALS.train(final_training, 20, seed=5, iterations=10, lambda_=0.1)\nGetRealAndPreds(new_ratings_model, final_test, 1)\n\n# We apply it once to the movies the user has not seen and once to those already seen, to compare and check whether the model\n# is predicting well\n\n# Recommendations\n# Take the ids of the movies the user has rated\nnewUser_movies_rated= map(lambda x: x[1], list_user_ratings)\n\nnewUser_unrated_movies_RDD=sc.emptyRDD()\n# Filter the movies into a new RDD that will contain the movies not rated by the user\nnewUser_unrated_movies_RDD=FilterMovie(newUser_unrated_movies_RDD, newUser_movies_rated, movies_data)\nnewUser_unrated_movies_RDD = newUser_unrated_movies_RDD.map(lambda x: (user_ID, x[0]))\n\n# Use the model trained with the original ratings to predict new ratings for the movies the user has not rated\n# This RDD will therefore contain the predictions for the new user, with tuples of the form (userID, movieID, predicted rating)\nnewUser_moviePrediction_RDD = new_ratings_model.predictAll(newUser_unrated_movies_RDD)\n\n# From the previous RDD keep only the movie ID and its rating\nnewUser_moviePrediction_rating_RDD = newUser_moviePrediction_RDD.map(lambda x: (x.product, x.rating))\n#pprint(newUser_moviePrediction_rating_RDD.take(3))\n\n# By joining the previous RDD, the movie-titles RDD and the ratings-count RDD, we obtain an RDD\n# with tuples of the form (movieID, ((rating, title), countRatings))
newUser_join_recommendations_RDD = \\\n    newUser_moviePrediction_rating_RDD.join(movie_titles).join(IDRatings)\n\n# Reorganize the tuples with one less nesting level. We will have tuples of the form (title, rating, countRatings)\nnew_final_recommendations_RDD = \\\n    newUser_join_recommendations_RDD.map(lambda r: (r[1][0][1], r[1][0][0], r[1][1]))\n#pprint(new_final_recommendations_RDD.take(4))\n\n# Take the 25 best recommendations with 25 or more reviews, in descending order\n# (-x[1], where x[1] is the rating field and the minus sign gives the descending order)\ntop_movies = new_final_recommendations_RDD.filter(lambda r: r[2]>=25).takeOrdered(25, key=lambda x: -x[1])\n\n\nprint('Top Movie Recommendation:\\n%s' % '\\n'.join(map(str, top_movies)))\nprint('---- %s seconds ----' % (time.time()-startTime))\n# End of the new part\n\n# Another new part: model on the movies the user has already seen. WE APPLY THE SAME MODEL\nGetRealAndPreds(new_ratings_model, test_userRatings, 2)\n#\n\n\"\"\"\n#best_rank=GetBestRank(training_final, test_final_map, test_final)\n# Obtain the error of the model\nnew_ratings_model=GetErrorModel(training_final, test_final_map, test_final)\n\n# Now we retrain the model with the new ratings and the default parameters\n#new_ratings_model = ALS.train(final_ratings_RDD, 10, seed=42, iterations=10, lambda_=0.1)\n\n\n# Recommendations\n# Take the ids of the movies the user has rated\nnewUser_movies_rated= map(lambda x: x[1], list_user_ratings)\n\nnewUser_unrated_movies_RDD=sc.emptyRDD()\n# Filter the movies into a new RDD that will contain the movies not rated by the user\nnewUser_unrated_movies_RDD=FilterMovie(newUser_unrated_movies_RDD, newUser_movies_rated, movies_data)\n\n\npprint(newUser_unrated_movies_RDD.take(18))\nnewUser_unrated_movies_RDD = newUser_unrated_movies_RDD.map(lambda x: (user_ID, x[0]))\npprint(newUser_unrated_movies_RDD.take(newUser_unrated_movies_RDD.count()))\n\n# Use the model trained with the original ratings to predict new ratings for the movies the user has not rated\n# This RDD will therefore contain the predictions for the new user, with tuples of the form (userID, movieID, predicted rating)\nnewUser_moviePrediction_RDD = new_ratings_model.predictAll(newUser_unrated_movies_RDD)\n#newUser_unrated_movies_RDD)\n#pprint(newUser_moviePrediction_RDD.take(3))\n\n# From the previous RDD keep only the movie ID and its rating\nnewUser_moviePrediction_rating_RDD = newUser_moviePrediction_RDD.map(lambda x: (x.product, x.rating))\n#pprint(newUser_moviePrediction_rating_RDD.take(3))\n\n# By joining the previous RDD, the movie-titles RDD and the ratings-count RDD, we obtain an RDD \n# with tuples of the form (movieID, ((rating, title), countRatings))\nnewUser_join_recommendations_RDD = \\\n    newUser_moviePrediction_rating_RDD.join(movie_titles).join(IDRatings)\n\n# Reorganize the tuples with one less nesting level. 
We will have tuples of the form (title, rating, countRatings)\nnew_final_recommendations_RDD = \\\n    newUser_join_recommendations_RDD.map(lambda r: (r[1][0][1], r[1][0][0], r[1][1]))\n#pprint(new_final_recommendations_RDD.take(4))\n\n# Take the 25 best recommendations with 25 or more reviews, in descending order \n# (-x[1], where x[1] is the rating field and the minus sign gives the descending order)\ntop_movies = new_final_recommendations_RDD.filter(lambda r: r[2]>=25).takeOrdered(25, key=lambda x: -x[1])\n\n\nprint('Top Movie Recommendation:\\n%s' % '\\n'.join(map(str, top_movies)))\nprint('---- %s seconds ----' % (time.time()-startTime))\n\n\"\"\"\n# Evaluation\n#testUserMovieAlreadyRaterd=\n\n\n#QUERY\n\n\n\n# Take the current time\n#startTime=time.time()\n\n# Define a dataframe from the RDDs and a temporary view that can be used like a table and that does not persist in memory \n# To keep it in memory it would be necessary to use the .cache() method, as done for the RDDs\n\"\"\"schemaMovies=sqlContext.createDataFrame(movies_data)\nschemaMovies.createOrReplaceTempView(\"movies\")\n\nschemaRatings=sqlContext.createDataFrame(ratings_data)\nschemaRatings.createOrReplaceTempView(\"ratings\")\n\n\n#pprint(\"-------------------------MOVIES WITH MYSTERY GENRES ----------------------------\")\nquery_misteryMovies=sqlContext.sql(\"SELECT movies._2 FROM movies WHERE movies._3 LIKE '%Mystery%'\")\n#pprint(query_misteryMovies.take(query_misteryMovies.count()))\n\n#print(\"-------------------------MOVIES of 2018----------------------------\")\n\nquery_misteryMovies=sqlContext.sql(\"SELECT movies._2 FROM movies WHERE movies._2 LIKE '%(2018)%' \")\n#pprint(query_misteryMovies.take(query_misteryMovies.count()))\n\n\n#pprint(\"-------------------------MOVIES BETWEEN 2016 AND 2018-----------------------------------\")\nquery_misteryMovies=sqlContext.sql(\"SELECT movies._2 FROM movies WHERE movies._2 LIKE '%(2016)%' OR movies._2 LIKE '%(2018)%' \")\n#pprint(query_misteryMovies.take(query_misteryMovies.count()))\n\n#pprint(\"-------------------------MOVIES WITH RATINGS 5-------------------------------\")\nquery_join=sqlContext.sql(\"SELECT DISTINCT movies._2 FROM movies JOIN ratings ON movies._1=ratings._2 WHERE ratings._3=5.0\")\n#pprint(query_join.take(query_join.count()))\n\n#pprint(\"--------------------MOVIES OF FANTASY AND ADVENTURE GENRES AND WITH RATINGS 5-------------------------------\")\nquery_join=sqlContext.sql(\"SELECT DISTINCT movies._2 FROM movies JOIN ratings ON movies._1=ratings._2 WHERE movies._3 LIKE '%Fantasy%' AND movies._3 LIKE '%Adventure%' AND ratings._3=5.0\")\n#pprint(query_join.take(query_join.count()))\n\n#pprint(\"--------------------MOVIES WITH RATINGS 1 OR 2 AND OF 1995-------------------------------\")\nquery_join=sqlContext.sql(\"SELECT movies._2 FROM movies JOIN ratings ON movies._1=ratings._2 WHERE ratings._3=1 OR ratings._3=2 AND movies._2 LIKE '%(1995)%'\")\n#pprint(query_join.take(query_join.count())\n\n#pprint(\"--------------------MOVIES AND RATINGS IN A DESCENDING ORDER-------------------------------\")\nquery_join=sqlContext.sql(\"SELECT DISTINCT movies._2, ratings._3 FROM movies JOIN ratings ON movies._1=ratings._2 ORDER BY ratings._3 DESC\")\n#pprint(query_join.take(query_join.count()))\"\"\"\n\n# execution time\n#print('---- %s seconds ----' % (time.time()-startTime)\n\n","sub_path":"FinalGit/files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
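For reference, a self-contained miniature of the ALS train/predict cycle the script above relies on. It uses only `pyspark.mllib` calls that appear in the record (`ALS.train`, `predictAll`); the ratings and parameters are invented for illustration:

```python
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS

sc = SparkContext("local[2]", "als-sketch")
# (userID, movieID, rating) triples, made up for the example
ratings = sc.parallelize([(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 5.0)])
model = ALS.train(ratings, rank=10, seed=5, iterations=10, lambda_=0.1)

unrated = sc.parallelize([(0, 2)])          # (userID, movieID) pairs to score
preds = model.predictAll(unrated).map(lambda r: (r.product, r.rating))
print(preds.collect())
sc.stop()
```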
+{"seq_id":"444164970","text":"#Access modes for files\n#They specify you want to read/write/both\nmyfile=open(\"textdoc.txt\",\"r\")\nmyfile.read()\n#this is for normal opening and reading\nmyfile=open(\"myfile.txt\",mode=\"w\",encoding=\"utf-8\")\n#now we mentioned mode as w means to write\n# r Open file for reading only. Starts reading from beginning of file. This default mode.\n# w Open file for writing only. File pointer placed at beginning of the file. Overwrites existing file and creates a new one if it does not exists.\nmyfile.write(\"Writing first line\\n\")\nmyfile.write(\"Writing second line\\n\")\nmyfile.write(\"Writing third line\\n\")\n# rb Open a file for reading only in binary format. Starts reading from beginning of file.\n# r+ Open file for reading and writing. File pointer placed at beginning of the file.\nmyfile.close()\n# wb Same as w but opens in binary mode.\n# w+ Same as w but also alows to read from file.\n# wb+ Same as wb but also alows to read from file.\n","sub_path":"readwriteclose.py","file_name":"readwriteclose.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"377807636","text":"from pathlib import Path\nimport string\n\ninput_list = Path(\"Day8Input.txt\").resolve().read_text()\n# print(repr(input_list))\n# print(input_list)\n\n# IT WORKS!\n\n# for full instructions, visit https://adventofcode.com/2020/day/8\n\n# currently input text is the test data:\n\n# nop +0\n# acc +1\n# jmp +4\n# acc +3\n# jmp -3\n# acc -99\n# acc +1\n# jmp -4\n# acc +6\n\n# value of the accumulator before the code repeats itself should be 5.\n\n# BEGIN PARSING\n\ndef data_parser(raw_list):\n\tsplit_lines = [line.split() for line in raw_list.splitlines()]\n\treturn split_lines\n\n\ndef make_master(data):\n\tfor line in data:\n\t\tlindex = data.index(line)\n\t\tline.append(lindex)\n\treturn data\n\n\n# def convert_to_dictionary(lst):\n# \tmain_dict = {}\n# \tfor line in lst:\n# \t\tprint(lst.index(line))\n# \t\tres_dct = {lst.index(line): [line[0], line[1]]}\n# \t\tmain_dict.update(res_dct)\n# \treturn main_dict\n\n\ndef executor(index, main_list):\n\t# given an index and a list, returns (index change, accumulator change)\n\top = main_list[index]\n\tif op[0] == 'nop':\n\t\treturn (1, 0)\n\tif op[0] == 'acc':\n\t\t# print(op[1], 'should be added once')\n\t\treturn (1, op[1])\n\tif op[0] == 'jmp':\n\t\treturn (op[1], 0)\n\n\ndef run_code(main_list):\n\tused_indexes = []\n\t(current_index, accumulator) = (0,7)\n\twhile not (current_index in used_indexes):\n\t\tused_indexes.append(current_index) # collection of indexes of operations we've already visited\n\t\t# print(used_indexes)\n\t\tcurrent_index += int(executor(current_index, main_list)[0])\n\t\t# print(current_index) #changing the index based on the current rule\n\t\tif current_index == 648:\n\t\t\tprint('Found it!')\n\t\t\tprint(accumulator)\n\t\t\treturn True\n\t\telif not(current_index in used_indexes): #checking if we're revisiting an operation\n\t\t\t# print(int(executor(current_index, main_list)[1]), 'add this')\n\t\t\taccumulator += int(executor(current_index, main_list)[1])\n\t\t\t# print(accumulator, 'is accumulated') #if not, then we do the operation and proceed\n\t\telse:\n\t\t\t# print(main_list[current_index], 'this is the end.')\n\t\t\treturn False #if we are, then the function ends and we get the accumulator value\n\n\ndef switch_op(op):\n\t# given an operation: if it's nop or jmp, switch. if not, return nothing\n\tif op[0] == \"nop\":\n\t\top[0] = \"jmp\"\n\t\treturn op\n\tif op[0] == \"jmp\":\n\t\top[0] = \"nop\"\n\t\treturn op\n\telse:\n\t\tprint('Error! Not a nop or jump')\n\n\ndef op_search(main_list):\n\tcurrent_index = 0\n\twhile not(run_code(main_list)):\n\t\tfor line in main_list:\n\t\t\tif line[0] == \"jmp\" or line[0] == \"nop\":\n\t\t\t\tline = switch_op(line)\n\t\t\t\tcurrent_index = line[2]\n\t\t\tif not(run_code(main_list)):\n\t\t\t\tif line[0] == \"jmp\" or line[0] == \"nop\":\n\t\t\t\t\tline = switch_op(line)\n\t\t\t\t\tcurrent_index = line[2]\n\n\n\nparsed_data = data_parser(input_list)\nmaster_list = make_master(parsed_data)\nop_search(master_list)\n# print(run_code(master_list))\n","sub_path":"Day8.HandheldHalting.py","file_name":"Day8.HandheldHalting.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"479588143","text":"\"\"\"\nBlind Source Separation for sparsely mixed signals based on Independent Vector Analysis (IVA) with Auxiliary Function\n\nJanský, Jakub & Koldovský, Zbyněk & Ono, Nobutaka. (2016). A computationally cheaper method for blind speech separation\n based on AuxIVA and incomplete demixing transform. 1-5. IWAENC2016\n\n\n\n2018 (c) Yaron Dibner & Virgile Hernicot, MIT License\n\"\"\"\nimport numpy as np\n\nfrom pyroomacoustics import stft, istft\nfrom .common import projection_back\nfrom scipy.linalg import dft\n\n# A few contrast functions\nf_contrasts = {\n 'norm': {'f': (lambda r, c, m: c * r), 'df': (lambda r, c, m: c)},\n 'cosh': {'f': (lambda r, c, m: m * np.log(np.cosh(c * r))), 'df': (lambda r, c, m: c * m * np.tanh(c * r))}\n}\n\n\ndef sparseauxiva(X, S, n_iter, proj_back=True, return_filters=False, lasso=True):\n \"\"\"\n\n :param X: STFT transform of the mixed signal from the mics\n :param S: Index set of frequency bins used in auxiva\n :param n_iter: Number of iteratios in auxiva\n :param proj_back: whether it performs the back projection or not\n :param return_filters: return filters\n :param lasso: bool indicating if lasso regularization is applied or not\n :return:\n \"\"\"\n n_frames, n_freq, n_chan = X.shape\n\n k_freq = S.shape[0]\n\n # default to determined case\n n_src = n_chan\n\n # initialize the demixing matrices\n W = np.array([np.eye(n_chan, n_src) for f in range(n_freq)], dtype=X.dtype)\n\n f_contrast = f_contrasts['norm']\n f_contrast_args = [1, 1]\n\n I = np.eye(n_src, n_src)\n Y = np.zeros((n_frames, n_freq, n_src), dtype=X.dtype)\n V = np.zeros((n_freq, n_src, n_chan, n_chan), dtype=X.dtype)\n r = np.zeros((n_frames, n_src))\n G_r = np.zeros((n_frames, n_src))\n\n\n for epoch in range(n_iter):\n\n demixsparse(Y, X, S, W)\n\n # simple loop as a start\n # shape: (n_frames, n_src)\n r[:, :] = np.sqrt(np.sum(np.abs(Y * np.conj(Y)), axis=1))\n\n # Apply derivative of contrast function\n G_r[:, :] = f_contrast['df'](r, *f_contrast_args) / r # shape (n_frames, n_src)\n\n # Compute Auxiliary Variable\n for f in range(k_freq):\n for s in range(n_src):\n V[S[f], s, :, :] = (np.dot(G_r[None, :, s] * X[:, S[f], :].T, np.conj(X[:, S[f], :]))) / X.shape[0]\n\n # Update now the demixing matrix\n for f in range(k_freq):\n for s in range(n_src):\n WV = np.dot(np.conj(W[S[f], :, :].T), V[S[f], s, :, :])\n W[S[f], :, s] = np.linalg.solve(WV, I[:, s])\n W[S[f], :, s] /= np.sqrt(np.inner(np.conj(W[S[f], :, s]), np.dot(V[S[f], s, :, :], W[S[f], :, s])))\n\n # print(\"Successfully computed the sparse weights, proceeding to lasso...\")\n\n np.set_printoptions(precision=2)\n\n # Check if LASSO regularization activated\n if lasso:\n Z = np.zeros((n_src, k_freq), dtype=W.dtype)\n G = np.zeros((n_src, n_freq, 1), dtype=Z.dtype)\n hrtf = np.zeros((n_freq, n_src), dtype=W.dtype)\n Hrtf = np.zeros((n_freq, n_src), dtype=W.dtype)\n\n for i in range(n_src):\n\n # sparse relative transfer function\n Z[i, :] = np.array([-W[S[f], 0, i] / W[S[f], 1, i] for f in range(k_freq)]).conj().T\n\n # mask frequencies Z with S and copy the result into G\n G[i, S] = (np.expand_dims(Z[i, :], axis=1))\n\n # solve LASSO in the time domain\n hrtf[:, i] = sparir(G[i, :], S)\n\n # convert transfer function from time domain to frequency domain\n Hrtf[:, i] = np.fft.fft(hrtf[:, i])\n W[:, :, i] = np.conj(np.insert(Hrtf[:, i, None], 1, -1, axis=1))\n\n # final demixing\n demixsparse(Y, X, np.array(range(n_freq)), W)\n\n if proj_back:\n z = projection_back(Y, X[:, :, 0])\n Y *= 
np.conj(z[None, :, :])\n\n if return_filters:\n return Y, W\n else:\n return Y\n\n \ndef demixsparse(Y, X, S, W):\n \"\"\"\n :param Y:\n :param X:\n :param S:\n :param W:\n :return:\n \"\"\"\n freq = S.shape[0]\n for f in range(freq):\n Y[:, S[f], :] = np.dot(X[:, S[f], :], np.conj(W[S[f], :, :]))\n\n\ndef sparir(G, S, delay=0, weights=np.array([]), gini=0):\n \"\"\"\n Natural-gradient estimation of the complete HRTF from a sparsely recovered HRTF based on\n Koldovský, Zbyněk & Nesta, Francesco & Tichavsky, Petr & Ono, Nobutaka. (2016). Frequency-domain blind speech\n separation using incomplete de-mixing transform. EUSIPCO.2016.\n :param G: sparse HRTF in the frequency domain\n :param S:\n :param delay:\n :param weights:\n :param gini:\n :return:\n g: an (n_frames, n_src) array. The reconstructed hrtf in the time domain\n \"\"\"\n L = G.shape[0] # n_freq\n\n y = np.concatenate((np.real(G[S]), np.imag(G[S])), axis=0)\n M = y.shape[0]\n\n if gini == 0: # if no initialization is given\n g = np.zeros((L, 1))\n g[delay] = 1\n else:\n g = gini\n\n if weights.size == 0:\n tau = np.sqrt(L) / (y.conj().T.dot(y))\n tau = tau * np.exp(0.11 * np.abs((np.arange(1., L + 1.).T - delay)) ** 0.3)\n tau = tau.T\n elif weights.shape[0] == 1:\n tau = np.ones((L, 1)) * weights\n else:\n tau = np.tile(weights.T, (1, 1)).reshape(L)\n\n def soft(x, T):\n if np.sum(np.abs(T).flatten()) == 0:\n u = x\n else:\n u = np.max(np.abs(x) - T, 0)\n u = u / (u + T) * x\n return u\n\n maxiter = 50\n alphamax = 1e5 # maximum step - length parameter alpha\n alphamin = 1e-7 # minimum step - length parameteralpha\n tol = 10\n\n aux = np.zeros((L, 1),dtype=complex)\n G = np.fft.fft(g.flatten())\n Ag = np.concatenate((np.real(G[S]), np.imag(G[S])), axis=0)\n r = Ag - y.flatten() # instead of r = A * g - y\n aux[S] = np.expand_dims(r[0:M // 2] + 1j * r[M // 2:], axis=1)\n gradq = L * np.fft.irfft(aux.flatten(), L) # instead of gradq = A'*r\n gradq = np.expand_dims(gradq, axis=1)\n alpha = 10\n support = g != 0\n iter_ = 0\n\n crit = np.zeros((maxiter, 1))\n\n criterion = -tau[support] * np.sign(g[support]) - gradq[support]\n crit[iter_] = np.sum(criterion ** 2)\n # print(\"iteration: \", iter_ + 1, \", criterion: \", crit[iter_])\n\n while (crit[iter_] > tol) and (iter_ < maxiter - 1):\n prev_r = r\n prev_g = g\n g = soft(prev_g - gradq * (1.0 / alpha), tau / alpha)\n dg = g - prev_g\n DG = np.fft.fft(dg.flatten())\n Adg = np.concatenate((np.real(DG[S]), np.imag(DG[S])), axis=0)\n r = prev_r + Adg.flatten() # faster than A * g - y\n dd = dg.flatten().conj().T @ dg.flatten()\n dGd = Adg.flatten().conj().T @ Adg.flatten()\n alpha = min(alphamax, max(alphamin, dGd / (np.finfo(np.float32).eps + dd)))\n iter_ = iter_ + 1\n support = g != 0\n aux[S] = np.expand_dims(r[0:M // 2] + 1j * r[M // 2:], axis=1)\n gradq = L * np.fft.irfft(aux.flatten(), L)\n gradq = np.expand_dims(gradq, axis=1)\n criterion = -tau[support] * np.sign(g[support]) - gradq[support]\n crit[iter_] = sum(criterion ** 2) + sum(abs(gradq[~support]) - tau[~support] > tol)\n # if iter_ % 100 == 0:\n # print(\"iteration: \", iter_+1, \", criterion: \", crit[iter_])\n\n # print('SpaRIR: {0} iterations done.'.format(iter_+1))\n\n return g.flatten()\n","sub_path":"pyroomacoustics/bss/sparseauxiva.py","file_name":"sparseauxiva.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
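`demixsparse` above applies one demixing matrix per frequency bin, `Y[:, f, :] = X[:, f, :] @ conj(W[f])`. A shape-level numpy sketch with random data, not the package's API, just the same loop on a determined 2x2 case:

```python
import numpy as np

n_frames, n_freq, n_chan = 4, 3, 2
rng = np.random.default_rng(0)
X = rng.standard_normal((n_frames, n_freq, n_chan)) \
    + 1j * rng.standard_normal((n_frames, n_freq, n_chan))
# identity demixing matrices as a trivial starting point, one per bin
W = np.stack([np.eye(n_chan, dtype=complex) for _ in range(n_freq)])

Y = np.zeros_like(X)
for f in range(n_freq):
    Y[:, f, :] = np.dot(X[:, f, :], np.conj(W[f]))
print(Y.shape)  # (4, 3, 2): frames x frequency bins x sources
```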
+{"seq_id":"204424800","text":"class Contact:\n def __init__(self, name, phone_number,e_mail, addr):\n self.name= name\n self.phone_number = phone_number\n self.e_mail = e_mail\n self.addr = addr\n\n def print_info(self):\n print(\"Name : \" , self.name)\n print(\"Phone number : \", self.phone_number)\n print(\"E mail : \", self.e_mail)\n print(\"Address : \", self.addr)\ndef set_contact():\n name = input(\"Name : \")\n phone_number = input(\"Phone Number : \")\n e_mail = input(\"E-mail : \")\n addr = input(\"Address : \")\n contact = Contact(name,phone_number,e_mail,addr)\n return contact\ndef print_contact(contact_list):\n for contact in contact_list:\n contact.print_info()\ndef run() :\n contact_list = []\n contact = set_contact()\n contact_list.append(contact)\n print_contact(contact_list)\n\nrun()","sub_path":"Algorithm_trading/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"494426109","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport models\nfrom Data import *\nfrom util.misc_utils import move_to_cuda\n\nimport numpy as np\n\n\nclass SelectGate(nn.Module):\n\n def __init__(self, config):\n super(SelectGate, self).__init__()\n self.linear1 = nn.Linear(config.encoder_hidden_size * 2, config.encoder_hidden_size)\n self.linear2 = nn.Linear(config.encoder_hidden_size, 2)\n\n def forward(self, contexts):\n gates = F.softmax(self.linear2(F.relu((self.linear1(contexts)))), dim=-1)\n return gates\n\n\nclass PostSelectGate(nn.Module):\n\n def __init__(self, config):\n super(PostSelectGate, self).__init__()\n self.linear1 = nn.Linear(config.encoder_hidden_size * 4, config.encoder_hidden_size)\n self.linear2 = nn.Linear(config.encoder_hidden_size, 2)\n\n def forward(self, contexts, comment_context):\n comment_context = comment_context.unsqueeze(1).expand(-1, contexts.size(1), -1)\n gates = F.softmax(self.linear2(F.relu(self.linear1(torch.cat([contexts, comment_context], dim=-1)))), dim=-1)\n return gates\n\n\nclass GetUser(nn.Module):\n\n def __init__(self, config):\n super(GetUser, self).__init__()\n self.linear = nn.Linear(config.n_z, 10)\n self.use_emb = nn.Embedding(10, config.n_z)\n self.topic_id = -1\n\n def forward(self, latent_context, is_test=False):\n if not is_test:\n p_user = F.softmax(self.linear(latent_context), dim=-1) # bsz * 10\n h_user = (self.use_emb.weight.unsqueeze(0) * p_user.unsqueeze(-1)).sum(dim=1) # bsz * n_hidden\n selected_user = torch.argmax(p_user, dim=-1)\n else:\n if self.topic_id == -1:\n ids = torch.LongTensor(latent_context.size(0)).to(latent_context.device).random_(0, 10)\n else:\n ids = torch.LongTensor(latent_context.size(0)).to(latent_context.device).fill_(self.topic_id)\n h_user = self.use_emb(ids)\n selected_user = ids\n return h_user, selected_user\n\nclass var_select_var_user_diverse2seq_test(nn.Module):\n\n def __init__(self, config, vocab, use_cuda, use_content=False, pretrain=None):\n super(var_select_var_user_diverse2seq_test, self).__init__()\n self.vocab = vocab\n self.vocab_size = vocab.voc_size\n if pretrain is not None:\n self.embedding = pretrain['emb']\n else:\n self.embedding = nn.Embedding(self.vocab_size, config.emb_size)\n self.encoder = models.rnn_encoder(config, self.vocab_size, embedding=self.embedding)\n self.decoder = models.rnn_topic_decoder(config, self.vocab_size, embedding=self.embedding)\n self.config = config\n self.use_content = use_content\n self.criterion = models.criterion(self.vocab_size, use_cuda)\n self.log_softmax = nn.LogSoftmax(-1)\n self.tanh = nn.Tanh()\n\n # select gate\n self.title_encoder = models.rnn_encoder(config, self.vocab_size, embedding=self.embedding)\n self.select_gate = SelectGate(config)\n self.comment_encoder = models.rnn_encoder(config, self.vocab_size, embedding=self.embedding)\n self.hidden_to_mu = nn.Linear(2 * config.encoder_hidden_size, config.n_z)\n self.hidden_to_logvar = nn.Linear(2 * config.encoder_hidden_size, config.n_z)\n self.gama_kld = config.gama_kld\n\n self.select_post_gate = PostSelectGate(config)\n self.gama_kld_select = config.gama_select\n self.get_user = GetUser(config)\n\n def compute_loss(self, out_dict, targets):\n hidden_outputs = out_dict['outputs'].transpose(0, 1)\n word_loss = models.cross_entropy_loss(hidden_outputs, targets, self.criterion)\n\n # gate loss\n gate_loss = out_dict['l1_gates']\n\n # # match loss\n # pos_loss = 
torch.log(torch.sigmoid((out_dict['title_state'] * out_dict['comment_state']).sum(dim=-1)))\n # neg_losg = torch.log(torch.sigmoid((out_dict['title_state'] * torch.roll(out_dict['comment_state'], 1, dims=0)).sum(dim=-1)))\n # match_loss = - pos_loss + neg_losg\n\n # kld comment\n kld = out_dict['kld']\n\n # kld select\n kld_select = out_dict['kld_select']\n if self.config.min_select > 0:\n kld_select = torch.abs(kld_select - self.config.min_select)\n\n # rank and reg loss\n rank_loss = out_dict['rank']\n reg_loss = out_dict['reg']\n\n loss = word_loss[0] + self.config.gama1 * gate_loss + self.gama_kld * kld + self.gama_kld_select * kld_select \\\n + self.config.gama_rank * rank_loss + self.config.gama_reg * reg_loss\n return {\n 'loss': loss,\n 'word_loss': word_loss[0],\n 'acc': word_loss[1],\n 'gate_loss': gate_loss,\n 'kld_loss': kld,\n 'kld_select_loss': kld_select,\n 'rank': rank_loss,\n 'reg': reg_loss,\n 'pri_gates': out_dict['pri_gates'],\n 'user_norm': out_dict['user_norm'],\n 'selected_user': out_dict['selected_user'],\n }\n\n def encode(self, batch, is_test=False):\n src, src_len, src_mask = batch.title, batch.title_len, batch.title_mask\n # content, content_len, content_mask = batch.content, batch.cotent_len, batch.cotent_mask\n content, content_len, content_mask = batch.title_content, batch.title_content_len, batch.title_content_mask\n\n # input: title, content\n # title_contexts, title_state = self.title_encoder(src, src_len)\n # title_rep = title_state[0][-1] # bsz * n_hidden\n\n # encoder\n contexts, state = self.encoder(content, content_len)\n\n # select important information of body\n org_context_gates = self.select_gate(contexts) # output: bsz * n_context * 2\n context_gates = org_context_gates[:, :, 0] # bsz * n_context\n\n if not is_test:\n # comment encoder\n tgt, tgt_len = batch.tgt, batch.tgt_len\n _, comment_state = self.comment_encoder(tgt, tgt_len) # output: bsz * n_hidden\n comment_rep = comment_state[0][-1] # bsz * n_hidden\n\n # selector vae\n org_post_context_gates = self.select_post_gate(contexts, comment_rep)\n post_context_gates = gumbel_softmax(torch.log(org_post_context_gates + 1e-10), self.config.tau)\n post_context_gates = post_context_gates[:, :, 0] # bsz * n_context\n org_post_context_gates = org_post_context_gates[:, :, 0]\n\n # kl(p1||p2)\n def kldiv(p1, p2):\n kl = p1 * torch.log((p1 + 1e-10) / (p2 + 1e-10)) + (1 - p1) * torch.log((1 - p1 + 1e-10) / (1 - p2 + 1e-10))\n return kl\n\n kld_select = ((kldiv(org_post_context_gates, context_gates) * content_mask.float()).sum(dim=-1) / content_len.float()).mean()\n\n # comment vae\n mu = self.hidden_to_mu(comment_rep) # Get mean of lantent z\n logvar = self.hidden_to_logvar(comment_rep) # Get log variance of latent z\n\n z = torch.randn([comment_rep.size(0), self.config.n_z]).to(mu.device) # Noise sampled from Normal(0,1)\n z = mu + z * torch.exp(0.5 * logvar) # Reparameterization trick\n kld = -0.5 * torch.sum(logvar - mu.pow(2) - logvar.exp() + 1, 1).mean() # Compute KL divergence loss\n else:\n comment_rep = None\n z = torch.randn([contexts.size(0), self.config.n_z]).to(contexts.device)\n kld = 0.0\n kld_select = 0.0\n\n # random\n # context_gates = torch.bernoulli(context_gates)\n\n # gumbel\n # context_gates = gumbel_softmax(torch.log(org_context_gates + 1e-10), self.config.tau)\n # context_gates = context_gates[:, :, 0]\n\n # best\n context_gates[context_gates > 0.5] = 1.0\n context_gates[context_gates <= 0.5] = 0.0\n\n post_context_gates = context_gates\n\n return contexts, state, 
post_context_gates, z, kld, comment_rep, kld_select, context_gates\n\n def forward(self, batch, use_cuda):\n if use_cuda:\n batch = move_to_cuda(batch)\n contexts, state, post_context_gates, z, kld, comment_rep, kld_select, context_gates = self.encode(batch)\n\n content_len, content_mask = batch.title_content_len, batch.title_content_mask\n tgt, tgt_len = batch.tgt, batch.tgt_len\n\n # get user\n h_user, selected_user = self.get_user(z)\n h_user_neg = torch.roll(h_user, 1, dims=0)\n # user loss\n rank_loss = (1 - torch.sum(h_user*z, dim=-1) + torch.sum(h_user_neg*z, dim=-1)).clamp(min=0).mean()\n reg_loss = torch.mm(self.get_user.use_emb.weight, self.get_user.use_emb.weight.t()) - torch.eye(10, dtype=h_user.dtype, device=h_user.device)\n reg_loss = torch.norm(reg_loss)\n\n # decoder\n outputs, final_state, attns = self.decoder(tgt[:, :-1], state, contexts, post_context_gates, h_user)\n # return outputs, gates, title_state[0], comment_state[0]\n\n l1_gates = (post_context_gates * content_mask.float()).sum(dim=-1) / content_len.float()\n pri_gates = (context_gates * content_mask.float()).sum(dim=-1) / content_len.float()\n user_norm = torch.norm(self.get_user.use_emb.weight, 2, dim=1).mean()\n return {\n 'outputs': outputs,\n 'l1_gates': l1_gates.mean(),\n 'comment_state': comment_rep,\n 'kld': kld,\n 'kld_select': kld_select,\n 'rank': rank_loss,\n 'reg': reg_loss,\n 'pri_gates': pri_gates.mean(),\n 'user_norm': user_norm,\n 'selected_user': selected_user,\n }\n\n def sample(self, batch, use_cuda):\n if use_cuda:\n batch = move_to_cuda(batch)\n contexts, state, context_gates, z, _, _, _, _ = self.encode(batch, True)\n h_user, _ = self.get_user(z, True) # get_user returns (h_user, selected_user); keep only the embedding\n\n bos = torch.ones(contexts.size(0)).long().fill_(self.vocab.word2id('[START]'))\n bos = bos.to(contexts.device)\n sample_ids, final_outputs = self.decoder.sample([bos], state, contexts, context_gates, h_user)\n\n return sample_ids, final_outputs[1]\n\n # TODO: fix beam search\n def beam_sample(self, batch, use_cuda, beam_size=1, n_best=1):\n # (1) Run the encoder on the src. 
Done!!!!\n if use_cuda:\n batch = move_to_cuda(batch)\n contexts, enc_state, context_gates, z, _, _, _, _ = self.encode(batch, True)\n h_user, _ = self.get_user(z, True) # unpack (h_user, selected_user); only the embedding is needed here\n\n batch_size = contexts.size(0)\n beam = [models.Beam(beam_size, n_best=1, cuda=use_cuda)\n for _ in range(batch_size)]\n\n # (1b) Initialize for the decoder.\n def rvar(a):\n return a.repeat(1, beam_size, 1)\n\n def unbottle(m):\n return m.view(beam_size, batch_size, -1)\n\n # Repeat everything beam_size times.\n # (batch, seq, nh) -> (beam*batch, seq, nh)\n contexts = contexts.repeat(beam_size, 1, 1)\n context_gates = context_gates.repeat(beam_size, 1)\n h_user = h_user.repeat(beam_size, 1)\n # (batch, seq) -> (beam*batch, seq)\n # src_mask = src_mask.repeat(beam_size, 1)\n # assert contexts.size(0) == src_mask.size(0), (contexts.size(), src_mask.size())\n # assert contexts.size(1) == src_mask.size(1), (contexts.size(), src_mask.size())\n dec_state = (rvar(enc_state[0]), rvar(enc_state[1])) # layer, beam*batch, nh\n # decState.repeat_beam_size_times(beam_size)\n\n # (2) run the decoder to generate sentences, using beam search.\n for i in range(self.config.max_tgt_len):\n\n if all((b.done() for b in beam)):\n break\n\n # Construct beam*batch next words.\n # Get all the pending current beam words and arrange for forward.\n # beam is batch_sized, so stack on dimension 1 not 0\n inp = torch.stack([b.getCurrentState() for b in beam], 1).contiguous().view(-1)\n if use_cuda:\n inp = inp.cuda()\n\n # Run one step.\n output, dec_state, attn = self.decoder.sample_one(inp, dec_state, contexts, context_gates, h_user)\n # decOut: beam x rnn_size\n\n # (b) Compute a vector of batch*beam word scores.\n output = unbottle(self.log_softmax(output))\n attn = unbottle(attn)\n # beam x tgt_vocab\n\n # (c) Advance each beam.\n # update state\n for j, b in enumerate(beam): # there are batch size beams!!! so here enumerate over batch\n b.advance(output.data[:, j], attn.data[:, j]) # output is beam first\n b.beam_update(dec_state, j)\n\n # (3) Package everything up.\n allHyps, allScores, allAttn = [], [], []\n\n for j in range(batch_size):\n b = beam[j]\n scores, ks = b.sortFinished(minimum=n_best)\n hyps, attn = [], []\n for i, (times, k) in enumerate(ks[:n_best]):\n hyp, att = b.getHyp(times, k)\n hyps.append(hyp)\n attn.append(att.max(1)[1])\n allHyps.append(hyps)\n allScores.append(scores)\n allAttn.append(attn)\n\n # print(allHyps)\n # print(allAttn)\n return allHyps, allAttn\n\n\ndef sample_gumbel(shape, eps=1e-20):\n U = torch.rand(shape)\n return -torch.log(-torch.log(U + eps) + eps)\n\n\ndef gumbel_softmax_sample(logits, temperature):\n y = logits + sample_gumbel(logits.size()).to(device=logits.device, dtype=logits.dtype)\n return F.softmax(y / temperature, dim=-1)\n\n\ndef gumbel_softmax(logits, temperature, hard=False):\n \"\"\"\n input: [*, n_class]\n return: [*, n_class] a one-hot vector\n \"\"\"\n y = gumbel_softmax_sample(logits, temperature)\n\n if not hard:\n return y\n\n shape = y.size()\n _, ind = y.max(dim=-1)\n y_hard = torch.zeros_like(y).view(-1, shape[-1])\n y_hard.scatter_(1, ind.view(-1, 1), 1)\n y_hard = y_hard.view(*shape)\n # Set gradients w.r.t. y_hard to gradients w.r.t. y\n return (y_hard - y).detach() + y","sub_path":"models/var_select_var_user_diverse2seq_test.py","file_name":"var_select_var_user_diverse2seq_test.py","file_ext":"py","file_size_in_byte":14146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
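The gumbel_softmax helpers at the end of the record above implement a straight-through estimator; the sketch below exercises the same pattern in isolation (shapes and temperature are illustrative, and sample_gumbel is re-declared so the snippet runs standalone).

# Sketch: straight-through Gumbel-Softmax, mirroring the helpers above.
import torch
import torch.nn.functional as F

def sample_gumbel(shape, eps=1e-20):
    # Gumbel(0, 1) noise via inverse transform sampling
    U = torch.rand(shape)
    return -torch.log(-torch.log(U + eps) + eps)

logits = torch.randn(4, 2, requires_grad=True)  # e.g. per-position keep/drop scores
y_soft = F.softmax((logits + sample_gumbel(logits.size())) / 0.5, dim=-1)
index = y_soft.argmax(dim=-1, keepdim=True)
y_hard = torch.zeros_like(y_soft).scatter_(-1, index, 1.0)
y = (y_hard - y_soft).detach() + y_soft  # one-hot on the forward pass, soft gradients
y.sum().backward()
assert logits.grad is not None  # gradients reach the logits despite the hard argmax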
+{"seq_id":"53943572","text":"from tkinter import *\n\nwindow = Tk()\n\n\ndef increrement_button():\n new_number = 1 + my_button.cget(\"text\")\n my_button.config(text=new_number)\n \n\nmy_button = Button(text=1, command=increrement_button)\nmy_button.pack()\n\nwindow.mainloop()\n","sub_path":"Chapter7/GUI_test/incriment_tk.py","file_name":"incriment_tk.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"590049674","text":"\"\"\" lolgather.py class\n\nThis class contains all the methods needed by the main script and by the lolaccount class\nto be able to gather league of legends data. It handles all API calls to the riot games api.\n\n\"\"\"\nimport json\nimport time\nfrom typing import Dict\nimport requests\nfrom .lolparser import LolParser\nfrom .lollogger import LolLogger\nfrom .lolconfig import LolConfig\n\n#pylint: disable=too-many-instance-attributes # This is okay.\nclass LolGather():\n \"\"\" Contains all the methods and functions needed by our other classes to return data from riot\n\n Attributes:\n base_summoner_url (str): riot games api base endpoint for summoner\n account_name_url (str): riot games api account name endpoint\n _base_match_url (str): riot games api base endpoint for match\n _matches_url (str): riot games api matches endpoint\n _match_url (str): riot games api individual match endpoint\n\n config (obj): Config Class that does all our config stuff.\n\n accounts (list: str): Holds a list of all accounts we collect data for\n match_id_list (list: str): Holds a list of games added this script run for logging\n \"\"\"\n\n def __init__(self, max_game_index=200):\n self.base_summoner_url = \"https://na1.api.riotgames.com/lol/summoner/v4/\"\n self.account_name_url = \"summoners/by-name/\"\n self._base_match_url = \"https://na1.api.riotgames.com/lol/match/v4/\"\n self._matches_url = \"matchlists/by-account/\"\n self._match_url = \"matches/\"\n self.max_game_index = max_game_index\n\n self.lolparser = LolParser()\n self.config = LolConfig()\n self.accounts = self.lolparser.get_summoner_names()\n self.new_match_data: Dict[int, Dict] = {}\n self.match_id_list = \"\"\n self.logger = LolLogger(self.config.log_file_name)\n\n def get_match_reference_dto(self, account_id: str) -> list:\n \"\"\" Gets an individual account's recently played match ids.\n\n Args:\n account_id: The account id associated with our account\n\n Returns:\n A list containing MatchReferenceDto objects from riot games.\n\n \"\"\"\n game_index = 0\n player_matches = []\n\n # keeps looping until we get to the max_game_index\n # a higher max game index makes us check further back in time.\n while game_index < self.max_game_index:\n try:\n player_matches_response = requests.get(''.join([self._base_match_url,\\\n self._matches_url, account_id, \"?beginIndex=\", str(game_index),\\\n \"&endIndex=\", str(game_index+100),\\\n \"&api_key=\", self.config.api_key]))\n\n player_matches_response.raise_for_status()\n player_matches_response_dict = json.loads(player_matches_response.text)\n\n if not player_matches_response_dict['matches']:\n break\n\n player_matches.append(player_matches_response_dict)\n game_index += 100\n except requests.exceptions.RequestException as exc:\n self.logger.log_critical(\"Get_account_info broke\")\n if exc.response.status_code == 403:\n self.logger.log_critical(\"Api key is probably expired\")\n elif exc.response.status_code == 429:\n self.logger.log_warning(\"Well that's an unfortunate timeout.\")\n time.sleep(10)\n else:\n self.logger.log_critical(exc)\n\n time.sleep(.1)\n\n return player_matches\n\n def get_match_data(self, match_id: int) -> str:\n \"\"\" Gets an individual matches data\n\n Args:\n match_id: The match id we're getting data for\n\n Returns:\n The text form of the json object we get from riot games,\n so that it can be stored in the json_data table.\n\n \"\"\"\n try:\n self.logger.log_info(''.join([\"getting match data for \", str(match_id)]))\n\n # add match_id to 
match list\n self.match_id_list = self.match_id_list + \" \" + str(match_id)\n\n time.sleep(.08) # this should keep us around the 20 per 1 second limit.\n\n matches_response = requests.get(''.join([self._base_match_url, self._match_url,\\\n str(match_id), \"?api_key=\", self.config.api_key]))\n\n matches_response.raise_for_status()\n match_json = json.loads(matches_response.text)\n\n self.new_match_data[match_id] = match_json\n\n return matches_response.text\n\n except requests.exceptions.RequestException as exc:\n self.logger.log_critical(exc)\n self.logger.log_warning(\"Get_match_data broke, trying again\")\n time.sleep(10)\n # propagate the retried result instead of discarding it and falling through\n return self.get_match_data(match_id)\n\n def get_account_id(self, account_name: str) -> str:\n \"\"\" Hits the riot API and gets our account_id based on account_name\n\n Args:\n account_name: the account name we're getting the account_id for\n\n Returns:\n The account_id associated with this account from riot\n \"\"\"\n try:\n account_response = requests.get(''.join([self.base_summoner_url,\\\n self.account_name_url, account_name, \"?api_key=\", self.config.api_key]))\n account_response.raise_for_status()\n account_data = json.loads(account_response.text)\n return account_data['accountId']\n except requests.exceptions.RequestException as exc:\n if exc.response.status_code == 403:\n self.logger.log_critical(\"Api key is probably expired\")\n\n self.logger.log_critical(\"get_user_id broke\")\n\n return \"\"\n\n @staticmethod\n def get_unstored_match_ids(prev_matches: list, new_matches: list,\\\n match_types: list) -> list:\n \"\"\" Compares a set of previous match ids with the data we return from riot to determine\n which matches we will need to get data for.\n\n Args:\n prev_matches: the list of matches we already have data for.\n new_matches: A list containing data objects that include recent game ids.\n match_types: A list of the match types to include in the comparison.\n\n Returns:\n A list of match ids a player was in, but that we don't have stored yet.\n \"\"\"\n\n unstored_match_ids = []\n\n for page in new_matches:\n for match in page['matches']:\n if match['queue'] in match_types:\n if match['gameId'] not in prev_matches:\n unstored_match_ids.append(match['gameId'])\n\n return unstored_match_ids\n","sub_path":"resources/python/classes/lolgather.py","file_name":"lolgather.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
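A quick check of the get_unstored_match_ids filter from the record above (assumes the LolGather class is importable; the game ids and queue numbers below are made up):

# Sketch: only match 333 is both new and of a wanted queue type, so only it survives.
prev_matches = [111, 222]
new_matches = [{'matches': [
    {'gameId': 111, 'queue': 420},  # already stored -> skipped
    {'gameId': 333, 'queue': 420},  # new, wanted queue -> kept
    {'gameId': 444, 'queue': 450},  # new, unwanted queue -> skipped
]}]

assert LolGather.get_unstored_match_ids(prev_matches, new_matches, [420]) == [333]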
+{"seq_id":"511990215","text":"import csv\nimport json\nimport uuid\nfrom urllib.parse import urlparse, quote, parse_qs\n\nimport pytest\nfrom flask import url_for\nfrom bs4 import BeautifulSoup\n\nfrom app import utils\nfrom io import StringIO\nfrom app.main.views.jobs import get_time_left, get_status_filters\nfrom tests import notification_json\nfrom freezegun import freeze_time\n\n\ndef _csv_notifications(notifications_json):\n csvfile = StringIO()\n csvwriter = csv.writer(csvfile)\n from app import format_datetime_24h, format_notification_status\n csvwriter.writerow(['Row number', 'Recipient', 'Template', 'Type', 'Job', 'Status', 'Time'])\n\n for x in notifications_json:\n csvwriter.writerow([\n int(x['job_row_number']) + 2 if 'job_row_number' in x and x['job_row_number'] else '',\n x['to'],\n x['template']['name'],\n x['template']['template_type'],\n x['job']['original_file_name'] if x['job'] else '',\n format_notification_status(x['status'], x['template']['template_type']),\n format_datetime_24h(x['created_at'])\n ])\n return csvfile.getvalue()\n\n\ndef test_get_jobs_should_return_list_of_all_real_jobs(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_jobs,\n mocker,\n):\n response = logged_in_client.get(url_for('main.view_jobs', service_id=service_one['id']))\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string == 'Uploaded files'\n jobs = [x.text for x in page.tbody.find_all('a', {'class': 'file-list-filename'})]\n assert len(jobs) == 4\n\n\ndef test_get_jobs_shows_page_links(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_jobs,\n mocker,\n):\n response = logged_in_client.get(url_for('main.view_jobs', service_id=service_one['id']))\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert 'Next page' in page.find('li', {'class': 'next-page'}).text\n assert 'Previous page' in page.find('li', {'class': 'previous-page'}).text\n\n\n@pytest.mark.parametrize(\n \"status_argument, expected_api_call\", [\n (\n '',\n [\n 'created', 'pending', 'sending',\n 'delivered',\n 'failed', 'temporary-failure', 'permanent-failure', 'technical-failure',\n ]\n ),\n (\n 'sending',\n ['sending', 'created', 'pending']\n ),\n (\n 'delivered',\n ['delivered']\n ),\n (\n 'failed',\n ['failed', 'temporary-failure', 'permanent-failure', 'technical-failure']\n )\n ]\n)\n@freeze_time(\"2016-01-01 11:09:00.061258\")\ndef test_should_show_page_for_one_job(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_service_template,\n mock_get_job,\n mocker,\n mock_get_notifications,\n fake_uuid,\n status_argument,\n expected_api_call,\n):\n response = logged_in_client.get(url_for(\n 'main.view_job',\n service_id=service_one['id'],\n job_id=fake_uuid,\n status=status_argument\n ))\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.text.strip() == 'thisisatest.csv'\n assert page.find('div', {'class': 'sms-message-wrapper'}).text.strip() == (\n '{}: Template content with & entity'.format(service_one['name'])\n )\n assert ' '.join(page.find('tbody').find('tr').text.split()) == (\n '07123456789 Delivered 1 January at 11:10am'\n )\n assert page.find('div', {'data-key': 'notifications'})['data-resource'] == url_for(\n 'main.view_job_updates',\n service_id=service_one['id'],\n job_id=fake_uuid,\n status=status_argument,\n )\n csv_link = 
page.find('a', {'download': 'download'})\n assert csv_link['href'] == url_for(\n 'main.view_job_csv',\n service_id=service_one['id'],\n job_id=fake_uuid,\n status=status_argument\n )\n assert csv_link.text == 'Download this report'\n assert page.find('span', {'id': 'time-left'}).text == 'Data available for 7 days'\n mock_get_notifications.assert_called_with(\n service_one['id'],\n fake_uuid,\n status=expected_api_call\n )\n\n\ndef test_get_jobs_should_tell_user_if_more_than_one_page(\n logged_in_client,\n fake_uuid,\n service_one,\n mock_get_job,\n mock_get_service_template,\n mock_get_notifications_with_previous_next,\n):\n response = logged_in_client.get(url_for(\n 'main.view_job',\n service_id=service_one['id'],\n job_id=fake_uuid,\n status=''\n ))\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.find('p', {'class': 'table-show-more-link'}).text.strip() == 'Only showing the first 50 rows'\n\n\ndef test_should_show_job_in_progress(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_service_template,\n mock_get_job_in_progress,\n mocker,\n mock_get_notifications,\n fake_uuid,\n):\n\n response = logged_in_client.get(url_for(\n 'main.view_job',\n service_id=service_one['id'],\n job_id=fake_uuid\n ))\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.find('p', {'class': 'hint'}).text.strip() == 'Report is 50% complete…'\n\n\n@freeze_time(\"2016-01-01T00:00:00.061258\")\ndef test_should_show_scheduled_job(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_service_template,\n mock_get_scheduled_job,\n mocker,\n mock_get_notifications,\n fake_uuid,\n):\n response = logged_in_client.get(url_for(\n 'main.view_job',\n service_id=service_one['id'],\n job_id=fake_uuid\n ))\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.find('main').find_all('p')[1].text.strip() == 'Sending will start today at midnight'\n assert page.find('input', {'type': 'submit', 'value': 'Cancel sending'})\n\n\ndef test_should_cancel_job(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n fake_uuid,\n mocker,\n):\n mock_cancel = mocker.patch('app.main.jobs.job_api_client.cancel_job')\n response = logged_in_client.post(url_for(\n 'main.cancel_job',\n service_id=service_one['id'],\n job_id=fake_uuid\n ))\n\n mock_cancel.assert_called_once_with(service_one['id'], fake_uuid)\n assert response.status_code == 302\n assert response.location == url_for('main.service_dashboard', service_id=service_one['id'], _external=True)\n\n\ndef test_should_not_show_cancelled_job(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_cancelled_job,\n mocker,\n fake_uuid,\n):\n response = logged_in_client.get(url_for(\n 'main.view_job',\n service_id=service_one['id'],\n job_id=fake_uuid\n ))\n\n assert response.status_code == 404\n\n\ndef test_should_show_not_show_csv_download_in_tour(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_service_template,\n mock_get_job,\n mocker,\n mock_get_notifications,\n fake_uuid,\n):\n response = logged_in_client.get(url_for(\n 'main.view_job',\n service_id=service_one['id'],\n job_id=fake_uuid,\n help=3\n ))\n\n assert response.status_code == 200\n assert url_for(\n 'main.view_job_updates',\n service_id=service_one['id'],\n job_id=fake_uuid,\n status='',\n help=3\n 
).replace('&', '&') in response.get_data(as_text=True)\n assert url_for(\n 'main.view_job_csv',\n service_id=service_one['id'],\n job_id=fake_uuid\n ) not in response.get_data(as_text=True)\n\n\n@freeze_time(\"2016-01-01 00:00:00.000001\")\ndef test_should_show_updates_for_one_job_as_json(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_notifications,\n mock_get_job,\n mocker,\n fake_uuid,\n):\n job_json = mock_get_job(service_one['id'], fake_uuid)['data']\n response = logged_in_client.get(url_for('main.view_job_updates', service_id=service_one['id'], job_id=fake_uuid))\n\n assert response.status_code == 200\n content = json.loads(response.get_data(as_text=True))\n assert 'sending' in content['counts']\n assert 'delivered' in content['counts']\n assert 'failed' in content['counts']\n assert 'Recipient' in content['notifications']\n assert '07123456789' in content['notifications']\n assert 'Status' in content['notifications']\n assert 'Delivered' in content['notifications']\n assert '12:01am' in content['notifications']\n assert 'Sent by Test User on 1 January at midnight' in content['status']\n\n\n@pytest.mark.parametrize(\n \"message_type,page_title\", [\n ('email', 'Emails'),\n ('sms', 'Text messages')\n ]\n)\n@pytest.mark.parametrize(\n \"status_argument, expected_api_call\", [\n (\n '',\n [\n 'created', 'pending', 'sending',\n 'delivered',\n 'failed', 'temporary-failure', 'permanent-failure', 'technical-failure',\n ]\n ),\n (\n 'sending',\n ['sending', 'created', 'pending']\n ),\n (\n 'delivered',\n ['delivered']\n ),\n (\n 'failed',\n ['failed', 'temporary-failure', 'permanent-failure', 'technical-failure']\n )\n ]\n)\n@pytest.mark.parametrize(\n \"page_argument, expected_page_argument\", [\n (1, 1),\n (22, 22),\n (None, 1)\n ]\n)\ndef test_can_show_notifications(\n logged_in_client,\n service_one,\n mock_get_notifications,\n mock_get_detailed_service,\n message_type,\n page_title,\n status_argument,\n expected_api_call,\n page_argument,\n expected_page_argument,\n):\n response = logged_in_client.get(url_for(\n 'main.view_notifications',\n service_id=service_one['id'],\n message_type=message_type,\n status=status_argument,\n page=page_argument))\n assert response.status_code == 200\n content = response.get_data(as_text=True)\n notifications = notification_json(service_one['id'])\n notification = notifications['notifications'][0]\n assert notification['to'] in content\n assert notification['status'] in content\n assert notification['template']['name'] in content\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page_title in page.h1.text.strip()\n\n path_to_json = page.find(\"div\", {'data-key': 'notifications'})['data-resource']\n\n url = urlparse(path_to_json)\n assert url.path == '/services/{}/notifications/{}.json'.format(service_one['id'], message_type)\n query_dict = parse_qs(url.query)\n if status_argument:\n assert query_dict['status'] == [status_argument]\n if expected_page_argument:\n assert query_dict['page'] == [str(expected_page_argument)]\n\n mock_get_notifications.assert_called_with(\n limit_days=7,\n page=expected_page_argument,\n service_id=service_one['id'],\n status=expected_api_call,\n template_type=[message_type]\n )\n\n json_response = logged_in_client.get(url_for(\n 'main.get_notifications_as_json',\n service_id=service_one['id'],\n message_type=message_type,\n status=status_argument\n ))\n json_content = json.loads(json_response.get_data(as_text=True))\n assert json_content.keys() == {'counts', 
'notifications'}\n\n\ndef test_should_show_notifications_for_a_service_with_next_previous(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_notifications_with_previous_next,\n mock_get_detailed_service,\n mocker,\n):\n response = logged_in_client.get(url_for(\n 'main.view_notifications',\n service_id=service_one['id'],\n message_type='sms',\n page=2\n ))\n assert response.status_code == 200\n content = response.get_data(as_text=True)\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n next_page_link = page.find('a', {'rel': 'next'})\n prev_page_link = page.find('a', {'rel': 'previous'})\n assert (\n url_for('main.view_notifications', service_id=service_one['id'], message_type='sms', page=3) in\n next_page_link['href']\n )\n assert 'Next page' in next_page_link.text.strip()\n assert 'page 3' in next_page_link.text.strip()\n assert (\n url_for('main.view_notifications', service_id=service_one['id'], message_type='sms', page=1) in\n prev_page_link['href']\n )\n assert 'Previous page' in prev_page_link.text.strip()\n assert 'page 1' in prev_page_link.text.strip()\n\n\n@pytest.mark.parametrize(\n \"job_created_at, expected_message\", [\n (\"2016-01-10 11:09:00.000000+00:00\", \"Data available for 7 days\"),\n (\"2016-01-04 11:09:00.000000+00:00\", \"Data available for 1 day\"),\n (\"2016-01-03 11:09:00.000000+00:00\", \"Data available for 11 hours\"),\n (\"2016-01-02 23:59:59.000000+00:00\", \"Data no longer available\")\n ]\n)\n@freeze_time(\"2016-01-10 12:00:00.000000\")\ndef test_time_left(job_created_at, expected_message):\n assert get_time_left(job_created_at) == expected_message\n\n\nSTATISTICS = {\n 'sms': {\n 'requested': 6,\n 'failed': 2,\n 'delivered': 1\n }\n}\n\n\ndef test_get_status_filters_calculates_stats(client):\n ret = get_status_filters({'id': 'foo'}, 'sms', STATISTICS)\n\n assert {label: count for label, _option, _link, count in ret} == {\n 'total': 6,\n 'sending': 3,\n 'failed': 2,\n 'delivered': 1\n }\n\n\ndef test_get_status_filters_in_right_order(client):\n ret = get_status_filters({'id': 'foo'}, 'sms', STATISTICS)\n\n assert [label for label, _option, _link, _count in ret] == [\n 'total', 'sending', 'delivered', 'failed'\n ]\n\n\ndef test_get_status_filters_constructs_links(client):\n ret = get_status_filters({'id': 'foo'}, 'sms', STATISTICS)\n\n link = ret[0][2]\n assert link == '/services/foo/notifications/sms?status={}'.format(quote('sending,delivered,failed'))\n\n\ndef test_html_contains_notification_id(\n logged_in_client,\n service_one,\n active_user_with_permissions,\n mock_get_notifications,\n mock_get_detailed_service,\n mocker,\n):\n response = logged_in_client.get(url_for(\n 'main.view_notifications',\n service_id=service_one['id'],\n message_type='sms',\n status='')\n )\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n notifications = page.tbody.find_all('tr')\n for tr in notifications:\n assert uuid.UUID(tr.attrs['id'])\n","sub_path":"tests/app/main/views/test_jobs.py","file_name":"test_jobs.py","file_ext":"py","file_size_in_byte":14944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
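The tests in the record above lean on a small set of BeautifulSoup patterns; the sketch below runs the same patterns against a hand-written HTML fragment (illustrative markup, not real app output):

# Sketch: the find/attribute patterns used by the assertions above.
import uuid
from bs4 import BeautifulSoup

html = """
<main>
  <h1>Uploaded files</h1>
  <div data-key="notifications" data-resource="/services/123/notifications.json"></div>
  <table><tbody>
    <tr id="6ce466d0-fd6a-11e5-82f5-e0accb9d11a6"><td>07123456789</td></tr>
  </tbody></table>
</main>
"""
page = BeautifulSoup(html, 'html.parser')
assert page.h1.string == 'Uploaded files'
assert page.find('div', {'data-key': 'notifications'})['data-resource'].endswith('.json')
for tr in page.tbody.find_all('tr'):
    uuid.UUID(tr.attrs['id'])  # raises ValueError if the row id is not a UUID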
+{"seq_id":"219922089","text":"\"\"\"\r\nThis bot was created by Akhil Srivastava and Nicolas Buongiorno, students of Mr. Wong\r\n\"\"\"\r\nimport logging\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.DEBUG)\r\n\r\n# Help Data.\r\nfaq = {\r\n \"Nanotechnology\": \"What is Nanotechnology? What is the major goal for people in Nanotechnology? What is a person job in Nanotechnology? What kinds of jobs are available for people that are interested in Nanotechnology? How could Nanotechnology make a difference in the future?\",\r\n \"Nanorobotics\": \"What is Nanorobotics? Why should people be interested in Nanorobotics? What is the major goal for people in Nanorobotics? How would Nanorobotics make a difference in the future?\",\r\n \"Biorobotics\": \"What is Biorobotics? Why should people be interested in Biorobotics? What is the major goal for Biorobotics? How would Biorobotics make a difference in the future?\",\r\n \"Biomedical Engineering\": \"What is Biomedical Engineering? What are some notable examples of how Biomedical Engineering was utilized to assist people? How could Biomedical Engineering make a impact in the future?\",\r\n \"default\": \"The only topics are Nanotechnology, Nanorobotics, Biorobotics, and Biomedical Engineering\",\r\n \"elicit_slot\": \"What topcs do you want to talk about?\"\r\n}\r\n\r\n\r\n# Utility Functions.\r\n\r\n\r\ndef close_response(session_attributes, fulfillment_state, message):\r\n \"\"\"\r\n Ensures response to Lex is in a compatible format.\r\n\r\n For more info on the valid lambda response format read the AWS Docs page:\r\n https://docs.aws.amazon.com/lex/latest/dg/lambda-input-response-format.html\r\n\r\n :param session_attributes: intent_request['sessionAttributes'] pass-through\r\n :param fulfillment_state: fulfillmentState of the dialogAction typically Fulfilled or Failed.\r\n :param message: Properly formatted Dictionary for the message of the dialogAction.\r\n :return:\r\n \"\"\"\r\n\r\n response = {\r\n 'sessionAttributes': session_attributes,\r\n 'dialogAction': {\r\n 'type': 'Close',\r\n 'fulfillmentState': fulfillment_state,\r\n 'message': message\r\n }\r\n }\r\n\r\n return response\r\n\r\n\r\ndef elicit_slot_response(session_attributes, intent_name, slots, slot_to_elicit, message):\r\n \"\"\"\r\n Asks the user for information about a slot.\r\n\r\n If the user's slots don't validate, then send this response along with the\r\n slot that is missing to ask the user for more information.\r\n :param session_attributes: intent_request['sessionAttributes'] pass-through\r\n :param intent_name: intent_request['intentName'] pass-through\r\n :param slots: Current Slot state\r\n :param slot_to_elicit: Missing Slot state\r\n :param message: How to ask for more information.\r\n :return:\r\n \"\"\"\r\n return {\r\n 'sessionAttributes': session_attributes,\r\n 'dialogAction': {\r\n 'type': 'ElicitSlot',\r\n 'intentName': intent_name,\r\n 'slots': slots,\r\n 'slotToElicit': slot_to_elicit,\r\n 'message': message\r\n }\r\n }\r\n\r\n\r\n# Intents.\r\n\r\n\r\ndef get_help_intent(intent_request):\r\n \"\"\"\r\n This is where the Intents function logic gets defined.\r\n\r\n :param intent_request: Lambda Event\r\n :return:\r\n \"\"\"\r\n logger.debug(intent_request)\r\n topic_data = intent_request['currentIntent']['slots']\r\n\r\n # If the Topic slot is empty then we return an response to elicit the\r\n # information for the slot.\r\n if topic_data['Topic'] is None:\r\n return elicit_slot_response(\r\n 
session_attributes=intent_request['sessionAttributes'],\r\n intent_name=intent_request['currentIntent']['name'],\r\n slots=topic_data,\r\n slot_to_elicit=\"Topic\",\r\n message={\r\n 'contentType': 'PlainText',\r\n 'content': faq['elicit_slot']}\r\n )\r\n\r\n # If the Topic is filled but it isn't a topic that we have defined in our\r\n # faq dictionary then we need to inform the user and ask the user for\r\n # additional information.\r\n if topic_data['Topic'] not in faq:\r\n return elicit_slot_response(\r\n session_attributes=intent_request['sessionAttributes'],\r\n intent_name=intent_request['currentIntent']['name'],\r\n slots=topic_data,\r\n slot_to_elicit=\"Topic\",\r\n message={\r\n 'contentType': 'PlainText',\r\n 'content': faq['default']}\r\n )\r\n\r\n # By default we fulfill the request with info from the dictionary.\r\n return close_response(\r\n session_attributes=intent_request['sessionAttributes'],\r\n fulfillment_state='Fulfilled',\r\n message={\r\n 'contentType': 'PlainText',\r\n 'content': faq[topic_data['Topic']]\r\n })\r\n\r\n\r\n# Dispatch Handlers.\r\n\r\n\r\ndef dispatch(intent_request):\r\n \"\"\"\r\n Helper Intent Dispatcher.\r\n\r\n Wraps the Intent in helpful logging and exception handling. This way a\r\n baseline of debug information will be sent to the log.\r\n \"\"\"\r\n\r\n logger.debug(\r\n 'dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\r\n\r\n intent_name = intent_request['currentIntent']['name']\r\n\r\n # Dispatch to your bot's intent to you intent handlers.\r\n # Each Lex Intent will need to be added to this Intent Dispatcher.\r\n if intent_name == 'Topics':\r\n return get_help_intent(intent_request)\r\n\r\n raise Exception('Intent with name ' + intent_name + ' not supported')\r\n\r\n\r\ndef lambda_handler(event, context):\r\n \"\"\"\r\n Main Intent Handler\r\n\r\n Provides a single endpoint for multiple intents in a single AWS Lambda\r\n Function definition.\r\n \"\"\"\r\n\r\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\r\n\r\n return dispatch(event)\r\n","sub_path":"Biomedical_Engineering.py","file_name":"Biomedical_Engineering.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
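A local smoke test for the handler in the record above, assuming the module is importable; the event is a hand-built Lex v1 request and the bot name and user id are illustrative:

# Sketch: drive lambda_handler with a synthetic Lex event.
event = {
    'bot': {'name': 'StemFaqBot'},           # name is invented for the test
    'userId': 'local-test',
    'sessionAttributes': {},
    'currentIntent': {
        'name': 'Topics',
        'slots': {'Topic': 'Nanorobotics'},  # a key present in the faq dict
    },
}

response = lambda_handler(event, context=None)
assert response['dialogAction']['fulfillmentState'] == 'Fulfilled'
print(response['dialogAction']['message']['content'])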
+{"seq_id":"348721546","text":"# -*- encoding: utf-8 -*-\nimport copy\nimport string\nimport urllib\nimport zlib\n\nimport brotli\nimport msgpack\nimport simplejson as json\n\nfrom common.utils import decider\nfrom common.utils import time_utils\nfrom common.utils import url_utils\nfrom common.utils.managed_datastructures import ManagedHashMap\nfrom common.utils.seo_utils import build_metric_tags\nfrom config import locale as locale_config\nfrom config.cache import KLP_FEED_CONN\nfrom config.cache import KLP_FEED_EXPIRE_TIME_SECONDS\nfrom config.cache import KLP_FEED_FORMAT\nfrom config.cache import KLP_FEED_PREFIX\nfrom config.cache import KLP_INTEREST_CONN\nfrom config.cache import KLP_INTEREST_PREFIX\nfrom config.cache import KLP_INTEREST_FORMAT\nfrom config.cache import KLP_INTEREST_EXPIRE_TIME_SECONDS\nfrom data_clients.memcache import create_key_gen\nfrom data_clients.memcache import MemcacheClient\nfrom data_clients import terrapin_thrift_client\nfrom logger import kafka_event\nfrom logger.statsd import stat_client\nfrom logger.statsd import opentsdb_client_v2\nfrom services.utils.barrier import BarrierAll\nfrom settings import DEBUG\nfrom settings import IS_EXTERNAL_DEV\nfrom webapp import metatags\nfrom webapp.resources.related_interests_resource import RelatedInterestsResource\nfrom webapp.utils import metatag_keywords\nfrom webapp.resources import seo_utils\nfrom webapp.resources import unauth_open_utils\nfrom webapp.resources.base_resource import APIStatus\nfrom webapp.resources.base_resource import BaseResource\nfrom webapp.resources.feed_resource import BaseFeedOfInterestsResource\nfrom webapp.resources.feed_resource import BaseFeedOfPinsResource\nfrom webapp.resources.node_resource_stub import NodeResourceStub\nfrom webapp.resources.pin_resource import PinResource\nfrom webapp.utils import app_interstitial\n\nINTEREST_FEED_RESOURCE_BATCH_EXPR = 'pin_join_manager_batch'\n\nklp_feed_cache = MemcacheClient(\n KLP_FEED_CONN,\n create_key_gen(KLP_FEED_PREFIX, KLP_FEED_FORMAT, force_hash=True),\n KLP_FEED_EXPIRE_TIME_SECONDS)\n\ninterest_cache = MemcacheClient(\n KLP_INTEREST_CONN,\n create_key_gen(KLP_INTEREST_PREFIX, KLP_INTEREST_FORMAT, force_hash=True),\n KLP_INTEREST_EXPIRE_TIME_SECONDS)\n\nSEO_FAKE_KLP = 'seo_fake_klp_log'\n\n\nclass InterestResource(BaseResource):\n field_sets = {\n 'default': (\n 'interest.background_color',\n 'interest.breadcrumbs',\n 'interest.canonical_term',\n 'interest.canonical_url',\n 'interest.id',\n 'interest.image_signature',\n 'interest.image_size',\n 'interest.image_source',\n 'interest.images[474x, orig]',\n 'interest.is_new',\n 'interest.is_followed',\n 'interest.is_interest',\n 'interest.is_klp_term',\n 'interest.key',\n 'interest.name',\n 'interest.type',\n 'interest.url_name',\n 'interest.has_related',\n 'interest.klp_has_related',\n 'interest.follower_count',\n 'interest.feed_update_time',\n 'interest.translation_urls',\n ),\n 'unauth_react': (\n 'interest.id',\n 'interest.name',\n 'interest.canonical_term',\n 'interest.canonical_url',\n 'interest.is_interest',\n 'interest.is_klp_term',\n 'interest.images[orig]',\n 'interest.url_name',\n 'interest.breadcrumbs',\n 'interest.translation_urls',\n )\n }\n\n ja_enabled_editorial_klps = ManagedHashMap('seo',\n 'editorial_klp_descriptions',\n 'Japanese Editorial KLP Descriptions',\n 'Descriptions for Enabled Japanese Editorial KLPs',\n use_config_backend=True)\n\n def _get_hreflang_data(self, data):\n \"\"\"Return a dict used to render the appropriate hreflang meta tags on the page\n\n (1) If a 
term is translated for a given language, use that term and the interest ID in the URL\n e.g. /explore/桜-913796643374/\n (2) If the translation is missing or identical to the original term, use the original URL\n e.g. /explore/cherry-blossoms/\n -- update: 2015-09-01\n (3) The (1) and (2) failed to consider the i18n interest. i18n interest should also point to\n /explore/{term}-{id} to align with the URL.\n \"\"\"\n hreflang_data = {}\n for locale, url in data.get('translation_urls').iteritems():\n hreflang_data[locale] = url\n\n if int(data.get('id')) == (data.get('canonical_term') or {}).get('id', 0):\n default_url = data.get('canonical_url')\n else:\n default_url = '/explore/%s/' % data.get('url_name')\n\n for locale in locale_config.LOCALE_TO_SUBDOMAIN.keys():\n if locale not in hreflang_data:\n hreflang_data[locale] = default_url\n hreflang_data['en-US'] = default_url\n return hreflang_data\n\n def _get_cache_settings(self):\n use_cache = ((decider.decide_experiment('klp_interest_cache') and\n self.context.experiments.v2_activate_experiment('interest_cache') != 'disabled') and\n self.context.is_full_page)\n skip_cache = (self.context.experiments.v2_activate_experiment('interest_cache') == 'invalidate' or\n decider.decide_experiment('klp_interest_cache_invalidate'))\n return use_cache, skip_cache\n\n def _get_cached_response(self, interest, field_set_key, skip_cache):\n cache_key = (interest, self.context.locale.lower(), field_set_key)\n if not skip_cache:\n cached_result = interest_cache.get(cache_key)\n if cached_result:\n return cache_key, msgpack.unpackb(cached_result, encoding=\"utf-8\")\n return cache_key, None\n\n def get(self):\n # If in the auth KLP search group v2 short circuit resource to avoid network call if we can parse interest_name\n main_module_name = self.options.get('main_module_name', '')\n in_auth_klp_search_v2 = self.context.experiments.v2_in_group('web_usm_auth_klp_refresh', ['enabled_search_v2'])\n eligible_for_klp_search_v2 = (not self.context.is_mobile_agent and\n main_module_name == 'InterestFeedPage' and in_auth_klp_search_v2)\n\n if eligible_for_klp_search_v2:\n interest_name = url_utils.parse_interest_name(self.options['interest'])\n if interest_name:\n return {'data': {'name': interest_name}}\n\n start_time_ms = time_utils.now_millis()\n if self.options['interest'] is not None:\n self.options['interest'] = self.options['interest'].lower()\n interest_id = url_utils.parse_interest_id(self.options['interest'])\n interest = interest_id if interest_id else self.options['interest']\n field_set_key = self.get_field_set_key()\n resp = None\n use_cache, skip_cache = self._get_cache_settings()\n\n if use_cache:\n cache_key, resp = self._get_cached_response(interest, field_set_key, skip_cache)\n\n if resp:\n data = resp.get('data')\n else:\n klp_start_time_ms = time_utils.now_millis()\n resp = self.request('/v3/klp/%s/' % interest, field_set_key=field_set_key)\n if resp.get('status') != APIStatus.SUCCESS:\n return resp\n klp_end_start_time_ms = time_utils.now_millis()\n\n opentsdb_client_v2.timing('denzel.resource.InterestResource.api.klp',\n klp_end_start_time_ms - klp_start_time_ms, sample_rate=1,\n tags=build_metric_tags(self.context))\n\n data = resp.get('data')\n\n # Use entire interest query to extract KLP if id is not a valid KLP id\n if interest_id and data and data['id'] == '0':\n interest = self.options['interest']\n origin_resp = self.request('/v3/klp/%s/' % interest,\n field_set_key=field_set_key,\n ignore_bookmark=True)\n if origin_resp.get('status') != 
APIStatus.SUCCESS:\n return origin_resp\n data = origin_resp.get('data')\n\n if self.context.is_mobile_agent and field_set_key != 'unauth_react':\n self.context.app_interstitial_data = app_interstitial.get_interest_data(data)\n\n # All the logic below that sets page-level redirects and meta tags should only execute\n # on /explore/ pages (if InterestResource is the main module's resource)\n # TODO (jean): Work with Web team on a better solution. This problem affects other resources too.\n if not self.context.visible_url.startswith('/explore/'):\n return resp\n\n # This could happen if the interest is a banned term\n if not data:\n resp['data'] = {'name': self.options['interest'].replace('-', ' ')}\n return resp\n\n canonical_term_id = (data.get('canonical_term') or {}).get('id', 0)\n\n # Set and 301 redirect to the canonical URL as appropriate.\n # (1) For terms with translations, the canonical URL for a locale (or subdomain)\n # will contain the translation for that locale.\n # Examples: /explore/asia-travel/ => /explore/asienreisen-924772690748/ for de\n # /explore/asia-travel/ => explore/アジア旅行-924772690748/ for ja\n # (2) Some terms canonicalize to other terms because they are duplicates,\n # such as mis-spellings or abbreviations.\n # In this case, we do not change the behavior for auth interests,\n # since the user may be following the interest.\n # Examples: /explore/httyd/ => /explore/how-to-train-your-dragon/ (interest, unauth only)\n # /explore/10th-doctor/ => /explore/tenth-doctor/ (interest, unauth only)\n # /explore/1-direction-cakes/ => /explore/one-direction-cakes/ (not interest, auth + unauth)\n # /explore/amish-bread/ => /explore/amish-bread-recipes/ (not interest, auth + unauth)\n if (canonical_term_id and\n # Non-English canonical interests have canonical_term_id = id\n # Example: /explore/大福/ (no redirect)\n data.get('id') and canonical_term_id != int(data.get('id')) and\n # Do not change interest URLs for auth users\n (not data.get('is_interest') or not self.context.is_authenticated)):\n canonical_resp = self.request('/v3/klp/%s/' % canonical_term_id,\n field_set_key=field_set_key,\n ignore_bookmark=True)\n data = canonical_resp.get('data') or {}\n resp['data'] = data\n\n is_debug_mode = False\n if self.context.request_debug and self.context.request_debug.get('deb_d'):\n is_debug_mode = True\n resp['data']['is_debug_mode'] = True\n\n # For unauth and 'no_gift_wrap=true' we won't show gift wrap. 
This is necessary for KLP Pipeline\n # when we use human eval for feed relevance.\n if not self.context.is_authenticated and (self.options.get('no_gift_wrap') == 'true' or is_debug_mode):\n resp['data']['no_gift_wrap'] = True\n\n # only cache results for terms in the dictionary\n is_dictionary_term = data.get('id') and data.get('id') != '0'\n if use_cache and is_dictionary_term:\n interest_cache.set(cache_key, msgpack.packb(resp, use_bin_type=True, encoding=\"utf-8\"))\n\n if (not self.context.is_authenticated and data.get('name')\n and string.capwords(data.get('name')) == data.get('name')):\n group = self.context.activate_seo_or_unauth_experiment('klp_title_format_change')\n if group and group.startswith('enabled'):\n data['name'] = string.capitalize(data.get('name').lower())\n\n determine_explore_redirect(data, self.options.get('interest'), self.context, resp)\n if self.context.redirect:\n return resp\n\n if not data.get('is_klp_term'):\n resp['data'] = {\n 'id': '0',\n 'name': data.get('name', ''),\n 'is_interest': data.get('is_interest', False),\n 'is_klp_term': data.get('is_klp_term', False),\n }\n stat_client.increment('seo.klp.non_klp_term', sample_rate=1)\n kafka_event.log_as_json(SEO_FAKE_KLP, {'name': data.get('name', \"\"), 'locale': self.context.locale})\n return resp\n\n end_time_ms = time_utils.now_millis()\n opentsdb_client_v2.timing('denzel.resource.InterestResource.get',\n end_time_ms - start_time_ms, sample_rate=0.05,\n tags=build_metric_tags(self.context))\n\n name = data.get('name')\n if self.ja_enabled_editorial_klps.contains(name):\n resp['data']['description'] = self.ja_enabled_editorial_klps.get(name)\n\n return resp\n\n def _get_interest_for_metadata(self, interest, field_set_key):\n response = None\n cache_hit = True\n start_time_ms = time_utils.now_millis()\n use_cache, skip_cache = self._get_cache_settings()\n if use_cache and not skip_cache:\n _, response = self._get_cached_response(interest, field_set_key, skip_cache)\n\n if response is None:\n cache_hit = False\n response = self.request(\n '/v3/klp/%s/' % interest,\n data={'fields': 'interest.name,interest.images[orig],interest.is_klp_term,interest.is_interest,' +\n 'interest.id,interest.translation_urls,interest.url_name,interest.canonical_term,' +\n 'interest.canonical_url'},\n ignore_bookmark=True)\n\n end_time_ms = time_utils.now_millis()\n opentsdb_client_v2.timing('denzel.resource.InterestResource.get_page_metadata.api',\n end_time_ms - start_time_ms, sample_rate=0.05,\n tags=build_metric_tags(self.context, {'cached': str(cache_hit)}))\n return response\n\n def get_page_metadata(self):\n start_time_ms = time_utils.now_millis()\n interest_id = url_utils.parse_interest_id(self.options['interest'])\n interest = interest_id if interest_id else self.options['interest']\n field_set_key = self.get_field_set_key()\n\n # Temporary fix for /explore/explore page until api would get rid of the v3/interests/explore/ endpoint.\n # TODO (vadim): follow up with Yan Sun after 9/30/15 if android still have a dependency on it.\n if interest == 'explore':\n return {'robots': 'noindex'}\n\n response = self._get_interest_for_metadata(interest, field_set_key)\n\n if response.get('status') == APIStatus.SUCCESS:\n interest_data = response.get('data', {})\n\n canonical_term_id = (interest_data.get('canonical_term') or {}).get('id', 0)\n if canonical_term_id and canonical_term_id != int(interest_data.get('id')):\n response = self._get_interest_for_metadata(str(canonical_term_id), field_set_key)\n if response.get('status') == 
APIStatus.SUCCESS:\n interest_data = response.get('data', {})\n\n # This could happen if the interest is a banned term\n if not interest_data:\n return {'robots': 'noindex'}\n\n metadata = metatags.get_interest_metadata(\n self.context.get('full_path'),\n interest_data.get('name'),\n interest_data.get('images', {}).get('orig', {}).get('url'))\n\n # We don't want non-dictionary terms to be indexed, even if we need\n # to continue to support certain pages that have already been\n # linked to in hashtag descriptions and followed by users\n # NOTE: this was originally is_seo, but we changed it when we\n # we removed \"seo\" from our client-visable source\n if not interest_data.get('is_klp_term'):\n metadata['robots'] = 'noindex'\n\n metatag_keywords.update_with_keywords(self, metadata, interest_id=interest_data.get('id'))\n\n metatags.update_title_text(metadata)\n metatags.log_page_title(self, metatags.PAGE_TYPE_INTEREST, metadata['title'], self.context['full_path'])\n\n metadata['hreflang_data'] = self._get_hreflang_data(interest_data)\n if self.context.is_bot == 'true':\n amp_klp_group = self.context.activate_seo_or_unauth_experiment('amp_klp')\n else:\n amp_klp_group = self.context.get_seo_experiment_group('seo_amp_klp')\n if not self.context['is_amp'] and amp_klp_group and amp_klp_group.startswith('enabled'):\n metadata['links'] = \\\n [('amphtml', self.context.get_canonical_absolute_url().replace('/explore/', '/amp/explore/', 1))]\n\n end_time_ms = time_utils.now_millis()\n opentsdb_client_v2.timing('denzel.resource.InterestResource.get_page_metadata',\n end_time_ms - start_time_ms, sample_rate=0.1,\n tags=build_metric_tags(self.context))\n return metadata\n\n\ndef determine_explore_redirect(data, interest, context, resp):\n if not data.get('is_klp_term'):\n if context.experiments.v2_activate_experiment('vase_carousel_explore_links') == 'holdout':\n stat_client.increment('seo.klp.redirect.search', sample_rate=1)\n context.redirect = '/search/?q=' + urllib.quote(interest.encode('utf-8'))\n return\n if int(data.get('id')) and context.is_full_page:\n context['canonical_url'] = data.get('canonical_url')\n if context['canonical_url'] != '/explore/%s/' % interest.replace(' ', '-'):\n options = []\n if 'nogw=true' in context['full_path']:\n options.append(\"nogw=true\")\n if options:\n context.redirect = \"%s?%s\" % (context['canonical_url'],\n \"&\".join(options))\n else:\n context.redirect = context['canonical_url']\n stat_client.increment('seo.klp.redirect.canonical', sample_rate=1)\n\n\nclass TopicResource(BaseResource):\n field_sets = {\n 'default': (\n 'interest.background_color',\n 'interest.canonical_term',\n 'interest.canonical_url',\n 'interest.id',\n 'interest.image_signature',\n 'interest.image_size',\n 'interest.image_source',\n 'interest.images[474x, orig]',\n 'interest.is_new',\n 'interest.is_followed',\n 'interest.is_interest',\n 'interest.is_klp_term',\n 'interest.key',\n 'interest.name',\n 'interest.type',\n 'interest.url_name',\n 'interest.has_related',\n 'interest.klp_has_related',\n 'interest.follower_count',\n 'interest.feed_update_time',\n )\n }\n\n def get(self):\n if self.options['interest'] is not None:\n self.options['interest'] = self.options['interest'].lower()\n interest_id = url_utils.parse_interest_id(self.options['interest'])\n interest = interest_id if interest_id else self.options['interest']\n\n # Temporary fix for /explore/explore page until api would get rid of the v3/interests/explore/ endpoint.\n # TODO (vadim): follow up with Yan Sun after 9/30/15 if 
Android still has a dependency on it.\n if interest == 'explore':\n resp = {}\n resp['data'] = {'name': self.options['interest'].replace('-', ' ')}\n return resp\n\n field_set = 'default'\n\n resp = self.request('/v3/interests/%s/' % interest, field_set_key=field_set)\n if resp.get('status') != APIStatus.SUCCESS:\n return resp\n\n data = resp.get('data')\n if self.context.is_mobile_agent:\n self.context.app_interstitial_data = app_interstitial.get_interest_data(data)\n\n return resp\n\n\nSEARCH_BOOST_COUNTRIES = frozenset(['US', 'FR', 'GB'])  # a set of country codes, not of characters\n\n\nclass BaseInterestsFeedResource(BaseResource):\n\n default_field_set_key = 'pins'\n field_sets = {\n 'pins': BaseFeedOfPinsResource.field_sets['interest_grid_item'],\n 'unauth_react': PinResource.field_sets['unauth_react_grid_item']\n }\n\n def get_field_set(self):\n \"\"\"Override\"\"\"\n fields = super(BaseInterestsFeedResource, self).get_field_set()\n field_set_key = self.get_field_set_key()\n\n if field_set_key == 'pins':\n fields += PinResource.experiments_additional_field_sets['did_it']\n\n return fields\n\n def _get_visual_data(self, data):\n start_time_ms = time_utils.now_millis()\n result = seo_utils.add_visual_data(self, data)\n opentsdb_client_v2.timing('denzel.resource.InterestsFeedResource.visualdata.get',\n time_utils.now_millis() - start_time_ms, sample_rate=1,\n tags=build_metric_tags(self.context))\n return result\n\n def _filter_duplicate_images(self, data):\n filtered_data = []\n images = set()\n for pin in data:\n if pin and pin.get('image_signature') and pin.get('image_signature') not in images:\n images.add(pin.get('image_signature'))\n filtered_data.append(pin)\n return filtered_data\n\n def _get_cached_klp_feed(self, interest_id, interest_key):\n start_time_ms = time_utils.now_millis()\n cache_hit = True\n page_size = self.options.get('page_size', self.default_page_size)\n bookmark = self.get_latest_bookmark()\n experiments = ':devapp:%d' % (DEBUG or IS_EXTERNAL_DEV) if (DEBUG or IS_EXTERNAL_DEV) else ''\n\n # Use running experiments from ngapi. 
Should be in sync with core/logic/seo_logic.py\n for seo_experiment in ('page_size_explore',): # one-element tuple; without the comma this would iterate characters\n group = self.context.activate_seo_or_unauth_experiment(seo_experiment)\n\n # Do not use control in the cache key\n if group and not group.startswith('control'):\n experiments += ':%s=%s' % (seo_experiment, group)\n\n debug_data = {}\n if self.context.request_debug:\n debug_data.update(self.context.request_debug)\n\n field_set_key = self.get_field_set_key()\n vase_key = self.options.get('add_vase') and self.context.language in seo_utils.VISUAL_DESCRIPTION_LANGUAGES\n cache_key = (interest_id, self.context.locale.lower(), page_size,\n bookmark, 1 if vase_key else 0, field_set_key, experiments)\n\n locales_to_use_cache = ['en-us', 'de', 'fr', 'pt-br', 'en-gb']\n use_cache = (interest_id and\n decider.decide_experiment('klp_feed_cache') and\n self.context.experiments.v2_activate_experiment('klp_cache') != 'disabled' and\n self.context.locale.lower() in locales_to_use_cache)\n skip_cache = (self.context.experiments.v2_activate_experiment('klp_cache') == 'invalidate' or\n decider.decide_experiment('klp_feed_cache_invalidate'))\n result = klp_feed_cache.get(cache_key) if use_cache and not skip_cache else None\n\n if result:\n try:\n result = msgpack.unpackb(brotli.decompress(result), encoding=\"utf-8\")\n except brotli.error:\n result = json.loads(zlib.decompress(result))\n self.add_bookmark(result.get('bookmark', self.END_BOOKMARK))\n else:\n cache_hit = False\n result = self.request(\"/v3/klp/%s/feed/\" % interest_key, field_set_key=field_set_key, data=debug_data)\n if result and result.get('data') and result.get('status') == APIStatus.SUCCESS:\n data = self._filter_duplicate_images(result.get('data'))\n\n if data and data[0].get('id'):\n first_pin_id = data[0].get('id')\n else:\n first_pin_id = None\n if self.options.get('add_vase') and self.context.language in seo_utils.VISUAL_DESCRIPTION_LANGUAGES:\n self._get_visual_data(data)\n if use_cache and first_pin_id:\n klp_feed_cache.set(cache_key, brotli.compress(msgpack.packb(result, encoding=\"utf-8\"), quality=1))\n\n if result.get('data'):\n if (self.context.is_full_page and\n self.context.get_seo_experiment_group('klp_rich_snippet', skip_logging=True) != 'control_4'):\n seo_utils.show_rich_snippet_pin(self, result['data'], ('enabled_klp'))\n\n # discovery_debug expects a certain object structure, repackage\n newData = {}\n newData['results'] = result.get('data')\n\n if result.get('debug_data'):\n newData['debug'] = result.get('debug_data')\n\n result['data'] = newData\n if result.get('status') == APIStatus.SUCCESS:\n opentsdb_client_v2.timing('denzel.resource.InterestsFeedResource.get',\n time_utils.now_millis() - start_time_ms, sample_rate=0.05,\n tags=build_metric_tags(self.context, {'cached': str(cache_hit)}))\n return result\n\n def get(self):\n interest = self.options.get(\"interest\")\n if not interest:\n return self.response_error(\"Empty interest field\")\n\n interest_id = url_utils.parse_interest_id(interest)\n # pass a key derived from interest name when interest_id is missing (for non-dictionary terms)\n # if interest name is blank, pass '0' (the string), so the klp API call succeeds\n interest_key = (interest_id or\n self.options.get('interest_name', '').lower().replace(' ', '-') or\n '0')\n\n return self._get_cached_klp_feed(interest_id, interest_key)\n\n\nclass InterestsFeedResource(BaseInterestsFeedResource):\n def get(self):\n response = super(InterestsFeedResource, self).get()\n response_copy = copy.copy(response)\n data = 
response_copy.get('data')\n if data:\n response_copy['data'] = data.get('results')\n return response_copy\n\n\nclass TopicFeedResource(BaseResource):\n default_field_set_key = 'pins'\n field_sets = {\n 'pins': BaseFeedOfPinsResource.field_sets['interest_grid_item'],\n 'unauth_react': PinResource.field_sets['unauth_react_grid_item']\n }\n\n _RELATED_BOARDS_MODULE_NUMBER_BOARDS = 5\n\n def get_field_set(self):\n \"\"\"Override\"\"\"\n fields = super(TopicFeedResource, self).get_field_set()\n field_set_key = self.get_field_set_key()\n\n if field_set_key == 'pins':\n fields += PinResource.experiments_additional_field_sets['did_it']\n\n return fields\n\n def get(self):\n start_time_ms = time_utils.now_millis()\n interest = self.options.get(\"interest\")\n if not interest:\n return self.response_error(\"Empty interest field\")\n\n feed_type = self.options.get(\"feed_type\", 'prod')\n\n debug_on = False\n if self.context.request_debug:\n # Debug parameters used for interest feeds\n feed_type = self.context.request_debug.get('deb_feed')\n if self.context.request_debug.get('deb_d') == \"True\":\n debug_on = True\n response = self.request(\"/v3/interests/%s/feed/\" % interest,\n data={'feed_type': feed_type, 'debug_on': debug_on})\n\n if response.get('status') != APIStatus.SUCCESS:\n return response\n\n end_time_ms = time_utils.now_millis()\n opentsdb_client_v2.timing('denzel.resource.TopicFeedResource.get',\n end_time_ms - start_time_ms, sample_rate=1,\n tags=build_metric_tags(self.context))\n return response\n\n\nclass UserInterestsResource(BaseResource):\n default_field_set_key = 'grid_item'\n field_sets = {\n 'grid_item': BaseFeedOfInterestsResource.field_sets['grid_item'],\n # For internal/interests use only\n 'grid_item_internal': tuple(list(BaseFeedOfInterestsResource.field_sets['grid_item']) +\n ['interest.recommendation_source']),\n 'grid_item_nux': tuple(list(\n BaseFeedOfInterestsResource.field_sets['grid_item']) + ['interest.is_recommended', 'interest.log_data']),\n 'related_interest': (\n 'interest.name',\n # 'interest.has_related', waiting on API support\n 'interest.id',\n 'interest.type',\n 'interest.url_name'\n ),\n\n }\n\n\nclass UrlInterestsResource(BaseResource):\n default_field_set_key = 'grid_item'\n field_sets = {\n 'grid_item': BaseFeedOfInterestsResource.field_sets['grid_item'],\n }\n\n def get(self):\n url = self.options.get('url', 'http://www.google.com')\n sort = self.options.get('sort', True)\n return self.request('/vx/links/interests/', data={'link': url, 'sort': sort})\n\n\nclass KLPBarResource(BaseResource):\n default_field_set_key = 'annotations'\n field_sets = {\n 'annotations': (\n 'interest.id',\n 'interest.is_interest',\n 'interest.is_klp_term',\n 'interest.name',\n 'interest.key',\n 'interest.url_name'\n ),\n }\n KEYWORD_LIMIT = 8\n\n def _get_interest_info(self, interest_name):\n response = self.request(\n '/v3/klp/%s/' % interest_name,\n data={'fields': 'interest.id, interest.is_interest, interest.is_klp_term, '\n 'interest.klp_has_related, interest.url_name'},\n ignore_bookmark=True)\n if response.get('status') == APIStatus.SUCCESS:\n # response.get('data') can be None, e.g. for blacklisted terms\n return response.get('data') or {}\n return {}\n\n def _get_interest_url(self, interest_info):\n if interest_info.get('url_name'):\n return '/explore/' + interest_info['url_name'] + '/'\n return None\n\n def get(self):\n # TODO(sdapul): It breaks encapsulation to for this resource to know about\n # the module that called it. 
Instead, an option should be passed in explicitly.\n main_module_name, resource_options = self.get_main_module_and_options()\n main_module_name = self.options.get('main_module_name', 'InterestFeedPage')\n\n response = {}\n\n if main_module_name == 'InterestFeedPage':\n interest_name = self.options.get('main_module_interest')\n interest_id = url_utils.parse_interest_id(interest_name)\n interest = interest_id if interest_id else interest_name\n\n interest_response = self._get_interest_info(interest)\n\n is_klp_term = interest_response.get('is_klp_term')\n is_interest = interest_response.get('is_interest')\n klp_has_related = interest_response.get('klp_has_related')\n\n # Only show KLP bar on KLP pages, not interest pages\n if (is_klp_term and\n not is_interest and\n klp_has_related):\n response = self.request('/v3/interests/%s/related/' % interest_response.get('id'),\n field_set_key='annotations',\n data={'limit': self.KEYWORD_LIMIT})\n elif main_module_name == 'BoardPage':\n main_module_slug = resource_options.get('slug', self.options.get('main_module_slug'))\n main_module_username = resource_options.get('username', self.options.get('main_module_username'))\n\n slug = urllib.unquote(main_module_slug).decode('utf-8')\n response = self.request(\n '/v3/boards/%s/%s/interests/' % (main_module_username, slug),\n field_set_key='annotations',\n data={'interest_type': 'extended'})\n elif main_module_name == 'Closeup':\n response = self.request(\n '/v3/pins/%s/interests/' % self.options.get('main_module_id'),\n field_set_key='annotations',\n data={'interest_type': 'extended', 'limit': self.KEYWORD_LIMIT})\n\n if response.get('status') == APIStatus.SUCCESS:\n if response.get('data'):\n related_interests = response['data']\n response['data'] = {}\n response['data']['related_interests'] = related_interests\n stat_client.increment('event.seo.klp_bar.get_related_interests.%s.success'\n % main_module_name, sample_rate=0.001)\n else:\n stat_client.increment('event.seo.klp_bar.get_related_interests.%s.empty'\n % main_module_name, sample_rate=0.001)\n # Log when a KLP has no related terms shown so we can investigate\n # Only log for en-us. Related interests are often empty for non-en-us due to a shortage\n # of translated terms.\n if main_module_name == 'InterestFeedPage' and self.context.locale.lower() == 'en-us':\n return self.response_error(\"No related interests\")\n\n return response\n else:\n stat_client.increment('event.seo.klp_bar.get_related_interests.%s.failure'\n % main_module_name, sample_rate=0.001)\n\n return response\n\n\nclass InterestTypeaheadResource(BaseResource):\n field_sets = {\n 'default': (\n 'interest.id',\n 'interest.key',\n 'interest.name',\n 'interest.images[136x136]',\n 'interest.follower_count',\n 'interest.is_followed',\n ),\n 'blur': (\n 'interest.background_color',\n 'interest.id',\n 'interest.key',\n 'interest.name',\n 'interest.images[300x300(ir.24)]',\n 'interest.follower_count',\n 'interest.is_followed',\n )\n }\n\n def _transform_items(self, items):\n for item in items:\n item['label'] = item['name']\n if item['images'] and '136x136' in item['images'] and item['images']['136x136']:\n item['image'] = item['images']['136x136']['url']\n if item['images'] and '300x300(ir.24)' in item['images'] and item['images']['300x300(ir.24)']:\n item['image'] = item['images']['300x300(ir.24)']['url']\n item.pop('name', None)\n item.pop('images', None)\n\n def get(self):\n data = {\n 'query': self.options.get('term'),\n # active_only default to True as a product decision on Dec. 
23, 2015\n 'active_only': self.options.get('active_only', True),\n }\n\n response = self.request('/v3/search/interests/', data=data)\n\n if response['status'] == APIStatus.SUCCESS:\n items = response['data']\n self._transform_items(items)\n response['data'] = {\n 'items': items\n }\n\n return response\n\n\nclass ReactKLPResource(BaseResource):\n\n def get(self):\n start_time_ms = time_utils.now_millis()\n interest_id = self.options['interest_id']\n is_interest = self.options.get('is_interest', False)\n resp = {'data': {}}\n\n page_size = self.options.get('page_size', 25)\n if self.context.is_mobile_agent:\n page_size = 8\n\n base_interests_feed_resource = BaseInterestsFeedResource(self.context,\n field_set_key='unauth_react',\n interest=interest_id, add_vase=True,\n interest_name=self.options.get('interest_name'),\n page_size=page_size)\n related_interests_resource = RelatedInterestsResource(self.context,\n field_set_key='unauth_react',\n interest_id=interest_id,\n limit=20,\n is_interest=is_interest)\n\n barrier = BarrierAll()\n barrier.add_task(base_interests_feed_resource.get)\n is_dictionary_term = interest_id and interest_id != '0'\n if is_dictionary_term:\n barrier.add_task(related_interests_resource.get)\n\n results = barrier.wait()\n\n for result in results:\n if result.get('status', APIStatus.SUCCESS) != APIStatus.SUCCESS:\n return result\n\n resp['data']['interest_feed'] = results[0].get('data', {}).get('results', {})\n resp['data']['search_debug_data'] = results[0].get('data', {}).get('debug', {})\n resp['data']['bookmarks'] = base_interests_feed_resource.get_latest_bookmark()\n resp['data']['related_interests'] = results[1].get('data', {}) if is_dictionary_term else []\n\n if self.options.get('check_is_open', False):\n # if rollout, add the category\n category = self._get_category(self.options.get('interest_id'))\n exp_group = unauth_open_utils.activate_exp(self, 'klp', category)\n resp['data']['us_open_group'] = exp_group\n resp['data']['is_open'] = exp_group in ['enabled', 'employees']\n\n end_time_ms = time_utils.now_millis()\n opentsdb_client_v2.timing('denzel.resource.ReactKLPResource.get',\n end_time_ms - start_time_ms, sample_rate=0.1,\n tags=build_metric_tags(self.context))\n return resp\n\n def _get_category(self, interest_id):\n if not interest_id:\n return\n\n category = None\n\n \"\"\"\n Returns klp category information stored in Terrapin\n format: '{\"repin_category\":\"ANIMALS\",\"term\":\"black cats\",\"impression_category\":\"ANIMALS\",\"lang\":0}'\n \"\"\"\n raw_category = terrapin_thrift_client.terrapin_service_client.single_get(\n ['main_a', 'main_e'],\n 'seo_klp_category',\n str(interest_id))\n\n if raw_category:\n try:\n category_json = json.loads(raw_category)\n category = category_json.get('impression_category', '').lower()\n if category:\n stat_client.increment('seo.klp.get_category.success', sample_rate=1)\n else:\n stat_client.increment('seo.klp.get_category.empty_category', sample_rate=1)\n except ValueError:\n stat_client.increment('seo.klp.get_category.json_error', sample_rate=1)\n else:\n stat_client.increment('seo.klp.get_category.empty_raw', sample_rate=1)\n\n return category\n\n\nclass PureReactKLPResource(BaseFeedOfPinsResource):\n def get(self):\n start_time_ms = time_utils.now_millis()\n interests_resource = InterestResource(self.context,\n field_set_key='unauth_react',\n interest=self.options.get('interest'),\n no_gift_wrap=self.options.get('no_gift_wrap'),\n main_module_name=self.options.get('main_module_name'),\n 
experiment=self.options.get('experiment'))\n interests_resource_result = interests_resource.get()\n interest_data = {}\n if interests_resource_result.get('status', None) == APIStatus.SUCCESS:\n if interests_resource_result.get('data'):\n interest_data = interests_resource_result.get('data')\n\n interest_id = interest_data.get('id')\n is_interest = interest_data.get('is_interest')\n interest_name = interest_data.get('name', '')\n is_debug_mode = interest_data.get('is_debug_mode', False)\n default_page_size = 125 if (self.context.is_bot == 'true' or is_debug_mode) else 25\n page_size = self.options.get('page_size', default_page_size)\n resp = {'data': {}}\n\n base_interests_feed_resource = BaseInterestsFeedResource(self.context,\n field_set_key='unauth_react',\n interest=interest_id, add_vase=True,\n interest_name=interest_name,\n page_size=page_size)\n inspired_wall_resource = NodeResourceStub('InspiredWallResource', self.context, {})\n related_interests_resource = RelatedInterestsResource(self.context,\n field_set_key='react',\n interest_id=interest_id,\n limit=20,\n is_interest=is_interest)\n\n barrier = BarrierAll()\n barrier.add_task(base_interests_feed_resource.get)\n barrier.add_task(inspired_wall_resource.get)\n is_dictionary_term = interest_id and interest_id != '0'\n if is_dictionary_term:\n barrier.add_task(related_interests_resource.get)\n\n results = barrier.wait()\n\n for result in results:\n if result.get('status', APIStatus.SUCCESS) != APIStatus.SUCCESS:\n return result\n\n # Make response cachable\n resp['http_status'] = 200\n resp['data']['interest_feed'] = results[0].get('data', {}).get('results', {})\n resp['data']['search_debug_data'] = results[0].get('data', {}).get('debug', {})\n resp['data']['bookmarks'] = base_interests_feed_resource.get_latest_bookmark()\n resp['data']['inspired_wall_story'] = results[1].get('data', {}).get('story', {})\n resp['data']['related_interests'] = results[2].get('data', {}) if is_dictionary_term else []\n resp['data']['interest_data'] = interest_data\n end_time_ms = time_utils.now_millis()\n opentsdb_client_v2.timing('denzel.resource.PureReactKLPResource.get',\n end_time_ms - start_time_ms, sample_rate=0.1,\n tags=build_metric_tags(self.context))\n return resp\n\n def get_page_metadata(self):\n interests_resource = InterestResource(self.context,\n field_set_key='unauth_react',\n interest=self.options.get('interest'),\n no_gift_wrap=self.options.get('no_gift_wrap'),\n main_module_name=self.options.get('main_module_name'),\n experiment=self.options.get('experiment'))\n return interests_resource.get_page_metadata()\n","sub_path":".git-history/b47f213-interests_resource.py","file_name":"b47f213-interests_resource.py","file_ext":"py","file_size_in_byte":43895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
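The record above caches KLP feeds with a compress-on-write, fallback-on-read scheme: new entries are msgpack-serialized and brotli-compressed, while reads that fail brotli decompression fall back to the older zlib-compressed JSON encoding. A minimal, hedged sketch of that read/write pair, assuming only a get/set key-value client like the klp_feed_cache used above (the helper names are illustrative, not from the source; packb/unpackb keep the older msgpack encoding kwarg to match the source's calls):

import json
import zlib

import brotli
import msgpack


def cache_set(cache, key, value):
    # New entries: msgpack-serialized, brotli-compressed at low quality for speed.
    cache.set(key, brotli.compress(msgpack.packb(value, encoding="utf-8"), quality=1))


def cache_get(cache, key):
    raw = cache.get(key)
    if raw is None:
        return None
    try:
        # Current format: brotli + msgpack.
        return msgpack.unpackb(brotli.decompress(raw), encoding="utf-8")
    except brotli.error:
        # Legacy entries were written as zlib-compressed JSON; fall back.
        return json.loads(zlib.decompress(raw))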
+{"seq_id":"41620606","text":"from django.core.cache import cache\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nimport requests\n# from dic.models import sign\n\nimport time\nimport random\nimport string\nimport hashlib\n\n\n# class Sign():\n# def __init__(self, jsapi_ticket, url):\n# self.ret = {\n# 'nonceStr': self.__create_nonce_str(),\n# 'jsapi_ticket': jsapi_ticket,\n# 'timestamp': self.__create_timestamp(),\n# 'url': url\n# }\n#\n# def __create_nonce_str(self):\n# return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))\n#\n# def __create_timestamp(self):\n# return int(time.time())\n#\n# def sign(self):\n# if 'signature' in self.ret.keys():\n# self.ret.pop('signature')\n# string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])\n# self.ret['signature'] = hashlib.sha1(string.encode()).hexdigest()\n# return self.ret\n\n\ndef get_sign(jsapi_ticket, url):\n \"\"\"\n\n :param jsapi_ticket:\n :param url:\n :return:\n \"\"\"\n\n create_nonce_str = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))\n create_timestamp = int(time.time())\n ret = {\n 'nonceStr': create_nonce_str,\n 'jsapi_ticket': jsapi_ticket,\n 'timestamp': create_timestamp,\n 'url': url\n }\n string1 = '&'.join(['%s=%s' % (key.lower(), ret[key]) for key in sorted(ret)])\n ret['signature'] = hashlib.sha1(string1.encode()).hexdigest()\n return ret\n\n\n# class AccessToken:\n# \"\"\"\n# access_token获取\n# \"\"\"\n# def __init__(self):\n#\n# self.url = 'https://api.weixin.qq.com/cgi-bin/token'\n# self.parm = {\n# \"grant_type\": \"client_credential\",\n# \"appid\": \"wx7cdc7e5be90319f4\",\n# \"secret\": \"ccb7b34bc4763d5dbc2ce5043c3032f8\",\n# }\n#\n# def get_access_token(self):\n#\n# wb_data = requests.get(self.url, params=self.parm)\n# data = wb_data.json()\n# if wb_data.status_code == 200:\n# cache.set('wechat_jsapi_access_token', data['access_token'], 7200)\n# return data['access_token']\n# else:\n# print(data['errmsg'])\n# return 0\n\n\ndef get_access_token():\n \"\"\"\n\n :return:\n \"\"\"\n\n url = 'https://api.weixin.qq.com/cgi-bin/token'\n parm = {\n \"grant_type\": \"client_credential\",\n \"appid\": \"wx7cdc7e5be90319f4\",\n \"secret\": \"ccb7b34bc4763d5dbc2ce5043c3032f8\",\n }\n wb_data = requests.get(url, params=parm)\n data = wb_data.json()\n if wb_data.status_code == 200:\n cache.set('wechat_jsapi_access_token', data['access_token'], 7200)\n return data['access_token']\n else:\n print(data['errmsg'])\n return '0'\n\n\n# class Ticket:\n# \"\"\"\n# jsapi_ticket获取\n# \"\"\"\n# def __init__(self, access_token):\n#\n# self.url = 'https://api.weixin.qq.com/cgi-bin/ticket/getticket'\n# self.parm = {\n# \"access_token\": access_token,\n# \"type\": \"jsapi\",\n# }\n#\n# def get_ticket(self):\n#\n# wb_data = requests.get(self.url, params=self.parm)\n# data = wb_data.json()\n# if wb_data.status_code == 200:\n# return data['ticket']\n# else:\n# return 0\n\n\ndef get_ticket(access_token):\n \"\"\"\n\n :param access_token:\n :return:\n \"\"\"\n\n url = 'https://api.weixin.qq.com/cgi-bin/ticket/getticket'\n parm = {\n \"access_token\": access_token,\n \"type\": \"jsapi\",\n }\n wb_data = requests.get(url, params=parm)\n data = wb_data.json()\n if wb_data.status_code == 200:\n cache.set('wechat_jsapi_ticket', data['ticket'], 7200)\n return data['ticket']\n else:\n return '0'\n\n\nclass WechatSignView(APIView):\n \"\"\"\n\n \"\"\"\n\n def get(self, request):\n 
url = request.GET.get('url')\n if url == None or len(url) < 1 :\n return Response(status=status.HTTP_406_NOT_ACCEPTABLE)\n\n accessToken = cache.get('wechat_jsapi_access_token')\n if accessToken == None:\n accessToken = get_access_token()\n if accessToken == '0':\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ticket = cache.get('wechat_jsapi_ticket')\n if ticket == None:\n ticket = get_ticket(accessToken)\n if ticket == '0':\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n ret = get_sign(ticket, url)\n return Response(ret, status=status.HTTP_200_OK)\n","sub_path":"sign/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
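WechatSignView.get above applies the same cache-aside pattern twice, once for the access token and once for the jsapi ticket: read from the Django cache, fetch and re-cache on a miss, and treat the '0' sentinel as failure. A hedged sketch of that pattern as a reusable helper (get_or_fetch is illustrative and not part of the source; the fetchers above also write the cache themselves, so the helper's set is redundant with them but harmless):

from django.core.cache import cache


def get_or_fetch(key, fetch, timeout=7200):
    """Return the cached value for `key`, fetching and re-caching on a miss."""
    value = cache.get(key)
    if value is None:
        value = fetch()
        if value != '0':  # '0' is the failure sentinel used by the fetchers above
            cache.set(key, value, timeout)
    return value

# Usage mirroring the view:
#   token = get_or_fetch('wechat_jsapi_access_token', get_access_token)
#   ticket = get_or_fetch('wechat_jsapi_ticket', lambda: get_ticket(token))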
+{"seq_id":"505454723","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"PAT\")\n\n\nprocess.load('FWCore/MessageService/MessageLogger_cfi')\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n\nprocess.options = cms.untracked.PSet(\n Rethrow = cms.untracked.vstring('ProductNotFound'),\n wantSummary = cms.untracked.bool(True)\n)\n# source\nprocess.source = cms.Source(\"PoolSource\", \n fileNames=cms.untracked.vstring( )\n \n)\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\n## Load additional processes\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\n## global tags:\nprocess.GlobalTag.globaltag = cms.string('GR_R_35X_V8B::All')\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load('Configuration.StandardSequences.Services_cff')\n\n\n################################################################################################\n### P r e p a r a t i o n o f t h e P A T O b j e c t s f r o m A O D ###\n################################################################################################\n\n## pat sequences to be loaded:\n#process.load(\"PhysicsTools.PFCandProducer.PF2PAT_cff\")\nprocess.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n#process.load(\"PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cff\")\n##\n#\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n## MET creation <=== WARNING: YOU MAY WANT TO MODIFY THIS PART OF THE CODE %%%%%%%%%%%%%\n## specify the names of the MET collections that you need here %%%%\n## #%%\n## if you don't specify anything the default MET is the raw Calo MET #%%\n## if you don't specify anything the default MET is the raw Calo MET #%%\nprocess.caloMET = process.patMETs.clone( #%%\n metSource = cms.InputTag(\"met\",\"\",\"RECO\"),\n addTrigMatch = cms.bool(False),\n addMuonCorrections = cms.bool(False),\n addGenMET = cms.bool(False),\n )\nprocess.tcMET = process.patMETs.clone( #%%\n metSource = cms.InputTag(\"tcMet\",\"\",\"RECO\"),\n addTrigMatch = cms.bool(False),\n addMuonCorrections = cms.bool(False),\n addGenMET = cms.bool(False),\n )\nprocess.pfMET = process.patMETs.clone( #%%\n metSource = cms.InputTag(\"pfMet\",\"\",\"RECO\"),\n addTrigMatch = cms.bool(False),\n addMuonCorrections = cms.bool(False),\n addGenMET = cms.bool(False),\n )\n## specify here what you want to have on the plots! <===== MET THAT YOU WANT ON THE PLOTS %%%%%%%\nmyMetCollection = 'caloMET'\nmyPfMetCollection = 'pfMET'\nmyTcMetCollection = 'tcMET'\n\n## specify here what you want to have on the plots! 
<===== MET THAT YOU WANT ON THE PLOTS %%%%%%%\n## myDesiredMetCollection = 'layer1RawCaloMETs'\n## modify the sequence of the MET creation: #%%\nprocess.makePatMETs = cms.Sequence(process.caloMET*process.tcMET*process.pfMET)\n\n## GF changed here: more than one pat done here\n\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n## modify the final pat sequence: keep only electrons + METS (muons are needed for met corrections)\nprocess.load(\"RecoEgamma.EgammaIsolationAlgos.egammaIsolationSequence_cff\")\n#process.patElectronIsolation = cms.Sequence(process.egammaIsolationSequence)\n\nprocess.patElectrons.isoDeposits = cms.PSet()\nprocess.patElectrons.userIsolation = cms.PSet()\nprocess.patElectrons.addElectronID = cms.bool(True)\nprocess.patElectrons.electronIDSources = cms.PSet(\n simpleEleId95relIso= cms.InputTag(\"simpleEleId95relIso\"),\n simpleEleId90relIso= cms.InputTag(\"simpleEleId90relIso\"),\n simpleEleId85relIso= cms.InputTag(\"simpleEleId85relIso\"),\n simpleEleId80relIso= cms.InputTag(\"simpleEleId80relIso\"),\n simpleEleId70relIso= cms.InputTag(\"simpleEleId70relIso\"),\n simpleEleId60relIso= cms.InputTag(\"simpleEleId60relIso\"),\n simpleEleId95cIso= cms.InputTag(\"simpleEleId95cIso\"),\n simpleEleId90cIso= cms.InputTag(\"simpleEleId90cIso\"),\n simpleEleId85cIso= cms.InputTag(\"simpleEleId85cIso\"),\n simpleEleId80cIso= cms.InputTag(\"simpleEleId80cIso\"),\n simpleEleId70cIso= cms.InputTag(\"simpleEleId70cIso\"),\n simpleEleId60cIso= cms.InputTag(\"simpleEleId60cIso\"), \n )\n##\nprocess.patElectrons.addGenMatch = cms.bool(False)\nprocess.patElectrons.embedGenMatch = cms.bool(False)\nprocess.patElectrons.usePV = cms.bool(False)\n##\nprocess.load(\"ElectroWeakAnalysis.WENu.simpleEleIdSequence_cff\")\nprocess.patElectronIDs = cms.Sequence(process.simpleEleIdSequence)\nprocess.makePatElectrons = cms.Sequence(process.patElectronIDs*process.patElectrons)\n# process.makePatMuons may be needed depending on how you calculate the MET\nprocess.makePatCandidates = cms.Sequence(process.makePatElectrons+process.makePatMETs)\nprocess.patDefaultSequence = cms.Sequence(process.makePatCandidates)\n\n\n##\n## ################################################################################\n##\n## generator level filter\n##\n## ################################################################################\n## conditions on different particles are OR-ed\nprocess.genFilter = cms.EDFilter(\"MCSingleParticleFilter\",\n Status = cms.untracked.vint32(1, 1),\n MinEta = cms.untracked.vdouble(-2.4, -2.4),\n MaxEta = cms.untracked.vdouble(2.4, 2.4),\n MinPt = cms.untracked.vdouble(15.0, 15.0),\n ParticleID = cms.untracked.vint32(11, -11) ## these are electron and positron\n )\n\n\n\n\n\n##\n## ################################################################################\n##\n## the filter to select the candidates from the data samples\n##\n##\n## WARNING: you may want to modify this item:\nHLT_process_name = \"HLT\" # \n# trigger path selection\n# HLT_path_name = \"HLT_Ele15_LW_L1R\"\nHLT_path_name = \"HLT_Photon10_L1R\"\n# trigger filter name\nHLT_filter_name = \"hltL1NonIsoHLTNonIsoSinglePhotonEt10HcalIsolFilter\"\n# HLT_filter_name = \"hltL1NonIsoHLTNonIsoSingleElectronLWEt15PixelMatchFilter\"\n#\nHLT_path_name_extra = \"HLT_Photon15_L1R\" #= \"HLT_Ele15_LW_L1R\" #\nHLT_filter_name_extra = \"hltL1NonIsoHLTNonIsoSinglePhotonEt15HcalIsolFilter\"\nprocess.z1legFilter=cms.EDFilter('WenuCandidateFilter',\n ### the input collections needed:\n 
electronCollectionTag = cms.untracked.InputTag(\"patElectrons\",\"\",\"PAT\"),\n metCollectionTag = cms.untracked.InputTag(myMetCollection,\"\",\"PAT\"),\n pfMetCollectionTag = cms.untracked.InputTag(myPfMetCollection,\"\",\"PAT\"),\n tcMetCollectionTag = cms.untracked.InputTag(myTcMetCollection,\"\",\"PAT\"),\n triggerCollectionTag = cms.untracked.InputTag(\"TriggerResults\",\"\",HLT_process_name),\n triggerEventTag = cms.untracked.InputTag(\"hltTriggerSummaryAOD\",\"\",HLT_process_name),\n hltpath = cms.untracked.string(HLT_path_name),\n hltpathFilter = cms.untracked.InputTag(HLT_filter_name,\"\",HLT_process_name),\n ebRecHits = cms.untracked.InputTag(\"reducedEcalRecHitsEB\"),\n eeRecHits = cms.untracked.InputTag(\"reducedEcalRecHitsEE\"),\n PrimaryVerticesCollection = cms.untracked.InputTag(\"offlinePrimaryVertices\"),\n ### here the preselection is applied\n # fiducial cuts:\n BarrelMaxEta = cms.untracked.double(1.4442),\n EndCapMinEta = cms.untracked.double(1.566),\n EndCapMaxEta = cms.untracked.double(2.5),\n # demand ecal driven electron:\n useEcalDrivenElectrons = cms.untracked.bool(True),\n # demand offline spike cleaning with the Swiss Cross criterion:\n useSpikeRejection = cms.untracked.bool(True),\n spikeCleaningSwissCrossCut = cms.untracked.double(0.95),\n # demand geometrically matched to an HLT object with ET>15GeV\n useTriggerInfo = cms.untracked.bool(False), # GF \n electronMatched2HLT = cms.untracked.bool(False), # GF \n electronMatched2HLT_DR = cms.untracked.double(0.2), # GF \n useHLTObjectETCut = cms.untracked.bool(False), # GF \n hltObjectETCut = cms.untracked.double(15.),\n useExtraTrigger = cms.untracked.bool(False), # GF \n hltpathExtra = cms.untracked.string(HLT_path_name_extra),\n hltpathFilterExtra = cms.untracked.InputTag(HLT_filter_name_extra,\"\",HLT_process_name),\n # ET Cut in the SC\n ETCut = cms.untracked.double(10.), # GF : mistakenly set to 20 in the first pass\n METCut = cms.untracked.double(0.),\n # reject events with a 2nd electron with ET > 20 that passes the WP95%\n vetoSecondElectronEvents = cms.untracked.bool(False),\n storeSecondElectron = cms.untracked.bool(False),\n ETCut2ndEle = cms.untracked.double(20.),\n vetoSecondElectronIDType = cms.untracked.string(\"simpleEleId80relIso\"), # GF\n vetoSecondElectronIDSign = cms.untracked.string(\"=\"),\n vetoSecondElectronIDValue = cms.untracked.double(7.),\n # Other parameters of the code - leave them as they are\n useValidFirstPXBHit = cms.untracked.bool(False),\n useConversionRejection = cms.untracked.bool(False),\n useExpectedMissingHits = cms.untracked.bool(False),\n maxNumberOfExpectedMissingHits = cms.untracked.int32(1),\n # calculate some new cuts\n calculateValidFirstPXBHit = cms.untracked.bool(True),\n calculateConversionRejection = cms.untracked.bool(True),\n calculateExpectedMissingHits = cms.untracked.bool(True),\n # we are dealing with DATA\n dataMagneticFieldSetUp = cms.untracked.bool(False),\n dcsTag = cms.untracked.InputTag(\"scalersRawToDigi\"),\n )\n\n# = cms.EDFilter('WenuCandidateFilter',\n# # cfg for data\n# dataMagneticFieldSetUp = cms.untracked.bool(False), # set to False in MC!\n# dcsTag = cms.untracked.InputTag(\"scalersRawToDigi\"),\n# # cuts\n# ETCut = cms.untracked.double(10.),\n# METCut = cms.untracked.double(0.),\n# # 2nd electron in W events\n# vetoSecondElectronEvents = cms.untracked.bool(False),\n# ETCut2ndEle = cms.untracked.double(20.),\n# vetoSecondElectronIDType = cms.untracked.string(\"simpleEleId80relIso\"),\n# vetoSecondElectronIDSign = 
cms.untracked.string(\"=\"),\n# vetoSecondElectronIDValue = cms.untracked.double(7.),\n# # trigger \n# useTriggerInfo = cms.untracked.bool(False),\n# triggerCollectionTag = cms.untracked.InputTag(\"TriggerResults\",\"\",HLT_process_name),\n# triggerEventTag = cms.untracked.InputTag(\"hltTriggerSummaryAOD\",\"\",HLT_process_name),\n# hltpath = cms.untracked.string(HLT_path_name), \n# hltpathFilter = cms.untracked.InputTag(HLT_filter_name,\"\",HLT_process_name),\n# electronMatched2HLT = cms.untracked.bool(False),\n# electronMatched2HLT_DR = cms.untracked.double(0.2),\n# # additional preselection cuts\n# useValidFirstPXBHit = cms.untracked.bool(False),\n# useConversionRejection = cms.untracked.bool(False),\n# useExpectedMissingHits = cms.untracked.bool(False),\n# maxNumberOfExpectedMissingHits = cms.untracked.int32(1),\n# # calculate some new cuts\n# calculateValidFirstPXBHit = cms.untracked.bool(True),\n# calculateConversionRejection = cms.untracked.bool(True),\n# calculateExpectedMissingHits = cms.untracked.bool(True),\n# # electrons and MET\n# electronCollectionTag = cms.untracked.InputTag(\"patElectrons\",\"\",\"PAT\"),\n# metCollectionTag = cms.untracked.InputTag(myDesiredMetCollection,\"\",\"PAT\"),\n# \n# )\n####################################################################################\n\n\n\n\n\n\n\n# to access values of EldId cuts as defined by the EWK group\nimport ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi\n\n# EWK analyzer: different Z definitions with one ECAL electron and one HF electron\n# ---> this is the instance to run AFTER the Wenu EWK filter\nprocess.demo = cms.EDAnalyzer('HFZeeVBTF',\n ECALid = cms.string('simpleEleId90cIso'),\n minEtECAL = cms.double(20),\n minEtHF = cms.double(20),\n DoLog = cms.bool(True),\n# this is instance of the analysis code which matters; keep only enectrons that pass the full selection\n acceptedElectronIDs = cms.vint32( 7 ),\n \n# robust95relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95relIsoEleIDCutsV04.clone()),\n robust95relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95relIsoEleIDCutsV04.clone()),\n robust90relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust90relIsoEleIDCutsV04.clone()),\n robust85relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust85relIsoEleIDCutsV04.clone()),\n robust80relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust80relIsoEleIDCutsV04.clone()),\n robust70relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust70relIsoEleIDCutsV04.clone()),\n robust60relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust60relIsoEleIDCutsV04.clone()),\n robust95cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95cIsoEleIDCutsV04.clone()),\n robust90cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust90cIsoEleIDCutsV04.clone()),\n robust85cIsoEleIDCutsV04 = 
cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust85cIsoEleIDCutsV04.clone()),\n robust80cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust80cIsoEleIDCutsV04.clone()),\n robust70cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust70cIsoEleIDCutsV04.clone()),\n robust60cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust60cIsoEleIDCutsV04.clone())\n\n\n)\n\n\n# EWK analyzer: different Z definitions with one ECAL electron and one HF electron\n# ---> this is the instance to run AFTER the Wenu EWK filter\nprocess.demoLoose = cms.EDAnalyzer('HFZeeVBTF',\n ECALid = cms.string('simpleEleId90cIso'),\n minEtECAL = cms.double(20),\n minEtHF = cms.double(20),\n DoLog = cms.bool(True),\n# this is instance of the analysis code which I carry along to allow the keeping also of electrons that have not passed conversion rejection\n acceptedElectronIDs = cms.vint32( 3, 7 ),\n \n# robust95relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95relIsoEleIDCutsV04.clone()),\n robust95relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95relIsoEleIDCutsV04.clone()),\n robust90relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust90relIsoEleIDCutsV04.clone()),\n robust85relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust85relIsoEleIDCutsV04.clone()),\n robust80relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust80relIsoEleIDCutsV04.clone()),\n robust70relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust70relIsoEleIDCutsV04.clone()),\n robust60relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust60relIsoEleIDCutsV04.clone()),\n robust95cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95cIsoEleIDCutsV04.clone()),\n robust90cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust90cIsoEleIDCutsV04.clone()),\n robust85cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust85cIsoEleIDCutsV04.clone()),\n robust80cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust80cIsoEleIDCutsV04.clone()),\n robust70cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust70cIsoEleIDCutsV04.clone()),\n robust60cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust60cIsoEleIDCutsV04.clone())\n\n\n)\n\n\n# EWK analyzer: different Z definitions with one ECAL electron and one HF electron\n# ---> this is the instance to run BEFORE the Wenu EWK filter, so to have normalization of number of MC events processed\nprocess.demoBefCuts = cms.EDAnalyzer('HFZeeVBTF',\n ECALid = cms.string('simpleEleId90cIso'),\n 
minEtECAL = cms.double(20),\n minEtHF = cms.double(20),\n DoLog = cms.bool(True),\n# this instance of the analysis code is just to count events, so it does not matter how tight eleID might be\n acceptedElectronIDs = cms.vint32( 7 ),\n \n# robust95relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95relIsoEleIDCutsV04.clone()),\n robust95relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95relIsoEleIDCutsV04.clone()),\n robust90relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust90relIsoEleIDCutsV04.clone()),\n robust85relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust85relIsoEleIDCutsV04.clone()),\n robust80relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust80relIsoEleIDCutsV04.clone()),\n robust70relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust70relIsoEleIDCutsV04.clone()),\n robust60relIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust60relIsoEleIDCutsV04.clone()),\n robust95cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust95cIsoEleIDCutsV04.clone()),\n robust90cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust90cIsoEleIDCutsV04.clone()),\n robust85cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust85cIsoEleIDCutsV04.clone()),\n robust80cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust80cIsoEleIDCutsV04.clone()),\n robust70cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust70cIsoEleIDCutsV04.clone()),\n robust60cIsoEleIDCutsV04 = cms.PSet(ElectroWeakAnalysis.WENu.simpleCutBasedElectronIDSpring10_cfi.simpleCutBasedElectronID.robust60cIsoEleIDCutsV04.clone())\n\n\n)\n\n\n\n# THIS IS PROD\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"\"),\n)\n\n\n\n# this is the filter requiring the same HLT bits which compose the EG dataset\n# see: https://twiki.cern.ch/twiki/bin/view/CMS/PhysicsSecondaryDatasets AND\n# http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/Configuration/Skimming/test/SDmaker_6SD_2CS_PDMinBias_1e28_cfg.py?revision=1.1&view=markup\nimport HLTrigger.HLTfilters.hltHighLevelDev_cfi\nprocess.EG_1e28 = HLTrigger.HLTfilters.hltHighLevelDev_cfi.hltHighLevelDev.clone(andOr = True)\nprocess.EG_1e28.HLTPaths = (\n# \"HLT_Mu9\",\n# \"HLT_Mu5_Track0_Jpsi\"\n \"HLT_Photon10_L1R\",\n \"HLT_Photon15_L1R\",\n \"HLT_Photon15_LooseEcalIso_L1R\",\n \"HLT_Photon20_L1R\",\n \"HLT_Photon30_L1R_8E29\",\n \"HLT_DoublePhoton4_Jpsi_L1R\",\n \"HLT_DoublePhoton4_Upsilon_L1R\",\n \"HLT_DoublePhoton4_eeRes_L1R\",\n \"HLT_DoublePhoton5_eeRes_L1R\", #added to match the /cdaq/physics/firstCollisions10/v2.0/HLT_7TeV/V5 table\n \"HLT_DoublePhoton5_Jpsi_L1R\",\n \"HLT_DoublePhoton5_Upsilon_L1R\",\n \"HLT_DoublePhoton5_L1R\",\n \"HLT_DoublePhoton10_L1R\",\n \"HLT_DoubleEle5_SW_L1R\",\n \"HLT_Ele20_LW_L1R\",\n \"HLT_Ele15_SiStrip_L1R\",\n 
\"HLT_Ele15_SC10_LW_L1R\",\n \"HLT_Ele15_LW_L1R\",\n \"HLT_Ele10_LW_EleId_L1R\",\n \"HLT_Ele10_LW_L1R\",\n \"HLT_Photon15_TrackIso_L1R\"\n )\nprocess.EG_1e28.HLTPathsPrescales = cms.vuint32(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)\nprocess.EG_1e28.HLTOverallPrescale = cms.uint32(1)\nprocess.EG_1e28.throw = False\nprocess.EG_1e28.andOr = True\n\n\nprocess.load(\"RecoEgamma.EgammaHFProducers.hfRecoEcalCandidate_cfi\")\nprocess.hfRecoEcalCandidate.intercept2DCut=0.3\nprocess.hfRecoEcalCandidate.e9e25Cut =0.94\n# 0.94 is the same as default in the HF cluster producer\n\n\nprocess.z1lPath = cms.Path(\n process.patDefaultSequence *\n process.demoBefCuts *\n # process.genFilter *\n process.EG_1e28 *\n process.z1legFilter *\n process.hfRecoEcalCandidate *\n process.demo *\n process.demoLoose\n )\n","sub_path":"ZShape/HFZeeVBTF/test/skimAndAnalysisFromRECO_MC_Zoneleg_cfg.py","file_name":"skimAndAnalysisFromRECO_MC_Zoneleg_cfg.py","file_ext":"py","file_size_in_byte":24690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"240839914","text":"import numpy as np\nimport cv2\nfrom keras.models import model_from_json\n\n#Loading the classifier model\n#Load the JSON file\njson_file = open('model.json', 'r')\nmodel_json = json_file.read()\njson_file.close()\nmodel = model_from_json(model_json)\n\n#Load the model weights\nmodel.load_weights('model.h5')\n\n#Expression dictionary\nexpression_dict = {0:'Angry', \n 1:'Fear', \n 2:'Happy', \n 3:'Sad', \n 4:'Surprise', \n 5:'Neutral'\n }\n\n#Font for writing the expression on the webcam stream\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\n#Create a VideoCapture object to read live-stream from a camera\ncap = cv2.VideoCapture(0) \n\nwhile True:\n ret, frame = cap.read() #retval, image\n \n #Operations on the frame\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # type = numpy array\n \n \n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n if len(faces) > 0:\n #Iterate over all of the faces found in the frame\n for (x,y,w,h) in faces:\n #Draw a rectange around the face\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n \n #Crop each of the faces, resize, and use the classifier to classify the facial expression\n cropped_image = gray[y:y+h,x:x+w]\n cropped_image = cv2.resize(cropped_image, dsize=(48,48))\n reshaped = np.reshape(cropped_image, (1, 48,48,1))\n reshaped = np.divide(reshaped, 255.0)\n #Make a prediction\n expression = np.argmax(model.predict(reshaped))\n cv2.putText(frame,expression_dict[expression],(x+10,y-10), font, 1,(100,100,100),2,cv2.LINE_AA)\n \n #Display the frame\n cv2.imshow('Facial expression classifier', frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n#When everything is done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n\n\n","sub_path":"stream_classification.py","file_name":"stream_classification.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"338633283","text":"import matplotlib.gridspec as gridspec\n#gs = gridspec.GridSpec(13,12)\nimport matplotlib\nfrom matplotlib import font_manager\nimport numpy as np\nfrom astropy.io import ascii\nfrom astropy.cosmology import FlatLambdaCDM\nimport matplotlib.pyplot as plt\nfrom load_and_smooth_map import *\n\n'''Plot the voids and galaxies on top of slices of the map. Each slice is separated by 2 h^{-1} Mpc.\nUse integer division to find galaxies in each slice: select all galaxies between a given RA slice and the next one.'''\n\n# Map parameters and cosmology\ncosmo = FlatLambdaCDM(H0=70, Om0=0.31)\nra0 = 149.975\ndec0 = 2.15\nzmid = 2.35\ndec1 = 2.4880706 \ndeg_per_hMpc = 1./ cosmo.h / cosmo.comoving_distance(zmid).value * 180./np.pi\ndist_to_center = cosmo.comoving_distance(zmid).value*cosmo.h\n\n# Load the map\nmapfile = 'map_2017tmp_sm2.0.bin'\nmap_smoothed, allx, ally, allz = load_and_smooth_map(mapfile,(48,48,680))\n\n# Load the galaxies and convert coordinates to h^{-1} Mpc\ngalaxies = ascii.read('galxcorr_cl2016_v0_with_vuds.dat')\ngalaxies_x = np.cos(0.5*(dec0+dec1)*np.pi/180.)*(galaxies['ra'] - ra0)/deg_per_hMpc\ngalaxies_y = (galaxies['dec'] - dec0)/deg_per_hMpc\ngalaxies_z = cosmo.comoving_distance(galaxies['zspec']).value*cosmo.h\n\n# Select certain galaxies\n#galaxies_x = galaxies_x[(galaxies['source'] == 'MOSDEF')]\n#galaxies_y = galaxies_y[(galaxies['source'] == 'MOSDEF')]\n#galaxies_z = galaxies_z[(galaxies['source'] == 'MOSDEF')]\n\ngalaxies_x_mosdef = galaxies_x[(galaxies['source'] == 'MOSDEF')]\ngalaxies_y_mosdef = galaxies_y[(galaxies['source'] == 'MOSDEF')]\ngalaxies_z_mosdef = galaxies_z[(galaxies['source'] == 'MOSDEF')]\n\ngalaxies_x_vuds = galaxies_x[(galaxies['source'] == 'VUDS')]\ngalaxies_y_vuds = galaxies_y[(galaxies['source'] == 'VUDS')]\ngalaxies_z_vuds = galaxies_z[(galaxies['source'] == 'VUDS')]\n\n\n#galaxies_x = galaxies_x[(galaxies['source'] == 'zDEEP')]\n#galaxies_y = galaxies_y[(galaxies['source'] == 'zDEEP')]\n#galaxies_z = galaxies_z[(galaxies['source'] == 'zDEEP')]\n\n# Load the voids. 
Convert from map-centered coordinates to observer-centered coordinates\nvoids = np.loadtxt('voids.txt')\nnon_overlap_vradius = voids[:,0]\nnon_overlap_vcenter = voids[:,1:]\nnon_overlap_vcenter[:,2] = non_overlap_vcenter[:,2] + dist_to_center-170.\n\nticks_font = font_manager.FontProperties(family='Helvetica', style='normal',\n\t\tsize=10, weight='normal', stretch='normal')\n\nmatplotlib.rcParams['text.usetex'] = True\nmatplotlib.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]\nmatplotlib.rcParams['font.family'] = 'serif'\nmatplotlib.rcParams['font.serif'] = 'cm'\n\n#fig = plt.figure(figsize=(6.09,8))\nfor j in range(12):\n\tstep = 4\n\tfor i in range(12):\n\t\tif i == j:\n\t\t\tplt.clf()\n\t\t\tfig = plt.figure(figsize=(14.17,2.29))\n\t\t\t#ax = plt.subplots(111)\n\t\t\tax = plt.gca()\n\t\t\tax.contourf(allz[step*i,:,:]+dist_to_center-170.,ally[step*i,:,:],map_smoothed[step*i,:,:],cmap=plt.cm.jet_r,levels=np.linspace(np.min(map_smoothed),np.max(map_smoothed),10),vmin=-0.25,vmax=0.25)\n\t\t\tax.minorticks_on()\n\t\t\tax2 = ax.twiny()\n\t\t\tax2.minorticks_on()\n\t\t\tax2.set_xlim([2.15875,2.55805])\n\t\t\tax2.set_xticks([2.2,2.25, 2.3,2.35,2.4,2.45,2.5])\n\t\t\tax.set_xlabel(r'Comoving distance ($h^{-1}$ Mpc)',size=18)\n\t\t\tax.set_ylabel(r'y$_{\\textrm{perp}}$ ($h^{-1}$ Mpc)',size=18)\n\t\t\tax2.set_xlabel(r'redshift',size=18)\n\t\t\tfor item in (ax.get_yticklabels()):\n\t\t\t\titem.set_fontsize(12)\n\t\t\tfor item in (ax2.get_xticklabels()):\n\t\t\t\titem.set_fontsize(12)\n\t\t\tfor item in (ax.get_xticklabels()):\n\t\t\t\titem.set_fontsize(12)\n\t\t\tax.plot(galaxies_z_mosdef[np.where((np.array(galaxies_x_mosdef//(step/2)).astype('int') == i))],galaxies_y_mosdef[np.where((np.array(galaxies_x_mosdef//(step/2)).astype('int') == i))],'.',color='k',markersize=12)\n\t\t\tax.plot(galaxies_z_vuds[np.where((np.array(galaxies_x_vuds//(step/2)).astype('int') == i))],galaxies_y_vuds[np.where((np.array(galaxies_x_vuds//(step/2)).astype('int') == i))],'*',color='g',markeredgewidth=0.0,markersize=12)\n\t\t\tax.set_xlim(np.array([0,340])+dist_to_center-170.)\n\t\t\tax.set_ylim([0,24])\n\t\t\tax.set_yticks([0,8,16,24])\n\t\t\t#plt.figaspect(1/14.17)\n\t\t\t'''for j in range(len(non_overlap_vcenter)):\n\t\t\t\tvoid = non_overlap_vcenter[j]\n\t\t\t\tradius = non_overlap_vradius[j]\n\t\t\t\tx = void[0]\n\t\t\t\ty = void[1]\n\t\t\t\tz = void[2]\n\t\t\t\t#if ((x+2*radius >= 2*i) and (x+2*radius <= 2*(i+1))\n\t\t\t\t#\tor (x-2*radius >= 2*i) and (x-2*radius <= 2*(i+1))):\n\t\t\t\tif (int(x//(step/2)) == i or int(x//(step/2)) == i-1 or int(x//(step/2)) == i+1\n\t\t\t\t\t\tor int(x//(step/2)) == i+2 or int(x//(step/2)) == i-2\n\t\t\t\t\t\tor int(x//(step/2)) == i+3 or int(x//(step/2)) == i-3):\n\t\t\t\t#if (2*i > x - radius) and (2*i < x + radius):\n\t\t\t\t\tprint 2*i, x, y, z, radius\n\t\t\t\t\tcircle1 = plt.Circle((z,y), radius, color='k', fill=False)\n\t\t\t\t\tax.add_artist(circle1)'''\n\t#fig.text(0.04, 0.5, r'y ($h^{-1}$ Mpc)', rotation='vertical')\n\n\t#Create custom artists\n\tsimArtist = plt.Line2D((0,1),(0,0), color='k', marker='.', linestyle='',markersize=12)\n\tanyArtist = plt.Line2D((0,1),(0,0), color='g', marker='*', linestyle='',markersize=12)\n\n\t#Create legend from custom artist/label lists\n\tax.legend([simArtist,anyArtist],\n\t\t\t ['MOSDEF', 'VUDS'],numpoints=1,bbox_to_anchor=(0.98, 
1.02),bbox_transform=plt.gcf().transFigure,frameon=False)\n\n\t#fig.subplots_adjust(hspace=0.5)\n\tplt.tight_layout()\n\n\tplt.ion()\n\tplt.show()\n\tplt.savefig('clamato_03_17_galaxies_mosdef_vuds_slice%i.pdf' % j)\n\n","sub_path":"plot_galaxies_single_slice.py","file_name":"plot_galaxies_single_slice.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
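The plotting script above selects the galaxies belonging to each 2 h^-1 Mpc slice with an integer-division test. A hedged sketch of that test as a reusable mask, using the same step=4 convention as the loop (in_slice is an illustrative name):

import numpy as np


def in_slice(x, slice_index, step=4):
    """Boolean mask of points whose transverse coordinate falls in the given slice."""
    return (np.asarray(x) // (step / 2)).astype(int) == slice_index

# e.g. galaxies_z_mosdef[in_slice(galaxies_x_mosdef, i)] mirrors the plot calls above.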
+{"seq_id":"341751598","text":"import discord\nfrom discord.ext import commands\nfrom PopulateGame import *\nfrom Roles import Mafia\nfrom MainGameLoop import ChangePhase\nfrom bottoken import TOKEN\nfrom MainGameLogic import CacheAllRoles, CacheGameLoop\n\nservers = {}\nongoing_games = {}\nmafia_channels = {}\n\nbot = commands.Bot(command_prefix = \"!\")\n\n@bot.event\nasync def on_ready():\n\n print(\"Ready\")\n\n\nasync def AddUserToLobby(ctx):\n\n for key, value in servers.items():\n\n if ctx.author in value:\n\n await ctx.send(f\"You are already in a lobby {ctx.author.name}\")\n raise Exception\n\n if ctx.guild in servers.keys():\n\n servers[ctx.guild].append(ctx.author)\n\n else:\n\n servers[ctx.guild] = [ctx.author]\n\n@bot.command()\nasync def mafStart(ctx):\n\n await AddUserToLobby(ctx)\n await ctx.send(f\"Okay, setting up the lobby. I have added {ctx.author.name} to it\")\n await ctx.send(f\"Anyone else who would like to join, type !join\")\n await ctx.send(f\"Type !start when everyone is ready (minimum is 8 people)\")\n\n@bot.command()\nasync def join(ctx):\n\n if ctx.guild in servers.keys():\n\n try:\n\n await AddUserToLobby(ctx)\n\n except Exception:\n\n return\n\n await ctx.send(f\"Added {ctx.author.name} to the lobby\")\n\n@bot.command()\nasync def start(ctx):\n\n if servers[ctx.guild].count != 8:\n\n server_player_role = {}\n\n selected_roles = TestPopulateGame(servers[ctx.guild])\n\n server_player_role[ctx.guild] = selected_roles\n\n await CreateMafiaChannel(ctx, selected_roles)\n await CacheAllRoles(ctx.guild, server_player_role)\n\n if ctx.guild not in ongoing_games.keys():\n\n ongoing_games[ctx.guild] = ChangePhase(ctx, servers[ctx.guild])\n await CacheGameLoop(ctx.guild, ongoing_games)\n\n\nasync def CreateMafiaChannel(ctx, all_roles: dict):\n\n server = ctx.guild\n channels = ctx.guild.channels\n everyone_overwrite = discord.PermissionOverwrite()\n maf_overwrite = discord.PermissionOverwrite()\n everyone_overwrite.view_channel = False\n maf_overwrite.view_channel = True\n\n #maf_bot_role = await determine_bot_role(server)\n\n if \"mafia-chat\" not in channels:\n await ctx.send(\"Creating the channel for Mafia\")\n current_channel = ctx.message.channel.category\n\n maf_channel = await server.create_text_channel(\"mafia-chat\", category=current_channel)\n mafia_channels[ctx.guild] = maf_channel\n await maf_channel.set_permissions(bot.user, overwrite=maf_overwrite, reason=\"Mafia Game\")\n\n for player, role in all_roles.items():\n if isinstance(role, Mafia):\n await maf_channel.set_permissions(player, overwrite=maf_overwrite, reason=\"Mafia Game\")\n else:\n await maf_channel.set_permissions(player, overwrite=everyone_overwrite, reason=\"Mafia Game\")\n\n\n@bot.command()\nasync def quitGame(ctx):\n try:\n await ctx.message.channel.set_permissions(ctx.guild.default_role, overwrite=None)\n\n for player in servers[ctx.guild]:\n\n await ctx.message.channel.set_permissions(player, overwrite=None)\n\n await mafia_channels[ctx.guild].delete()\n\n except:\n pass\n\n ongoing_games[ctx.guild].cog_unload()\n del ongoing_games[ctx.guild]\n del servers[ctx.guild]\n\nbot.run(TOKEN)","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"627632960","text":"# -*- coding: utf-8 -*-\n# Langmuir probe analysis program using multiprocessing\n# This is for voltage steps only with planer Langmuir probe (values come from dI/dV)\n# This program will run for all positions and times and output the result into a .npy file\n\nimport time\nimport os\nimport warnings\nimport numpy\nfrom scipy.ndimage import gaussian_filter1d\n\n'''\nUser input\n'''\n\nifn = r\"C:\\data\\lp-ind\\step\\Argon-10mT-300W(1kHz)-step-XYplane(z=2)\"\n\narea = 2 * 2.6e-3 * 2e-3\nresistor = 50\n\nsmooth_interval_1 = 10 # This is smooth in time\nsmooth_interval_2 = 0 # This is smooth in dI/dV\nnum_process = 64 # Number of processes in queue\n\n#================================================================================================\n#================================================================================================\n\n'''\nWorker functions for multiprocessing\n'''\nfrom multiprocessing import Process, Queue\nfrom gsmooth import gsmooth\nimport general\n\n'''\nvoltage and current at one position with pos_ndx\nFilter current and voltage with respect to time\n'''\n\ndef pos_timetr(pos_ndx, pos_array, cur_dict, Vbias_dict, Vcur_dict, smooth_interval, remove_defect=False):\n \n pos = pos_array[pos_ndx]\n I_ant = cur_dict[str(pos[0])]\n \n V = Vbias_dict[str(pos[0])]\n I = - Vcur_dict[str(pos[0])] / resistor\n\n if remove_defect == True:\n ind = 0\n indls = []\n for i in I:\n ss_i = gsmooth(i,200)\n tarr = numpy.arange(0, len(ss_i))\n\n if ind == 0:\n old = ss_i\n else:\n arr1 = ss_i - old\n area = numpy.trapz(arr1)\n\n if min(arr1[int(len(arr1)/2):]) < -5e-5:\n indls.append(ind)\n else:\n old = ss_i\n ind += 1\n print('eliminate ', indls)\n\n V = numpy.delete(V, indls, axis=0)\n I = numpy.delete(I, indls, axis=0)\n \n if smooth_interval != 0:\n \n ss_V = gaussian_filter1d(V, smooth_interval, axis=-1)\n ss_I = gaussian_filter1d(I, smooth_interval, axis=-1)\n \n tarr = numpy.arange(len(ss_V[0])) * dt\n \n return general.subsample(ss_V, smooth_interval), general.subsample(ss_I, smooth_interval), general.subsample(tarr, smooth_interval, 1)\n \n else:\n return V, I, numpy.arange(0,len(I_ant[0]))*dt\n\n\n#================================================================================================\n#================================================================================================\n\n\ndef run1():\n create_array = True\n for pndx in range(len(pos_array)):\n\n pos = pos_array[pndx]\n ind = str(pos[0])\n xndx = numpy.argwhere(xpos==pos[1])\n yndx = numpy.argwhere(ypos==pos[2])\n zndx = numpy.argwhere(zpos==pos[3])\n print(pos, end='-->') \n\n try:\n a = Vcur_dict[ind][1]\n \n except IndexError:\n print('Position does not exist')\n continue\n\n\n vol, cur, tarr = pos_timetr(pndx, pos_array, Icur_dict, Vbias_dict, Vcur_dict, smooth_interval_1) # read time series at position\n nt = len(tarr)\n \n # =============================\n\n \n if create_array == True:\n\n ne_arr = numpy.zeros((nz,ny,nx,nt))\n Te_arr = numpy.zeros((nz,ny,nx,nt))\n Vp_arr = numpy.zeros((nz,ny,nx,nt))\n Isat_arr = numpy.zeros((nz,ny,nx,nt))\n\n # Iant = Icur_dict[ind][0]\n # light = light_dict[ind][0]\n\n\n create_array = False\n\n # =============================\n Isat_arr[zndx, yndx, xndx] = numpy.average(cur[:5], axis=0)\n \n for tndx in range(nt):\n try:\n\n V = vol[:,tndx]\n I = cur[:,tndx]\n \n ne, Te, Vp = lp_analysis.integrate_distribution(V, I, label='a', smooth_interval=smooth_interval_2)\n\n ne_arr[zndx, yndx, xndx, tndx] = ne\n Te_arr[zndx, yndx, 
xndx, tndx] = Te\n                Vp_arr[zndx, yndx, xndx, tndx] = Vp\n\n            except KeyboardInterrupt:\n                raise SystemExit('Halt due to ctrl-c')\n\n            except Exception:\n                print('Error found: at tndx=', tndx)\n                ne_arr[zndx, yndx, xndx, tndx] = ne\n                Te_arr[zndx, yndx, xndx, tndx] = Te\n                Vp_arr[zndx, yndx, xndx, tndx] = Vp\n                pass\n\n\n    print('Writing to file')\n    sfn = ifn\n    numpy.save(sfn, (xpos, ypos, zpos, tarr, ne_arr, Te_arr, Vp_arr, Isat_arr))\n\n#-------------------------------------------------------------------------------------------------------------------------------------------\n\ndef run2(length=100):\n\n    create_array = True\n\n    for pndx in range(len(pos_array)):\n\n        pos = pos_array[pndx]\n        ind = str(pos[0])\n        xndx = numpy.argwhere(xpos==pos[1])\n        yndx = numpy.argwhere(ypos==pos[2])\n        zndx = numpy.argwhere(zpos==pos[3])\n        print(pos, end='-->')\n\n        vol, cur, tarr = pos_timetr(pndx, pos_array, Icur_dict, Vbias_dict, Vcur_dict, smooth_interval_1) # read time series at position\n        nt = len(tarr)\n\n        # =============================\n        if vol.shape[0] == 1: # position doesn't exist\n            print('No data at this position', end=' ')\n            continue\n\n        if create_array == True:\n\n            f_arr = numpy.zeros((nz,ny,nx,nt,length))\n            Isat_arr = numpy.zeros((nz,ny,nx,nt))\n\n            create_array = False\n\n        # =============================\n        Isat_arr[zndx, yndx, xndx] = numpy.average(cur[:5], axis=0)\n\n        for tndx in range(nt):\n            try:\n\n                V = vol[:,tndx]\n                I = cur[:,tndx]\n\n                dIdV, max_ind = lp_analysis.derivative(V, I)\n\n                dIdV_sub, Vnew_sub, Vp, Vfake, f, yne, popt = lp_analysis.distribution(V, dIdV,max_ind, length=length)\n                f_arr[zndx, yndx, xndx, tndx] = f\n\n            except KeyboardInterrupt:\n                raise SystemExit('Halt due to ctrl-c')\n\n            except Exception:\n                print('Error found: at tndx=', tndx)\n                f_arr[zndx, yndx, xndx, tndx] = f\n                pass\n\n\n    print('Writing to file')\n    sfn = ifn + '-distribution'\n    numpy.save(sfn, (xpos, ypos, zpos, tarr, Vfake, f_arr, Isat_arr))\n#================================================================================================\n#================================================================================================\n\nif __name__ == '__main__':\n\n    from read_3Ddata import read_step\n    import lp_analysis\n\n\n    print('---Reading data---')\n    pos_array, xpos, ypos, zpos, vset, dt, Icur_dict, Vcur_dict, Vbias_dict, light_dict = read_step(ifn)\n\n    nx = len(xpos)\n    ny = len(ypos)\n    nz = len(zpos)\n\n    print('---Analyzing data---')\n\n#    run2(length=300)\n    run1()\n\n    print('Done')\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
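pos_timetr in the record above smooths the voltage and current traces with a Gaussian kernel and then decimates them by the same interval. A minimal, hedged sketch of that smooth-then-subsample step; the slice-based decimation stands in for the general.subsample helper, whose exact behavior is not shown in the source:

import numpy as np
from scipy.ndimage import gaussian_filter1d


def smooth_and_decimate(trace, interval):
    """Gaussian-smooth along the last axis, then keep every `interval`-th sample."""
    if interval == 0:
        return np.asarray(trace)
    smoothed = gaussian_filter1d(trace, interval, axis=-1)
    return smoothed[..., ::interval]   # stand-in for general.subsample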
+{"seq_id":"539598321","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse, HttpResponse\nfrom django.core import serializers\n\nfrom django.template.loader import render_to_string\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom django.db.models import F, Sum, Count, Q\n# Create your views here.\n\nfrom dal import autocomplete\n\nfrom .filters import HMOBillFilter\n\nfrom django.forms import inlineformset_factory\n\nfrom account.decorators import unauthenticated_user\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import *\nfrom .models import *\nfrom .tables import *\n\n'''----------------------------- Automcomplete --------------------------------'''\n\nclass HMOAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n # if not self.request.user.is_authenticated():\n # return Address.objects.none()\n\n\n qs = HMO.objects.all().order_by('description')\n\n if self.q:\n qs = qs.filter(Q(description__icontains=self.q)|Q(code__icontains=self.q))\n return qs\n\nclass PatientAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n # if not self.request.user.is_authenticated():\n # return Address.objects.none()\n\n qs = Patient.objects.all().order_by('last_name')\n\n if self.q:\n qs = qs.filter(Q(last_name__icontains=self.q)|Q(first_name__icontains=self.q))\n return qs\n\nclass ItemAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n # if not self.request.user.is_authenticated():\n # return Address.objects.none()\n\n qs = Item.objects.all().order_by('description')\n\n if self.q:\n qs = qs.filter(Q(description__icontains=self.q)|Q(code__icontains=self.q))\n return qs\n\nclass DoctorAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n # if not self.request.user.is_authenticated():\n # return Address.objects.none()\n\n qs = Doctor.objects.all().order_by('last_name')\n\n if self.q:\n qs = qs.filter(Q(last_name__icontains=self.q)|Q(first_name__icontains=self.q)|Q(code__icontains=self.q))\n return qs\n\ndef home(request):\n hmo_bill = HMOBill.objects.all().order_by('-date_created')\n\n hmo_bill_total_entry = hmo_bill.count()\n hmo_bill_encoded = hmo_bill.filter(bill_status='Encoded').count()\n hmo_bill_pending = hmo_bill.filter(bill_status='Pending').count()\n hmo_bill_billed = hmo_bill.filter(bill_status='Billed').count()\n\n\n template_name = 'home.html'\n context = {'hmo_bill_total_entry': hmo_bill_total_entry,\n 'hmo_bill_encoded':hmo_bill_encoded,\n 'hmo_bill_pending': hmo_bill_pending,\n 'hmo_bill_billed': hmo_bill_billed,\n 'hmo_bill': hmo_bill,\n }\n return render(request, template_name, context)\n\n\n''' -------------------------- Item Section ---------------------------------'''\n\n@login_required(login_url='signin')\ndef ItemList(request):\n items = Item.objects.all()\n\n context = {'items': items}\n template_name = 'settings/items/list.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef ItemDetail(request, slug):\n\n item_detail = Item.objects.get(slug=slug)\n\n context = {'item': item_detail}\n template_name = 'settings/items/detail.html'\n return render(request, template_name, 
context)\n\n@login_required(login_url='signin')\ndef CreateItem(request):\n form = ItemForm(request.POST or None)\n\n if form.is_valid():\n obj = form.save(commit=False)\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data\n obj.user = request.user\n obj.save()\n form = ItemForm()\n return redirect('item-list')\n \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n template_name = 'settings/items/create.html'\n context = {\"form\": form}\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef UpdateItem(request, slug):\n item = Item.objects.get(slug=slug)\n form = ItemForm(request.POST or None, instance=item) # instance=obj loads the specific record to update\n if form.is_valid():\n form.save()\n return redirect('item-list') \n template_name = 'settings/items/update.html'\n context = {\"form\": form, \"title\": f\"Update {item.code}, {item.description}\" }\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef DeleteItem(request, slug):\n item = Item.objects.get(slug=slug)\n template_name = 'settings/items/delete.html'\n if request.method == \"POST\":\n item.delete()\n return redirect('item-list')\n context = {\"item\": item}\n return render(request, template_name, context)\n\n''' -------------------------- HMO Section ---------------------------------''' \n\n@login_required(login_url='signin')\ndef HMOList(request):\n hmos = HMO.objects.all()\n\n context = {'hmos': hmos}\n template_name = 'settings/hmos/list.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef HMODetail(request, slug):\n\n hmo_detail = HMO.objects.get(slug=slug)\n\n context = {'hmo': hmo_detail}\n template_name = 'settings/hmos/detail.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef CreateHMO(request):\n form = HMOForm(request.POST or None)\n if form.is_valid():\n obj = form.save(commit=False)\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data\n obj.user = request.user\n obj.save()\n form = HMOForm()\n return redirect('hmo-list')\n \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n template_name = 'settings/hmos/create.html'\n context = {\"form\": form}\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef UpdateHMO(request, slug):\n hmo = HMO.objects.get(slug=slug)\n form = HMOForm(request.POST or None, instance=hmo) # instance=obj loads the specific record to update\n if form.is_valid():\n form.save()\n return redirect('hmo-list') \n template_name = 'settings/hmos/update.html'\n context = {\"form\": form, \"title\": f\"Update {hmo.code}, {hmo.description}\" }\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef DeleteHMO(request, slug):\n hmo = HMO.objects.get(slug=slug)\n template_name = 'settings/hmos/delete.html'\n if request.method == \"POST\":\n hmo.delete()\n return redirect('hmo-list')\n context = {\"hmo\": hmo}\n return render(request, template_name, context)\n\n''' -------------------------- Doctor Section ---------------------------------''' \n\n@login_required(login_url='signin')\ndef DoctorList(request):\n doctors = Doctor.objects.all()\n\n context = {'doctors': doctors}\n template_name = 'settings/doctors/list.html'\n return 
render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef DoctorDetail(request, slug):\n\n doctor_detail = Doctor.objects.get(slug=slug)\n\n context = {'doctor': doctor_detail}\n template_name = 'settings/doctors/detail.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef CreateDoctor(request):\n form = DoctorForm(request.POST or None)\n if form.is_valid():\n obj = form.save(commit=False)\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data\n obj.user = request.user\n obj.save()\n form = DoctorForm()\n return redirect('doctor-list')\n \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n template_name = 'settings/doctors/create.html'\n context = {\"form\": form}\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef UpdateDoctor(request, slug):\n doctor = Doctor.objects.get(slug=slug)\n form = DoctorForm(request.POST or None, instance=doctor) # instance=obj loads the specific record to update\n if form.is_valid():\n form.save()\n return redirect('doctor-list') \n template_name = 'settings/doctors/update.html'\n context = {\"form\": form, \"title\": f\"Update {doctor.code}, - {doctor.last_name}, {doctor.first_name} -- {doctor.specialization}\" }\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef DeleteDoctor(request, slug):\n doctor = Doctor.objects.get(slug=slug)\n template_name = 'settings/doctors/delete.html'\n if request.method == \"POST\":\n doctor.delete()\n return redirect('doctor-list')\n context = {\"doctor\": doctor}\n return render(request, template_name, context)\n\n''' -------------------------- Patient Section ---------------------------------''' \n\n@login_required(login_url='signin')\ndef PatientList(request):\n patients = Patient.objects.all()\n\n context = {'patients': patients}\n template_name = 'settings/patients/list.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef PatientDetail(request, slug):\n\n patient_detail = Patient.objects.get(slug=slug)\n\n context = {'patient': patient_detail}\n template_name = 'settings/patients/detail.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef CreatePatient(request):\n form = PatientForm(request.POST or None)\n if form.is_valid():\n obj = form.save(commit=False)\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data\n obj.user = request.user\n obj.save()\n form = PatientForm()\n return redirect('patient-list')\n \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n template_name = 'settings/patients/create.html'\n context = {\"form\": form}\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef CreatePatientModal(request):\n data = dict()\n form = PatientForm(request.POST or None)\n if form.is_valid():\n obj = form.save(commit=False)\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data\n obj.user = request.user\n obj.save()\n form = PatientForm()\n data['form_is_valid'] = True\n \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n context = {\"form\": form}\n data['html_form'] = 
render_to_string('settings/patients/modal/modal-create.html', context, request=request)\n \n return JsonResponse(data)\n\n@login_required(login_url='signin')\ndef UpdatePatient(request, slug):\n patient = Patient.objects.get(slug=slug)\n form = PatientForm(request.POST or None, instance=patient) # instance=obj loads the specific record to update\n if form.is_valid():\n form.save()\n return redirect('patient-list') \n template_name = 'settings/patients/update.html'\n context = {\"form\": form, \"title\": f\"Update {patient.last_name}, - {patient.first_name}, {patient.middle_name}\" }\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef DeletePatient(request, slug):\n patient = Patient.objects.get(slug=slug)\n template_name = 'settings/patients/delete.html'\n if request.method == \"POST\":\n patient.delete()\n return redirect('patient-list')\n context = {\"patient\": patient}\n return render(request, template_name, context)\n\n''' -------------------------- HMOBill Section ---------------------------------''' \n@login_required(login_url='signin')\ndef HMOBillReport(request):\n hmo_bill_report = HMOBill.objects.filter(bill_status='Pending').order_by('-utility_date')\n \n charges = hmo_bill_report.values_list('charges', flat=True)\n professional_fee = hmo_bill_report.values_list('professional_fee', flat=True)\n credit = hmo_bill_report.values_list('credit', flat=True)\n total = hmo_bill_report.values_list('total', flat=True)\n\n total_charges = sum(charges)\n total_professional_fee = sum(professional_fee)\n total_credit = sum(credit)\n total_balance = sum(total) \n \n context = {\n 'hmo_bill_report': hmo_bill_report,\n 'total_charges': total_charges,\n 'total_professional_fee': total_professional_fee,\n 'total_credit': total_credit,\n 'total_balance': total_balance\n }\n\n template_name = 'settings/hmo-bills/reports.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef HMOBillList(request):\n\n hmo_bills = HMOBill.objects.all()\n context = {'hmo_bills': hmo_bills}\n template_name = 'settings/hmo-bills/list.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef HMOBillDetail(request, slug):\n\n hmo_bill_detail = HMOBill.objects.get(slug=slug)\n\n context = {'hmo_bill': hmo_bill_detail}\n template_name = 'settings/hmo-bills/detail.html'\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef CreateHMOBill(request):\n qs = Doctor.objects.all()\n hmo_bill_list = HMOBill.objects.all().order_by('-date_created')\n form = HMOBillForm(request.POST or None)\n if form.is_valid():\n obj = form.save(commit=False)\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data\n obj.user = request.user\n obj.save()\n form = HMOBillForm()\n return redirect('create-hmo-bill')\n \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n template_name = 'settings/hmo-bills/create.html'\n context = {\"form\": form, 'qs': qs, 'hmo_bill_list': hmo_bill_list}\n return render(request, template_name, context)\n\n\ndef CreateHMOBillPost(request): \n if request.is_ajax() and request.method == 'POST':\n form = HMOBillForm(request.POST)\n if form.is_valid(): \n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n ser_instance = serializers.serialize('json', [ instance, ])\n return JsonResponse({'instance': ser_instance, }, 
status=200)\n else:\n return JsonResponse({'error': form.errors}, status=400) \n\n return JsonResponse({'error': ''}, status=400)\n\n\n@login_required(login_url='signin')\ndef UpdateHMOBillModal(request, slug):\n data = dict()\n hmo_bill = HMOBill.objects.get(slug=slug)\n form = HMOBillForm(request.POST or None, instance=hmo_bill)\n if form.is_valid():\n # obj.title = form.cleaned_data.get('title') + \"0\" can manipulate pre-existing data \n form.save()\n data['form_is_valid'] = True \n # title = form.cleaned_data['title'] \n # obj = BlogPost.objects.create(title=title) you can create and save this data across multiple models\n context = {\"form\": form}\n data['html_form'] = render_to_string('settings/hmo-bills/modal/modal-update.html', context, request=request)\n \n return JsonResponse(data)\n\n\n@login_required(login_url='signin')\ndef UpdateHMOBill(request, slug):\n hmo_bill = HMOBill.objects.get(slug=slug)\n form = HMOBillUpdateForm(request.POST or None, instance=hmo_bill) # instance=obj loads the specific record to update\n if form.is_valid():\n form.save()\n return redirect('create-hmo-bill') \n template_name = 'settings/hmo-bills/update.html'\n context = {\"form\": form, \"title\": f\"Update {hmo_bill.hmo}, - {hmo_bill.patient}, {hmo_bill.approval_number}\" }\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef DeleteHMOBill(request, slug):\n hmo_bill = HMOBill.objects.get(slug=slug)\n template_name = 'settings/patients/delete.html'\n if request.method == \"POST\":\n hmo_bill.delete()\n return redirect('create-hmo-bill')\n context = {\"hmo_bill\": hmo_bill}\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef CreateSOA(request):\n\n form = SOAForm(request.POST or None)\n hmo_bills = HMOBill.objects.all()\n\n myFilter = HMOBillFilter(request.GET, queryset=hmo_bills)\n hmo_bills = myFilter.qs\n\n if request.method == \"POST\":\n form = SOAForm(request.POST or None)\n hmo_bill_list_id = request.POST.getlist('check-status')\n list_of_obj = HMOBill.objects.filter(pk__in=hmo_bill_list_id) \n list_of_obj.update(bill_status='Pending')\n SOA.objects.bulk_create([SOA(hmo_bill=x) for x in list_of_obj])\n return redirect('soa-list')\n\n template_name = 'settings/hmo-bills/list-update.html'\n context = {\"form\": form, 'hmo_bills': hmo_bills, 'myFilter': myFilter }\n return render(request, template_name, context)\n\n\n\n\n'''------------------ Is this Correct? 
----------------'''\n\n # form = SOAForm(request.POST or None)\n # hmo_bills = HMOBill.objects.all()\n\n # if request.method == \"POST\":\n # form = SOAForm(request.POST or None)\n # hmo_bill_list_id = request.POST.getlist('check-status')\n # list_of_obj = HMOBill.objects.get(id=hmo_bill_list_id[0])\n # soa = SOA.objects.create(hmo_bill=list_of_obj)\n # soa.save() \n\n\n\n'''-------------------------- Queryset Instance ----------------------'''\n\n # hmo_bill_list_id = request.POST.getlist('check-status')\n\n\n # list_of_obj = HMOBill.objects.filter(pk__in=hmo_bill_list_id)\n # if request.method == \"POST\":\n # form = SOAForm(request.POST or None)\n # if form.is_valid():\n # soa = form.save(commit=False)\n # soa.hmo_bill = HMOBill.objects.get(pk=list_of_obj)\n # soa.save()\n\n''' --------------------- For Loop queryset -------------------''' \n # for i in list_of_obj:\n # soa = SOA.objects.create(hmo_bill=i)\n # soa.save()\n\n@login_required(login_url='signin')\ndef SOAList(request):\n\n soas = SOA.objects.values('soa_number').distinct()\n\n template_name = 'settings/soa/list.html'\n context = {'soas': soas}\n return render(request, template_name, context)\n\n@login_required(login_url='signin')\ndef SOADetail(request, soa_number):\n\n soa = SOA.objects.filter(soa_number=soa_number).order_by('-hmo_bill__patient__last_name', '-hmo_bill__utility_date')\n\n # hmo_bill_filter = HMOBill.objects.filter()\n\n charges = soa.values_list('hmo_bill__charges', flat=True)\n professional_fee = soa.values_list('hmo_bill__professional_fee', flat=True)\n credit = soa.values_list('hmo_bill__credit', flat=True)\n total = soa.values_list('hmo_bill__total', flat=True)\n \n # charges = hmo_bill_filter.values_list('charges', flat=True)\n # professional_fee = hmo_bill_filter.values_list('professional_fee', flat=True)\n # credit = hmo_bill_filter.values_list('credit', flat=True)\n # total = hmo_bill_filter.values_list('total', flat=True)\n\n total_charges = sum(charges)\n total_professional_fee = sum(professional_fee)\n total_credit = sum(credit)\n total_balance = sum(total)\n\n template_name = 'settings/soa/detail.html'\n # context = {'soa': soa}\n \n\n context = {\n 'soa': soa,\n 'total_charges': total_charges,\n 'total_professional_fee': total_professional_fee,\n 'total_credit': total_credit,\n 'total_balance': total_balance\n }\n\n return render(request, template_name, context)\n\n\n\n","sub_path":"HMO/billing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
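One caveat on CreateHMOBillPost above: HttpRequest.is_ajax() was deprecated in Django 3.1 and removed in Django 4.0. A hedged sketch of the documented header-based replacement; the helper name is_ajax_request is mine, not part of the original code:

def is_ajax_request(request):
    # jQuery and similar libraries set this header on XHR calls;
    # checking it directly replaces the removed request.is_ajax().
    return request.headers.get('x-requested-with') == 'XMLHttpRequest'

# The guard in CreateHMOBillPost would then become:
#   if is_ajax_request(request) and request.method == 'POST':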
+{"seq_id":"260718301","text":"import time\nimport scrapy\nfrom twilio.rest import TwilioRestClient\nfrom bs4 import BeautifulSoup as bs\n\n\nclass JoeSpider(scrapy.Spider):\n name = \"joey\"\n # Find these values at https://twilio.com/user/account\n account_sid = \"ACee226ee81e7abf75cc6208ec8c37c628\"\n auth_token = \"bac015d9ecd742472e9d065758c08eb4\"\n\n def start_requests(self):\n urls = [\n 'http://www.attheraces.com/tips/atr-tipsters/hugh-taylor'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n text_bets = []\n betting_weight = 1\n\n rbets = response.xpath('//*[@id=\"recommended_bet\"]/div/div/p').extract()\n\n for rbet in rbets:\n soup = bs(rbet, 'html.parser')\n horse = soup.span.string\n\n bet = soup.getText()\n bet, trash = bet.split('(')\n\n bet_value, bet_type = bet.split('pt')\n\n bet_type = bet_type.replace('-', ' ')\n bet_type = bet_type.replace('pt', ' ')\n bet_type = bet_type[:5]\n bet_value = bet_value.replace('pt', ' ')\n bet_value = bet_value.replace(' ', '')\n\n bet_value = str(int(bet_value)*betting_weight)\n text_bet = str(bet_value) + ' ' + bet_type + ' ' + horse.encode('ascii', 'ignore')\n text_bets.append(text_bet)\n\n with open(\"bets.txt\", \"r+\") as f:\n f.seek(0)\n data = f.read()\n\n if str(text_bets) in str(data):\n self.logger.info('Current Tips: %s', text_bets)\n\n time.sleep(10) # delays for 10 seconds\n url = 'http://www.attheraces.com/tips/atr-tipsters/hugh-taylor'\n yield scrapy.Request(url, self.parse, dont_filter=True)\n else:\n self.logger.info('NEW TIPS: %s', text_bets)\n\n client = TwilioRestClient(self.account_sid, self.auth_token)\n for text_bet in text_bets:\n if str(text_bet) not in str(data):\n client.messages.create(to=\"+353879203099\", from_=\"+353861802519\", body=text_bet)\n f.seek(0)\n f.truncate()\n f.write(str(text_bets))\n\n time.sleep(10) # delays for 10 seconds\n url = 'http://www.attheraces.com/tips/atr-tipsters/hugh-taylor'\n yield scrapy.Request(url, self.parse, dont_filter=True)\n","sub_path":"jimmySpiders/spiders/joey.py","file_name":"joey.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"473491904","text":"\"\"\"\ntest_custom_extension_in_hooks.\n\nTests to ensure custom cookiecutter extensions are properly made available to\npre- and post-gen hooks.\n\"\"\"\nimport codecs\nimport os\n\nimport pytest\n\nfrom cookiecutter import main\n\n\n@pytest.fixture\ndef output_dir(tmpdir):\n \"\"\"Fixture. Create and return custom temp directory for test.\"\"\"\n return str(tmpdir.mkdir('templates'))\n\n\n@pytest.mark.parametrize(\"template\", [\"include\", \"no-templates\", \"extends\", \"super\"])\ndef test_build_templates(template, output_dir):\n \"\"\"\n Verify Templates Design keywords.\n\n no-templates is a compatibility tests for repo without `templates` directory\n \"\"\"\n project_dir = main.cookiecutter(\n f'tests/test-templates/{template}', no_input=True, output_dir=output_dir,\n )\n\n readme_file = os.path.join(project_dir, 'requirements.txt')\n\n with codecs.open(readme_file, encoding='utf8') as f:\n readme = f.read().splitlines()\n\n assert readme == [\n \"pip==19.2.3\",\n \"Click==7.0\",\n \"pytest==4.6.5\",\n ]\n","sub_path":"tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"455824105","text":"import json\nimport os\n\nconfig = None\n\nINSTANCE = \"./instance\"\n\ndef load_config(testing=False):\n global config\n config_name = (not testing) and \"config.json\" or \"config_testing.json\"\n config_file = os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", INSTANCE, config_name))\n with open(config_file) as file:\n config = json.load(file)\n return config","sub_path":"poediscordcontroller/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"95930996","text":"#!/usr/bin/env python3\n\n\"\"\"Demonstration Script that extracts agent data from cache directory files.\n\nThis could be a modified to be a daemon\n\n\"\"\"\n\n# Standard libraries\nimport os\nimport json\nfrom threading import Timer\nimport hashlib\nimport argparse\nfrom collections import defaultdict\nfrom pprint import pprint\n\n\nclass Ingest(object):\n \"\"\"Infoset class that ingests agent data.\n\n Args:\n None\n\n Returns:\n None\n\n Methods:\n __init__:\n populate:\n post:\n \"\"\"\n\n def __init__(self, filename):\n \"\"\"Method initializing the class.\n\n Args:\n uid: Unique ID for Agent\n config: Configuration object\n\n Returns:\n None\n\n \"\"\"\n # Initialize key variables\n self.filename = filename\n self.data = defaultdict(lambda: defaultdict(dict))\n self.metadata = []\n data_types = ['chartable', 'other']\n\n # Ingest data\n with open(filename, 'r') as f_handle:\n information = json.load(f_handle)\n\n pprint(information)\n\n # Get universal parameters from file\n timestamp = information['timestamp']\n uid = information['uid']\n\n # Process chartable data\n for data_type in data_types:\n for label, group in sorted(information[data_type].items()):\n # Get universal parameters for group\n base_type = group['base_type']\n description = group['description']\n\n # Initialize base type\n if base_type not in self.data[data_type]:\n self.data[data_type][base_type] = []\n\n # Process data\n for datapoint in group['data']:\n index = datapoint[0]\n value = datapoint[1]\n source = datapoint[2]\n did = _did(uid, label, index)\n\n # Update data\n self.data[data_type][base_type].append(\n (uid, did, value, timestamp)\n )\n\n # Update sources\n self.metadata.append(\n (uid, did, label, source, description)\n )\n\n def counter32(self):\n \"\"\"Return counter32 chartable data from file.\n\n Args:\n None\n\n Returns:\n data: List of tuples (uid, did, value, timestamp)\n uid = UID of device providing data\n did = Datapoint ID\n value = Value of datapoint\n timestamp = Timestamp when data was collected by the agent\n\n \"\"\"\n # Initialize key variables\n if 'counter32' in self.data['chartable']:\n data = self.data['chartable']['counter32']\n else:\n data = []\n\n # Return\n return data\n\n def counter64(self):\n \"\"\"Return counter64 chartable data from file.\n\n Args:\n None\n\n Returns:\n data: List of tuples (uid, did, value, timestamp)\n uid = UID of device providing data\n did = Datapoint ID\n value = Value of datapoint\n timestamp = Timestamp when data was collected by the agent\n\n \"\"\"\n # Initialize key variables\n if 'counter64' in self.data['chartable']:\n data = self.data['chartable']['counter64']\n else:\n data = []\n\n # Return\n return data\n\n def gauge(self):\n \"\"\"Return gauge chartable data from file.\n\n Args:\n None\n\n Returns:\n data: List of tuples (uid, did, value, timestamp)\n uid = UID of device providing data\n did = Datapoint ID\n value = Value of datapoint\n timestamp = Timestamp when data was collected by the agent\n\n \"\"\"\n # Initialize key variables\n if 'gauge' in self.data['chartable']:\n data = self.data['chartable']['gauge']\n else:\n data = []\n\n # Return\n return data\n\n def other(self):\n \"\"\"Return other non-chartable data from file.\n\n Args:\n None\n\n Returns:\n data: List of tuples (uid, did, value, timestamp)\n uid = UID of device providing data\n did = Datapoint ID\n value = Value of datapoint\n timestamp = Timestamp when data was collected by the agent\n\n \"\"\"\n # Initialize key variables\n data = 
[]\n\n # Return (Ignore whether gauge or counter)\n for _, value in self.data['other'].items():\n data.extend(value)\n return data\n\n def sources(self):\n \"\"\"Return sources data from file.\n\n Args:\n None\n\n Returns:\n data: List of tuples (uid, did, label, source, description)\n uid = UID of device providing data\n did = Datapoint ID\n label = Label that the agent gave the category of datapoint\n source = Subsystem that provided the data in the datapoint\n description = Description of the label\n\n \"\"\"\n # Initialize key variables\n data = self.metadata\n\n # Return\n return data\n\n def purge(self):\n \"\"\"Purge cache file that was read.\n\n Args:\n None\n\n Returns:\n success: True if successful\n\n \"\"\"\n # Initialize key variables\n success = True\n\n try:\n os.remove(self.filename)\n except OSError:\n success = False\n\n # Return\n return success\n\n\ndef _did(uid, label, index):\n \"\"\"Create a unique DID from ingested data.\n\n Args:\n uid: UID of device that created the cache data file\n label: Label of the data\n index: Index of the data\n\n Returns:\n did: Datapoint ID\n\n \"\"\"\n # Initialize key variables\n prehash = ('%s%s%s') % (uid, label, index)\n hasher = hashlib.sha256()\n hasher.update(bytes(prehash.encode()))\n did = hasher.hexdigest()\n\n # Return\n return did\n\n\ndef ingest(cache_dir):\n \"\"\"Ingest agent data from cache directory.\n\n Args:\n cache_dir: Cache directory with agent data\n\n Returns:\n None\n\n \"\"\"\n # Add files in cache directory to list\n filenames = [filename for filename in os.listdir(\n cache_dir) if os.path.isfile(\n os.path.join(cache_dir, filename))]\n\n #########################################################################\n # This could be threaded on a per file basis\n #########################################################################\n\n # Read each cache file\n for filename in filenames:\n filepath = os.path.join(cache_dir, filename)\n\n # Process data\n get = Ingest(filepath)\n\n #####################################################################\n # We will need to keep track of the previous value of counter data\n # for each DID and then subtract one from the other to get\n # incremental values.\n #\n # If there is no previous value, then assume this is the first.\n #\n # We will have to only subtract between timestamps that are 300\n # seconds apart to ensure accuracy.\n #\n # We will have to determine whether the counter has rolled over and\n # take this into consideration\n #\n #####################################################################\n # Get counter information\n pprint(get.counter32())\n print('\\n\\n\\n')\n pprint(get.counter64())\n\n # Get other information\n print('\\n\\n\\n')\n pprint(get.gauge())\n print('\\n\\n\\n')\n pprint(get.other())\n print('\\n\\n\\n')\n pprint(get.sources())\n print('\\n\\n\\n')\n\n #####################################################################\n # Purge file so that we don't use it again\n #####################################################################\n\n # get.purge()\n\n #####################################################################\n # Put data in relevant database tables\n #####################################################################\n\n\ndef process_cli(additional_help=None):\n \"\"\"Return all the CLI options.\n\n Args:\n None\n\n Returns:\n args: Namespace() containing all of our CLI arguments as objects\n - cache_dir: Cache directory with agent data\n\n \"\"\"\n # Header for the help menu of the application\n parser = 
argparse.ArgumentParser(\n description=additional_help,\n formatter_class=argparse.RawTextHelpFormatter)\n\n # CLI argument for the config directory\n parser.add_argument(\n '--cache_dir',\n dest='cache_dir',\n required=True,\n default=None,\n type=str,\n help='Cache directory with agent data.'\n )\n\n # Return the CLI arguments\n args = parser.parse_args()\n\n # Return our parsed CLI arguments\n return args\n\n\ndef main():\n \"\"\"Process agent data.\n\n Args:\n None\n\n Returns:\n None\n\n \"\"\"\n # Get configuration\n args = process_cli()\n\n # Get data from the cache directory\n ingest(args.cache_dir)\n\n # Do the daemon thing\n Timer(10, main).start()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"infoset/sample_code/ingestd-test.py","file_name":"ingestd-test.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
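The long comment block inside ingest() describes keeping the previous counter value per DID and handling rollover. One way that delta could be computed, sketched under the assumption of fixed-width counters; the helper name counter_delta is mine, not part of the original script:

def counter_delta(previous, current, bits=32):
    """Increment between two readings of a wrapping counter.

    Assumes at most one rollover occurred between samples; use bits=64
    for counter64 datapoints.
    """
    modulus = 2 ** bits
    # Modular subtraction handles both the normal case and a single wrap.
    return (current - previous) % modulus

# Example: a counter32 that wrapped between polls
assert counter_delta(4294967290, 5) == 11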
+{"seq_id":"241566162","text":"'''\nDescription: \nAuthor: He Yuhang\nGithub: https:#github.com/hyhhhhhhhh\nDate: 2020-12-22 11:40:34\nLastEditors: Box\nLastEditTime: 2021-01-08 14:54:14\n'''\nimport sys\n\nfrom OpenGL.GL import *\n\nfrom OpenGL.GLUT import *\n\nfrom OpenGL.GLU import *\n\nfrom PIL import Image\n\nimport numpy as np\n\nfrom ogl.objloader import OBJ, MTL\n\nfrom ogl.light import setup_lighting\n\nfrom controller import IS_PERSPECTIVE, VIEW, EYE, LOOK_AT, EYE_UP, SCALE_K, WIN_H, WIN_W, mouseclick, mousemotion, keydown\n\nfrom nubs import BindNubs,THE_LIST\n\n\n\nfidr = \"resource/modelSequence/horses/\"\nHorses = list()\ndisplay = 0\nfor i in range(117):\n horse = 'horses_' + str(i) + '.obj'\n Horses.append(OBJ(fidr, horse))\n\n\nclass MyPyOpenGLTest:\n def __init__(self,\n width=640,\n height=480,\n models=Horses,\n title='MyPyOpenGLTest'.encode()):\n\n glutInit(sys.argv)\n\n self.texture_id = [0, 0]\n\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)\n\n glutInitWindowSize(width, height)\n\n self.window = glutCreateWindow(title)\n self.InitGL(width, height)\n\n self.models = models\n\n self.display_max = len(models)\n self.display_iter = 0\n\n glutDisplayFunc(self.Draw)\n\n glutIdleFunc(self.Draw)\n\n glutMouseFunc(mouseclick) # 注册响应鼠标点击的函数mouseclick()\t\n glutMotionFunc(mousemotion) # 注册响应鼠标拖拽的函数mousemotion()\t\n glutKeyboardFunc(keydown) # 注册键盘输入的函数keydown()\t\n\n #绕各坐标轴旋转的角度\n\n self.x = 0.0\n\n self.y = 0.0\n\n self.z = 0.0\n\n #绘制图形\n\n def DrawModels(self):\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, self.texture_id[1])\n Horses[self.display_iter].create_gl_list()\n glCallList(Horses[self.display_iter].gl_list)\n self.display_iter += 1\n self.display_iter = self.display_iter % self.display_max\n\n \n def Draw(self):\n\n # 设置视点\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glLoadIdentity()\n\n glMatrixMode(GL_PROJECTION)\n\n glLoadIdentity()\n\n if WIN_W > WIN_H:\n\n if IS_PERSPECTIVE:\n\n glFrustum(VIEW[0] * WIN_W / WIN_H, VIEW[1] * WIN_W / WIN_H,\n VIEW[2], VIEW[3], VIEW[4], VIEW[5])\n\n else:\n\n glOrtho(VIEW[0] * WIN_W / WIN_H, VIEW[1] * WIN_W / WIN_H,\n VIEW[2], VIEW[3], VIEW[4], VIEW[5])\n\n else:\n\n if IS_PERSPECTIVE:\n\n glFrustum(VIEW[0], VIEW[1], VIEW[2] * WIN_H / WIN_W,\n VIEW[3] * WIN_H / WIN_W, VIEW[4], VIEW[5])\n\n else:\n\n glOrtho(VIEW[0], VIEW[1], VIEW[2] * WIN_H / WIN_W,\n VIEW[3] * WIN_H / WIN_W, VIEW[4], VIEW[5])\n\n # 设置模型视图\n\n glMatrixMode(GL_MODELVIEW)\n\n glLoadIdentity()\n\n glScale(SCALE_K[0], SCALE_K[1], SCALE_K[2])\n\n gluLookAt(EYE[0], EYE[1], EYE[2], LOOK_AT[0], LOOK_AT[1], LOOK_AT[2],\n EYE_UP[0], EYE_UP[1], EYE_UP[2])\n\n glViewport(0, 0, WIN_W, WIN_H)\n\n #沿z轴平移\n\n # glTranslate(0.0, 0.0, -5.0)\n # self.DrawXyz()\n\n self.DrawGround1()\n\n # self.DrawSurface()\n\n self.DrawModels()\n\n #刷新屏幕,产生动画效果\n\n glutSwapBuffers()\n\n #修改各坐标轴的旋转角度\n\n def DrawSurface(self):\n THE_LIST = BindNubs()\n # global THE_LIST\n glCallList(THE_LIST)\n\n \n\n def DrawGround1(self):\n\n glClear(GL_COLOR_BUFFER_BIT)\n\n\n\n # #切换纹理\n glEnable(GL_TEXTURE_2D)\n glColor3f(1, 5, 1)\n\n glBindTexture(GL_TEXTURE_2D, self.texture_id[0])\n\n glBegin(GL_POLYGON)\n\n glTexCoord2f(1.0, 1.0)\n\n glVertex3f(-1.0, -0., -1.0)\n\n glTexCoord2f(0.0, 1.0)\n\n glVertex3f(1.0, -0., -1.0)\n\n glTexCoord2f(0.0, 0.0)\n\n glVertex3f(1.0, -0., 1.0)\n\n glTexCoord2f(1.0, 0.0)\n\n glVertex3f(-1.0, -0., 1.0)\n\n glEnd()\n\n def DrawGround2(self):\n\n # #切换纹理\n\n glColor3f(1, 5, 1)\n\n glBindTexture(GL_TEXTURE_2D, self.texture_id[0])\n\n 
glBegin(GL_QUADS)\n\n glTexCoord2f(1.0, 1.0)\n\n glVertex3f(-1.0, -1.0, -1.5)\n\n glTexCoord2f(0.0, 1.0)\n\n glVertex3f(1.0, -1.0, -1.5)\n\n glTexCoord2f(0.0, 0.0)\n\n glVertex3f(1.0, -1.0, 1.0)\n\n glTexCoord2f(1.0, 0.0)\n\n glVertex3f(-1.0, -1.0, 1.0)\n\n glEnd()\n\n # load textures\n\n def LoadTexture(self):\n\n # load the texture images\n\n imgFiles = '1.jpg'\n\n img = Image.open(imgFiles)\n\n width, height = img.size\n\n img = img.tobytes('raw', 'RGBX', 0, -1)\n\n # texture index 1\n\n self.texture_id[0] = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, self.texture_id[0])\n\n glTexImage2D(GL_TEXTURE_2D, 0, 4, width, height, 0, GL_RGBA,\n GL_UNSIGNED_BYTE, img)\n\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n\n imgFiles = '2.jpg'\n\n img = Image.open(imgFiles)\n\n width, height = img.size\n\n img = img.tobytes('raw', 'RGBX', 0, -1)\n\n # texture index 2\n\n self.texture_id[1] = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, self.texture_id[1])\n\n glTexImage2D(GL_TEXTURE_2D, 0, 4, width, height, 0, GL_RGBA,\n GL_UNSIGNED_BYTE, img)\n\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) \n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT) \n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\n # glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n\n def InitGL(self, width, height):\n\n self.LoadTexture()\n\n setup_lighting()\n\n glEnable(GL_TEXTURE_2D)\n\n glClearColor(0, 0, 0, 0.0)\n\n glClearDepth(1.0)\n\n glEnable(GL_TEXTURE_2D)\n\n glDisable( GL_CULL_FACE )\n\n glEnable(GL_DEPTH_TEST) # enable depth testing for correct occlusion\n\n glDepthFunc(GL_LEQUAL) # set the depth comparison function (GL_LEQUAL is just one option)\n\n glShadeModel(GL_SMOOTH)\n\n glEnable(GL_POINT_SMOOTH)\n\n glEnable(GL_LINE_SMOOTH)\n\n glEnable(GL_POLYGON_SMOOTH)\n\n glMatrixMode(GL_PROJECTION)\n\n glHint(GL_POINT_SMOOTH_HINT, GL_NICEST)\n\n glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)\n\n glHint(GL_POLYGON_SMOOTH_HINT, GL_FASTEST)\n\n glLoadIdentity()\n\n gluPerspective(45.0, float(width) / float(height), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n def MainLoop(self):\n\n glutMainLoop()\n\n\nif __name__ == '__main__':\n\n w = MyPyOpenGLTest()\n\n w.MainLoop()","sub_path":"Exp3/exp3_horses.py","file_name":"exp3_horses.py","file_ext":"py","file_size_in_byte":6984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
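Driving the 117-frame horse sequence from glutIdleFunc redraws as fast as the machine allows, so playback speed is uncontrolled. A hedged sketch of pacing it with GLUT's timer callback instead; FRAME_MS is an illustrative value, not from the original:

from OpenGL.GLUT import glutTimerFunc, glutPostRedisplay

FRAME_MS = 40  # ~25 frames per second

def tick(value):
    # Ask GLUT for a redraw, then re-arm the timer for the next frame.
    glutPostRedisplay()
    glutTimerFunc(FRAME_MS, tick, 0)

# In MyPyOpenGLTest.__init__, glutIdleFunc(self.Draw) could be replaced with:
#   glutTimerFunc(FRAME_MS, tick, 0)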
+{"seq_id":"477107128","text":"#!/usr/bin/env python3\n# Program to create terminology terminal emulator themes from json files\n\nfrom jinja2 import Template\nimport json\nimport shutil\nimport subprocess\nimport os\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser(description='Create them from json file')\nparser.add_argument('path', metavar='json_file', type=str, help='path to \\\nthe json file containing the theme.')\nargs = parser.parse_args()\n\n# Import json theme. \noutput_location = os.environ['HOME'] + \"/.config/terminology/themes\"\njson_file_path = args.path\njson_file = os.path.basename(args.path)\ntheme = json.loads(open(json_file_path).read())\n\nif theme[\"name\"] == \"\":\n theme_name = json_file.rstrip('.json')\nelse:\n theme_name = theme[\"name\"]\n\noutput_file = output_location + \"/\" + theme_name + \".edj\"\nif os.path.isfile(output_file):\n print(\"The theme \" + theme_name + \" already exists.\")\n sys.exit(0)\n\ntmp_location = \"/tmp/\" + theme_name\n\nshutil.copytree(\"build_template\", tmp_location)\n\ncolors = theme[\"color\"]\nbackground = theme[\"background\"]\nforeground = theme[\"foreground\"]\n\n# Change color15 to match foreground for random gen themes\nif foreground != colors[15]:\n colors[15] = foreground\n\ncolor_template = Template(open(\"template_files/theme.edc.j2\").read())\noutput_theme = color_template.render(zero=colors[0],one=colors[1],two=colors[2],\\\n three=colors[3],four=colors[4],five=colors[5],six=colors[6],\\\n seven=colors[7],eight=colors[8],nine=colors[9],ten=colors[10],\\\n eleven=colors[11],twelve=colors[12],thirteen=colors[13],\\\n fourteen=colors[14],fifteen=colors[15],background=background,\\\n foreground=foreground,theme_name=theme_name)\nbuild_template = Template(open(\"template_files/build.sh.j2\").read())\noutput_build = build_template.render(theme_name=theme_name)\n\nbuild_file = open(tmp_location+\"/build.sh\", \"w\")\ncolor_file = open(tmp_location+\"/\"+theme_name+\".edc\", \"w\")\n\nbuild_file.write(output_build)\nbuild_file.close()\n\nos.chmod(tmp_location+\"/build.sh\", 0o700)\n\ncolor_file.write(output_theme)\ncolor_file.close()\n\nos.chdir(tmp_location)\nsubprocess.call([tmp_location+\"/build.sh\"])\n\nshutil.move(tmp_location+\"/\"+theme_name+\".edj\", output_location)\n\nshutil.rmtree(tmp_location)\n","sub_path":"terminology_themer.py","file_name":"terminology_themer.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"581661237","text":"from nutils import mesh, function, solver, util, export, cli, testing\nimport numpy as np, treelog\nfrom CoolProp.CoolProp import PropsSI\nimport scipy.special as sc\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import norm\nfrom matplotlib import collections, colors\nimport pandas as pd\n# import seaborn as sns\nimport matplotlib.pyplot as plt\nimport math\n\n#################### Doublet model library #########################\n#Objects\nclass Aquifer:\n\n def __init__(self, aquifer):\n\n #if stoichastic params not used\n self.H = aquifer['H']\n self.φ = aquifer['porosity']\n self.K = aquifer['K']\n self.Q = aquifer['Q'] # pumping rate from well (negative value = extraction)\n\n #deterministic\n self.dtop = aquifer['dtop'] # depth to top aquifer\n self.dsensor = aquifer['dsensor'] # depth to esp sensor\n self.dpump = aquifer['dpump'] # depth to pump location\n self.labda = aquifer['labda'] # geothermal gradient\n self.Tsur = aquifer['Tsurface']\n self.ρf = self.rhof = aquifer['rhof']\n self.rhos = aquifer['rhos']\n self.cpf = aquifer['cpf']\n self.cps = aquifer['cps'] # stone specific heat capacity (limestone) [J/kg K]\n self.labdas = aquifer['labdas'] # thermal conductivity solid [W/mK]\n self.labdaf = aquifer['labdaf'] # thermal conductivity fluid [W/mK]\n self.mu = aquifer['viscosity']\n self.pref = aquifer['pref'] # initial reservoir pressure [Pa]\n self.Tref = aquifer['Tref'] # initial reservoir temperature [K]\n self.rw = aquifer['rw'] # well radius [m]\n self.rmax = aquifer['rmax'] # well radius of influence [m]\n self.mdot = self.Q * aquifer['rhof']\n self.D = 2 * aquifer['rw']\n self.Aw = 2 * np.pi * aquifer['rw']\n self.g = 9.81\n self.L = aquifer['L'] # distance between injection well and production well\n self.Tinj = aquifer['Tinj'] # initial temperature of injection well (reinjection temperature)\n self.patm = aquifer['patm'] # atmospheric pressure\n self.ε = aquifer['ε'] # tubing roughness [m]\n self.ct = aquifer['ct']\n\n # total system (rock + fluid) variable\n self.ρ = self.φ * self.rhof + (1 - self.φ) * self.rhos\n self.cp = self.φ * self.cpf + (1 - self.φ) * self.cps\n self.λ = self.φ * self.labdaf + (1 - self.φ) * self.labdas\n# class Well:\n#\n# def __init__(self, well, aquifer):\n#\n# self.Q = well['Q'] # pumping rate from well (negative value = extraction)\n# self.mdot = self.Q * aquifer['rho_f']\n# self.D = 2 * aquifer['rw']\n# self.Aw = 2 * np.pi * aquifer['rw']\n\nclass DoubletGenerator:\n \"\"\"Generates all properties for a doublet\n\n Args:\n\n \"\"\"\n def __init__(self, aquifer, sol, params=None):\n\n # Initialize deterministic parameters\n self.aquifer = aquifer\n self.time = 365*24*60*60 #1 year [s]\n self.H = self.aquifer.H\n self.Q = self.aquifer.Q\n self.alpha = self.aquifer.labdas / ( self.aquifer.rhos * self.aquifer.cps) #thermal diffusion of rock\n self.gamma = 0.577216 #euler constant\n\n self.pnode9 = sol[0]\n self.Tnode9 = sol[1]\n self.Tinj = self.aquifer.Tinj * np.ones_like(self.Tnode9)\n\n # if params:\n # Stoichastic parameters with effect on well test\n # self.params = params\n # self.H = np.mean(params[0])\n # self.Q = np.mean(params[4])\n\n # Set lengths in system\n self.lpipe = self.z = self.aquifer.dsensor\n self.dpump = self.aquifer.dpump\n\n # Set specs\n self.effpump = 0.61 # Efficiency of pump [-]\n self.eta = 0.61 # Efficiency of heat exchanger [-]\n self.Ppump = 2.671e5/2 # Power of pump [W]\n\n # Evaluate objects within doublet\n self.T_aqinjector = self.Tinj\n self.T_aqproducer = 
self._get_Tz(self.lpipe)\n self.P_aqproducer = self._get_pgz(self.aquifer.patm, self.lpipe, self.T_aqproducer)\n self.P_aqinjector = self._get_pgz(self.aquifer.patm, self.lpipe, self.Tinj)\n self.ppump = self._get_ppump(self.Ppump, self.Q)\n\n # Evaluate Tnodes within doublet\n self.Tnode10 = self.T_aqproducer # Tref when based on depth of sensor\n self.Tnode8 = self.get_Tnode8(self.Tnode9)\n self.Tnode6 = self.Tnode7 = self.get_Tnode7(self.Tnode9)\n self.Tnode4 = self.Tnode5 = self.Tinj\n self.Tnode3 = self.get_Tnode3(self.Tnode4)\n self.Tnode2 = self.get_Twinj(self.z - self.dpump, self.Tinj)\n self.Tnode1 = self.T_aqproducer\n\n # Evaluate pnodes within doublet\n self.pnode10 = self.P_aqproducer # pref when based on depth\n self.pnode8 = self.get_pnode8(self.pnode9)\n self.pnode6 = self.pnode7 = self.get_pnode7(self.pnode8)\n self.pnode4 = self.pnode5 = self.pnode6\n self.pnode3 = self.get_pnode3(self.pnode4)\n self.pnode2 = self.get_pnode2(self.pnode3)\n self.pnode1 = self.P_aqinjector # pref when based on depth and injection temperature\n\n # Calculate power output system\n self.Phe = self.aquifer.mdot * self.aquifer.cpf * (self.Tnode6 - self.Tinj)\n\n def get_Tw(self, dz, Tw):\n Tw = Tw.copy()\n dl = 10 # pipe segment [m]\n zi = np.linspace(self.z, self.z - dz, int(dz / dl) + 1)\n\n for i in range(len(zi)-1):\n Tw -= dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )\n return Tw\n\n def get_Twinj(self, dz, Tw):\n Tw = Tw.copy()\n dl = 10 # pipe segment [m]\n zi = np.linspace(0, dz, int(dz / dl) + 1)\n\n for i in range(len(zi)-1):\n Tw += dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )\n\n return Tw\n\n def _getqw(self, Tw, zi):\n qw = 4 * math.pi * self.aquifer.labdas * ( Tw - self._get_Tz(zi) ) / math.log( ( 4 * self.alpha * self.time ) / (math.exp(self.gamma) * self.aquifer.rw**2 ) )\n\n return qw\n\n def get_Tnode8(self, Tnode9):\n Tnode8 = self.get_Tw(self.z - self.dpump, Tnode9)\n\n return Tnode8\n\n def get_Tnode7(self, Tnode9):\n Tnode7 = self.get_Tw(self.z, Tnode9)\n\n return Tnode7\n\n def get_Tnode3(self, Tnode4):\n Tnode3 = self.get_Twinj(self.dpump, Tnode4)\n\n return Tnode3\n\n def get_Tnode2(self, Tnode4):\n Tnode2 = self.get_Twinj(self.z, Tnode4)\n\n return Tnode2\n\n def get_pnode8(self, pnode9):\n pnode8 = pnode9 - self._get_pgz(0, (self.z - self.dpump), self.Tnode9) - self._get_pfriction(self.z - self.dpump)\n # print('loss of pressure by height', self._get_pgz(0, (self.z - self.dpump), self.Tnode9))\n # print('loss of pressure by friction', self._get_pfriction(self.z - self.dpump))\n\n return pnode8\n\n def get_pnode7(self, pnode8):\n pnode7 = pnode8 - self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) - self._get_pfriction(self.dpump) + self._get_ppump(self.Ppump, self.Q)\n\n return pnode7\n\n def get_pnode3(self, pnode4):\n pnode3 = pnode4 + self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) + self._get_pfriction(self.dpump) #+ self._get_ppump(self.Ppump, self.Q)\n\n return pnode3\n\n def get_pnode2(self, pnode3):\n pnode2 = pnode3 + self._get_pgz(0, (self.z - self.dpump), self.T_aqinjector) + self._get_pfriction(self.z - self.dpump)\n\n return pnode2\n\n def _get_ppump(self, Ppump, Q):\n ppump = Ppump / (Q * self.effpump) # appropriate value is 20e5 Pa\n # print('pump added pressure', ppump)\n\n return ppump\n\n def _get_pgz(self, patm, z, T):\n \"\"\" Computes pressure of the aquifer as a function of the depth, temperature and pressure\n\n Arguments:\n z (float): depth (downwards from groundlevel is positive)\n Returns:\n p 
(float): value of pressure\n \"\"\"\n pgz = patm + self.aquifer.g * self.aquifer.rhof * z # density as a constant\n # pgz = patm + self.aquifer.g * self.rho(np.mean(T)-273, pgz) * z # density as a function of temperature and pressure\n\n return pgz\n\n def _get_pfriction(self, z):\n pfriction = (self._get_f() * self.aquifer.rhof * self.get_vmean(self.Q) * z) / 2 * self.aquifer.D\n\n return pfriction\n\n def _get_f(self):\n f = ( 1.14 - 2 * math.log10( self.aquifer.ε / self.aquifer.D + 21.25 / ( self.get_Re( self.get_vmean(self.Q) )**0.9 ) ) )**-2\n\n return f\n\n def get_vmean(self, Q):\n vmean = 4 * Q / ( math.pi * ( self.aquifer.D ** 2 ) )\n\n return vmean\n\n def get_Re(self, vmean):\n Re = ( self.aquifer.rhof * vmean ) / self.aquifer.mu\n\n return Re\n\n # Theis solution, temperature and pressure as a function of depth\n # def _get_P_wb(self, P_aquifer, T_aquifer):\n # \"\"\" Computes pressure at wellbore\n #\n # Arguments:\n # d (float): depth (downwards from groundlevel is positive)\n # Returns:\n # P_wb (float): value of pressure at well bore\n # \"\"\"\n # if P_aquifer == self.P_aqproducer:\n # Q = -self.Q\n # else:\n # Q = self.Q\n #\n # P_wb = P_aquifer + ( ( Q * self.mu(T_aquifer, P_aquifer) ) / ( 2 * math.pi * self.aquifer.K * self.aquifer.H ) ) * np.log ( self.aquifer.L / self.aquifer.rw)\n # return P_wb\n\n def _get_Tz(self, z):\n \"\"\" Computes temperature of the aquifer as a function of the depth\n\n Arguments:\n z (float): depth (downwards from groundlevel is positive)\n Returns:\n T (float): value of temperature\n \"\"\"\n T = self.aquifer.Tsur + z * self.aquifer.labda\n return T\n\n # Thermophysical properties\n def rho(self, Twater, Pwater):\n # rho = (1 + 10e-6 * (-80 * T - 3.3 * T**2 + 0.00175 * T**3 + 489 * p - 2 * T * p + 0.016 * T**2 * p - 1.3e-5 * T**3\\\n # * p - 0.333 * p**2 - 0.002 * T * p**2) )\n rho = PropsSI('D', 'T', Twater, 'P', Pwater, 'IF97::Water')\n # rho = self.aquifer.rhof * (1 - 3.17e-4 * (Twater - 298.15) - 2.56e-6 * (Twater - 298.15) ** 2)\n\n return rho\n\n def mu(self, Twater, Pwater):\n # mu = 0.1 + 0.333 * saltcontent + (1.65 + 91.9 * saltcontent**3) * math.exp(-(0.42*(saltcontent**0.8 - 0.17)**2 + 0.045) * Twater**0.8)\n mu = PropsSI('V', 'T', Twater, 'P', Pwater, 'IF97::Water')\n\n return mu\n\n ## Graphical variables for GUI ##\n # self.Dx = self.aquifer.L * 3 # domain of x\n # self.Dy = - (2 * self.aquifer.dtop + self.aquifer.H) # domain of y\n # self.Nx = 24 # number of nodes by x\n # self.Ny = 10 # number of nodes by y\n # self.nNodes = self.Nx * self.Ny # total number of nodes\n # self.ne = (self.Nx - 1) * (self.Ny - 1)\n # self.dx = self.Dx / self.Nx # segment length of x\n # self.dy = self.Dy / self.Ny # segment length of y\n # self.domain = np.array([self.dx, self.dy])\n # self.x_grid, self.y_grid = self._make_grid()\n # self.x_well, self.y_well = self._construct_well()\n # self.nodes_grid = self._make_nodes_grid()\n # self.coordinate_grid = self._make_coordinates_grid()\n # self.P_grid = self._compute_P_grid()\n # self.T_grid = self._compute_T_grid()\n # def _get_gaussian_points\n # def _compute_T_grid(self):\n # T_grid = self._get_T(-self.y_grid)\n # # P_grid[self.Ny/2][self.Nx/3] = self.P_wellbore\n # # P_grid[5][16] = self.P_wellbore\n # # P_grid[4][16] = self.P_wellbore\n # T_grid[5][8] = self.Tinj\n # T_grid[4][8] = self.Tinj\n #\n # return T_grid\n\n # def _compute_P_grid(self):\n # P_grid = self._get_P(-self.y_grid)\n # # P_grid[self.Ny/2][self.Nx/3] = self.P_wellbore\n # P_grid[5][16] = self.P_wellbore\n # P_grid[4][16] = 
self.P_wellbore\n # P_grid[5][8] = self.P_wellbore\n # P_grid[4][8] = self.P_wellbore\n #\n # return P_grid\n\n # def _make_nodes_grid(self):\n # \"\"\" Compute a nodes grid for the doublet\n #\n # Returns:\n # x_grid_nodes, y_grid_nodes (np.array): arrays of the domain in x and y direction\n # \"\"\"\n # i = np.arange(0, self.Nx+1, 1)\n # j = np.arange(0, -self.Ny-1, -1)\n #\n # i_coords, j_coords = np.meshgrid(i, j)\n #\n # nodes_grid = np.array([i_coords, j_coords])\n #\n # return nodes_grid\n\n # def _make_coordinates_grid(self):\n # coordinates_grid = self.nodes_grid\n #\n # coordinates_grid[0,:,:] = self.nodes_grid[0,:,:] * self.domain[0]\n # coordinates_grid[1,:,:] = self.nodes_grid[1,:,:] * -self.domain[1]\n #\n # return coordinates_grid\n\n # def _make_grid(self):\n # \"\"\" Compute a cartesian grid for the doublet\n #\n # Returns:\n # domain (np.array): array of the domain in x and y direction\n # \"\"\"\n # x = np.linspace(0, self.aquifer.L * 3, self.Nx)\n # y = np.linspace(0,- (2 * self.aquifer.dtop + self.aquifer.H) , self.Ny)\n # x_grid, y_grid = np.meshgrid(x, y)\n #\n # return x_grid, y_grid\n\n # def _construct_well(self):\n # \"\"\" Compute two wells for the doublet\n #\n # Returns:\n # x_well, y_well (np.array): array of the x and y of the well\n # \"\"\"\n # # x = np.array([[self.aquifer.L * 5 - self.aquifer.L * 0.5], [self.aquifer.L * 5 + self.aquifer.L * 0.5]])\n # # y = np.linspace(0,- (self.aquifer.dtop + self.aquifer.H) , (20 * self.Ny) - 1)\n # x_well = np.array([[self.x_grid[0][math.floor(self.Nx/3)]], [self.x_grid[0][2*math.floor(self.Nx/3)]]])\n # y_well = self.y_grid[math.floor(self.Ny/2)][0] * np.ones(2)\n #\n # return x_well, y_well\n\n#Forward Analysis\ndef evaluateDoublet(doublet):\n print(\"\\r\\n############## Analytical values model ##############\\n\"\n \"m_dot: \", doublet.aquifer.mdot, \"kg/s\\n\"\n \"ppump,p/i \", doublet.ppump/1e5, \"Bar\\n\"\n \"pnode10/p_aq,p: \", doublet.pnode10/1e5, \"Bar\\n\"\n \"pnode9/p_bh,p: \", doublet.pnode9/1e5, \"Bar\\n\"\n \"pnode8/p_pu,p: \", doublet.pnode8/1e5, \"Bar\\n\"\n \"pnode7/p_out,p: \", doublet.pnode7/1e5, \"Bar\\n\"\n \"pnode6/p_in,HE: \", doublet.pnode6/1e5, \"Bar\\n\"\n \"pnode5/p_out,HE: \", doublet.pnode5/1e5, \"Bar\\n\"\n \"pnode2/p_bh,i: \", doublet.pnode2/1e5, \"Bar\\n\"\n \"pnode1/p_aq,i: \", doublet.pnode1/1e5, \"Bar\\n\"\n \"Tnode9/T_bh,p: \", doublet.Tnode9-273, \"Celsius\\n\"\n \"Tnode8/T_pu,p: \", doublet.Tnode8-273, \"Celsius\\n\"\n \"Tnode7/T_in,HE: \", doublet.Tnode7-273, \"Celsius\\n\" \n \"Tnode6/T_in,HE: \", doublet.Tnode6-273, \"Celsius\\n\"\n \"Tnode5/T_out,HE: \", doublet.Tnode5-273, \"Celsius\\n\"\n \"Tnode4/T_in,i: \", doublet.Tnode4-273, \"Celsius\\n\"\n \"Tnode3/T_pu,i: \", doublet.Tnode3-273, \"Celsius\\n\"\n \"Tnode2/T_bh,i: \", doublet.Tnode2-273, \"Celsius\\n\" \n \"Power,HE: \", doublet.Phe/1e6, \"MW\")\n MPA = 1e6\n pnodelist = [doublet.pnode2 / MPA, doublet.pnode3 / MPA, doublet.pnode4 / MPA, doublet.pnode5 / MPA,\n doublet.pnode6 / MPA, doublet.pnode7 / MPA, doublet.pnode8 / MPA, doublet.pnode9 / MPA]\n Tnodelist = [doublet.Tnode2, doublet.Tnode3, doublet.Tnode4, doublet.Tnode5, doublet.Tnode6, doublet.Tnode7,\n doublet.Tnode8, doublet.Tnode9]\n return pnodelist, Tnodelist\n\n# ## Finite element thermo-hydraulic model\n#\n# def DoubletFlow(aquifer, well, doublet, k, porosity, timestep, endtime):\n#\n# # construct mesh\n# nelemsX = 10\n# nelemsY = 10\n# vertsX = np.linspace(0, well.L, nelemsX + 1)\n# vertsY = np.linspace(0, aquifer.H, nelemsY + 1)\n# vertsZ = np.linspace(0, 
aquifer.H, nelemsY + 1)\n# topo, geom = mesh.rectilinear([vertsX, vertsY])\n# # topo = topo.withboundary(inner='left', outer='right')\n#\n# bezier = topo.sample('bezier', 3)\n# points, vals = bezier.eval([geom, 0])\n#\n# # # plot\n# # plt.figure(figsize=(10, 10))\n# # cmap = colors.ListedColormap(\"limegreen\")\n# # plt.tripcolor(points[:, 0], points[:, 1], bezier.tri, vals, shading='gouraud', cmap=cmap)\n# # ax = plt.gca()\n# # ax.add_collection(collections.LineCollection(points[bezier.hull], colors='r', linewidth=2, alpha=1))\n#\n# # create namespace\n# ns = function.Namespace()\n# degree = 3\n# ns.pbasis = topo.basis('std', degree=degree)\n# ns.Tbasis = topo.basis('std', degree=degree - 1)\n# ns.p = 'pbasis_n ?lhsp_n'\n# ns.T = 'Tbasis_n ?lhsT_n'\n# ns.x = geom\n# ns.cf = aquifer.Cp_f\n# ns.g = aquifer.g\n# ns.g_i = '<0, -g>_i'\n# ns.uinf = 1, 0\n# ns.mdot = well.mdot\n# ns.r = well.r\n# ns.Awell = well.A_well\n# ns.nyy = 0, 1\n# ns.pout = doublet.P_aqproducer\n# ns.p0 = ns.pout\n# ns.Tatm = 20 + 273\n# ns.Tin = doublet.well.Tinj\n# ns.Tout = doublet.T_HE\n# ns.T0 = doublet.T_HE\n# ns.ρf = aquifer.rhof\n# ns.ρ = ns.ρf #* (1 - 3.17e-4 * (ns.T - 298.15) - 2.56e-6 * (ns.T - 298.15)**2) #no lhsT in lhsp\n# ns.lambdl = aquifer.labda_l #'thermal conductivity liquid [W/mK]'\n# ns.lambds = aquifer.labda_s #'thermal conductivity solid [W/mK]'\n# ns.qh = ns.lambds * aquifer.labda #heat source production rocks [W/m^2]\n# k_int_x = k #'intrinsic permeability [m2]'\n# k_int_y = k #'intrinsic permeability [m2]'\n# k_int= (k_int_x,k_int_y)\n# ns.k = (1/aquifer.mu)*np.diag(k_int)\n# ns.k1 = k\n# ns.u_i = '-k_ij (p_,j - (ρ g_1)_,j)' #darcy velocity\n# ns.ur = '-k1 (p_,i)' #darcy velocity, but now simple\n# ns.u0 = (ns.mdot / (ns.ρ * ns.Awell))\n# ns.qf = -ns.u0\n# ns.λ = porosity * ns.lambdl + (1 - porosity) * ns.lambds # heat conductivity λ [W/m/K]\n# ns.porosity = porosity\n# ns.w = math.sin()\n# ns.Ar = aquifer.H * ns.w\n#\n# # define initial condition for mass balance and darcy's law\n# sqr = topo.integral('(p - p0) (p - p0)' @ ns, degree=degree * 2) # set initial temperature to T=T0\n# pdofs0 = solver.optimize('lhsp', sqr)\n# statep0 = dict(lhsp=pdofs0)\n#\n# # define dirichlet constraints for hydraulic process\n# sqrp = topo.boundary['right'].integral('(p - pout) (p - pout) d:x' @ ns, degree=degree * 2) # set outflow condition to p=p_out\n# consp = solver.optimize('lhsp', sqrp, droptol=1e-15)\n# # consp = dict(lhsp=consp)\n#\n# # formulate hydraulic process single field\n# resp = topo.integral('(u_i porosity pbasis_n,i) d:x' @ ns, degree=degree*2) # formulation of velocity\n# resp -= topo.boundary['left'].integral('pbasis_n qf d:x' @ ns, degree=degree*2) # set inflow boundary to q=u0\n# resp += topo.boundary['top,bottom'].integral('(pbasis_n u_i n_i) d:x' @ ns, degree=degree*2) #neumann condition\n# pinertia = topo.integral('ρ pbasis_n,i u_i porosity d:x' @ ns, degree=degree*4)\n#\n# # solve for transient state of pressure\n# # lhsp = solver.solve_linear('lhsp', resp, constrain=consp)\n#\n# # introduce temperature dependent variables\n# ns.ρ = ns.ρf * (1 - 3.17e-4 * (ns.T - 298.15) - 2.56e-6 * (ns.T - 298.15)**2)\n# ns.lambdl = 4187.6 * (-922.47 + 2839.5 * (ns.T / ns.Tatm) - 1800.7 * (ns.T / ns.Tatm)**2 + 525.77*(ns.T / ns.Tatm)**3 - 73.44*(ns.T / ns.Tatm)**4)\n# # ns.cf = 3.3774 - 1.12665e-2 * ns.T + 1.34687e-5 * ns.T**2 # if temperature above T=100 [K]\n#\n# # define initial condition for thermo process\n# sqr = topo.integral('(T - T0) (T - T0)' @ ns, degree=degree * 2) # set initial temperature 
to T=T0\n# Tdofs0 = solver.optimize('lhsT', sqr)\n# stateT0 = dict(lhsT=Tdofs0)\n#\n# # define dirichlet constraints for thermo process\n# sqrT = topo.boundary['left'].integral('(T - Tin) (T - Tin) d:x' @ ns, degree=degree*2) # set temperature injection pipe to T=Tin\n# # sqrT = topo.boundary['left, bottom, top'].integral('(T - T0) (T - T0) d:x' @ ns, degree=degree*2) #set bottom temperature T=T0\n# consT = solver.optimize('lhsT', sqrT, droptol=1e-15)\n# consT = dict(lhsT=consT)\n#\n# # formulate thermo process\n# resT = topo.integral('(ρ cf Tbasis_n (u_k T)_,k ) d:x' @ ns, degree=degree*2) # formulation of convection of energy\n# resT -= topo.integral('Tbasis_n,i (- λ) T_,i d:x' @ ns, degree=degree*2) # formulation of conductive heat flux\n# resT -= topo.boundary['top,bottom'].integral('Tbasis_n qh d:x' @ ns, degree=degree*2) # heat flux on boundary\n# # resT -= topo.integral('Tbasis_n qh d:x' @ ns, degree=degree*2) # heat source/sink term within domain\n# Tinertia = topo.integral('ρ cf Tbasis_n T d:x' @ ns, degree=degree*4)\n#\n# def make_plots():\n# fig, ax = plt.subplots(2)\n#\n# ax[0].set(xlabel='X [m]', ylabel='Pressure [Bar]')\n# ax[0].set_ylim([min(p/1e5), doublet.P_aqproducer/1e5])\n# # ax[0].set_xlim([0, 1000])\n# print(\"wellbore pressure\", p[0])\n# print(\"pressure difference\", p[0] - doublet.P_aqproducer)\n# ax[0].plot(x[:, 0].take(bezier.tri.T, 0), (p/1e5).take(bezier.tri.T, 0))\n#\n# # ax[1].set(xlabel='X [m]', ylabel='Temperature [Celcius]')\n# # ax[1].plot(x[:,0].take(bezier.tri.T, 0), T.take(bezier.tri.T, 0)-273)\n#\n# fig, axs = plt.subplots(3, sharex=True, sharey=True)\n# fig.suptitle('2D Aquifer')\n#\n# plot0 = axs[0].tripcolor(x[:, 0], x[:, 1], bezier.tri, p / 1e5, vmin=min(p/1e5), vmax=doublet.P_aqproducer/1e5, shading='gouraud', rasterized=True)\n# fig.colorbar(plot0, ax=axs[0], label=\"Darcy p [Bar]\")\n#\n# plot1 = axs[1].tripcolor(x[:, 0], x[:, 1], bezier.tri, u[:, 0], vmin=0, vmax=0.05, shading='gouraud',\n# rasterized=True)\n# fig.colorbar(plot1, ax=axs[1], label=\"Darcy Ux [m/s]\")\n# plt.xlabel('x')\n# plt.ylabel('z')\n#\n# # plot2 = axs[2].tripcolor(x[:, 0], x[:, 1], bezier.tri, T-273, shading='gouraud', rasterized=True)\n# # fig.colorbar(plot2, ax=axs[2], label=\"T [C]\")\n#\n# plt.show()\n#\n# # Time dependent pressure development\n#\n# bezier = topo.sample('bezier', 5)\n# with treelog.iter.plain(\n# 'timestep', solver.impliciteuler(('lhsp'), residual=resp, inertia=pinertia,\n# arguments=statep0, timestep=timestep, constrain=consp,\n# newtontol=1e-2)) as steps:\n# #arguments=dict(lhsp=lhsp, lhsT=Tdofs0)\n#\n# for istep, lhsp in enumerate(steps):\n#\n# time = istep * timestep\n# # x, u, p, T = bezier.eval(['x_i', 'u_i', 'p', 'T'] @ ns, **state)\n# x, p, u = bezier.eval(['x_i', 'p', 'u_i'] @ ns, lhsp=lhsp)\n#\n# if time >= endtime:\n# print(len(x[:, 0]), len(p))\n#\n# make_plots()\n# break\n#\n# # Time dependent heat transport process\n# bezier = topo.sample('bezier', 5)\n# with treelog.iter.plain(\n# 'timestep', solver.impliciteuler(('lhsT'), residual=resT, inertia=Tinertia,\n# arguments=dict(lhsp=lhsp, lhsT=Tdofs0), timestep=timestep, constrain=consT,\n# newtontol=1e-2)) as steps:\n#\n# for istep, lhsT in enumerate(steps):\n#\n# time = istep * timestep\n# # x, u, p, T = bezier.eval(['x_i', 'u_i', 'p', 'T'] @ ns, **state)\n# x, p, u, T = bezier.eval(['x_i', 'p', 'u_i', 'T'] @ ns, lhsp=lhsp, lhsT=lhsT)\n#\n# if time >= endtime:\n# print(len(x[:,0]), len(T))\n#\n# make_plots()\n# break\n#\n# bar = 1e5\n# p_inlet = p[0]/bar\n# T_prod = T[-1]\n#\n# return 
p_inlet, T_prod\n#\n# # solve for steady state of temperature\n# # lhsT = solver.newton('lhsT', resT, constrain=consT, arguments=dict(lhsp=lhsp)).solve(tol=1e-2)\n#\n#\n# #################\n# # Postprocessing\n# #################\n#\n# # bezier = topo.sample('bezier', 5)\n# # # x, p, u = bezier.eval(['x_i', 'p', 'u_i'] @ ns, lhsp=lhsp)\n# # x, p, u, T = bezier.eval(['x_i', 'p', 'u_i', 'T'] @ ns, lhsp=lhsp, lhsT=lhsT)\n#\n# def add_value_to_plot():\n# for i, j in zip(x[:,0], x[:,1]):\n# for index in range(len(T)):\n# print(T[index], index)\n# # axs[2].annotate(T[index], xy=(i, j))\n#\n# # add_value_to_plot()\n# # fig, ax = plt.subplots(4)\n# # density = 'True'\n# #\n# # ax[0].plot(x1,frozen_lognorm.pdf(x1)*(max(x1)-min(x1)))\n# # # ax[0].hist(permeability, bins=bin_centers1, density=density, histtype='stepfilled', alpha=0.2)\n# # ax[0].set(xlabel='Permeability K [m/s]', ylabel='Probability')\n# # ax[0].axvline(x=2.2730989084434785e-08)\n# #\n# # ax[1].plot(x2, frozen_norm_por.pdf(x2)*(max(x2)-min(x2)))\n# # # ax[1].hist(porosity, bins=bin_centers2, density=density, histtype='stepfilled', alpha=0.2)\n# # ax[1].set(xlabel='Porosity [-]', ylabel='Probability')\n# # ax[1].axvline(x=0.163)\n# #\n# # ax[2].hist(p_inlet, density=density, bins=50, histtype='stepfilled', alpha=0.2)\n# # mu_p = np.mean(p_inlet)\n# # # print(mu_p)\n# # stddv_p = np.var(p_inlet)**0.5\n# # # print(stddv_p)\n# # frozen_norm_p = stats.norm(loc=mu_p, scale=stddv_p)\n# # x3 = np.linspace(mu_p-3*stddv_p, mu_p+3*stddv_p, 10)\n# # # print(frozen_norm_p.pdf(x3))\n# # # ax[2].plot(x3,frozen_lognorm_p.pdf(x3))\n# # ax[2].plot(x3,frozen_norm_p.pdf(x3))\n# # # ax[2].xaxis.set_major_locator(MaxNLocator(integer=True))\n# # ax[2].get_xaxis().get_major_formatter().set_useOffset(False)\n# # ax[2].set(xlabel='Injector Pressure [Bar]', ylabel='Probability')\n# # # plt.xlabel('Inlet Pressure [Bar]')\n# # # plt.ylabel('Probability')\n# #\n# # ax[3].hist(T_prod, density=density, bins=50, histtype='stepfilled', alpha=0.2)\n# # mu_T = np.mean(T_prod)\n# # stddv_T = np.var(T_prod)**0.5\n# # frozen_norm_T = stats.norm(loc=mu_T, scale=stddv_T)\n# # x4 = np.linspace(mu_T-3*stddv_T, mu_T+3*stddv_T, 10)\n# # # print(frozen_norm_p.pdf(x4))\n# # ax[3].plot(x4,frozen_norm_T.pdf(x4))\n# # ax[3].set(xlabel='Producer Temperature [Celcius]', ylabel='Probability')\n# #\n# # # print(ns.u0.eval())\n# # # print(\"velocity horizontal\", (u[:,0]))\n# # # print((p[0]))\n# # plt.subplots_adjust(hspace=1)\n# # # plt.show()\n# #\n# # Confidence_mu = 0.95\n# # N_min = (norm.ppf((1 + Confidence_mu)/2) / (1 - Confidence_mu))**2 * (stddv_p / mu_p)**2\n# # print(\"Cdf\", norm.ppf((1 + Confidence_mu)/2))\n# # print(\"N_min\", N_min)\n#\n# # fig1, ax1 = plt.subplots(2)\n#\n# # import numpy as np\n# # from scipy import stats\n#\n# # sns.set(color_codes=True)\n#\n# # x = np.random.normal(size=100)\n# # sns.distplot(x);\n# #\n# # mean, cov = [0, 1], [(1, .5), (.5, 1)]\n# # data = np.random.multivariate_normal(mean, cov, 200)\n# # df = pd.DataFrame(data, columns=[\"x1\", \"x2\"])\n# # sns.jointplot(x=\"x1\", y=\"x2\", data=df);\n#\n# # f, ax = plt.subplots(figsize=(6, 6))\n# # sns.kdeplot(x1, x2, ax=ax)\n# # sns.rugplot(x1, color=\"g\", ax=ax)\n# # sns.rugplot(x2, vertical=True, ax=ax);\n#\n# # fig1.suptitle('2D Probability plot')\n# # triang = tri.Triangulation(x1, x2)\n#\n# # plot1 = ax1[0].tripcolor(x1, x2, triang, frozen_lognorm.pdf(x1)+frozen_norm_por.pdf(x2), shading='gouraud', rasterized=True)\n# # fig1.colorbar(plot1, ax=ax1[0], label=\"Probability [x]\")\n#\n# # Z = 
frozen_lognorm.pdf(x1)*frozen_norm_por.pdf(x2)\n# # print(\"permeability\", len(x1))\n# # print(\"porosity\", len(x2))\n# # print(\"dit is Z\", len(Z))\n# # fig1, ax1 = plt.subplots()\n# # CS = ax1.contour(x1, x2, Z)\n# # ax1.clabel(CS, inline=1, fontsize=10)\n# # # ax1.set_title('Simplest default with labels')\n# #\n# # plt.show()\n\n\n","sub_path":"files/myFUQlib.py","file_name":"myFUQlib.py","file_ext":"py","file_size_in_byte":28024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"447076087","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField\nfrom wtforms.validators import DataRequired, Email, Length\n\n\nclass ContactForm(FlaskForm):\n fname = StringField('First Name', validators=[DataRequired(), Length(min=-1, max=80, message='You cannot have more than 80 characters')])\n lname = StringField('Last Name', validators=[Length(min=-1, max=100, message='You cannot have more than 100 characters')])\n email = StringField('E-Mail', validators=[Email(), Length(min=-1, max=200, message='You cannot have more than 200 characters')])\n phone = StringField('Phone', validators=[Length(min=-1, max=20, message='You cannot have more than 20 characters')])\n title = StringField('Title', validators=[Length(min=-1, max=50, message='You cannot have more than 50 characters')])\n company = StringField('Company', validators=[Length(min=-1, max=100, message='You cannot have more than 100 characters')])","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"549406409","text":"\n#uses Tkinter a GUI for Python https://wiki.python.org/moin/TkInter\n\nfrom tkinter import *\n\n\n\ndef closeApp():\n app.quit()\n\n\ndef calculateLabel(*args):\n loan = float(loanAmount.get())\n yearlyinterest = float(loanInterest.get())\n interest = (yearlyinterest/100)/12\n term = int(loanTerm.get())\n total = loan * (interest/(1-(1+interest) ** (-term)))\n #loanAmount.delete(0,END)\n #loanInterest.delete(0,END) \n labelText.set(\"$\" + str(round(total,2)))\n\napp = Tk()\napp.title(\"Loan Calculator\")\napp.geometry('500x200+200+200')\napp.bind(\"\",calculateLabel)\n\nmenubar = Menu(app)\nfilemenu = Menu(menubar, tearoff=0)\nfilemenu.add_command(label=\"Quit\",command=app.quit)\nmenubar.add_cascade(label=\"File\",menu=filemenu)\n\nlabel2 = Label(app, text='Loan Amount')\nlabel2.grid(row=1)\n\nloanAmount = IntVar()\nloanAmount.set(\"\")\nloanAmount = Entry(app, textvariable=loanAmount)\nloanAmount.focus_set()\nloanAmount.grid(row=1, column=1)\n\n\nlabel3 = Label(app, text='Interest Rate')\nlabel3.grid(row=2)\n\nloanInterest = IntVar()\nloanInterest.set(\"\")\nloanInterest = Entry(app, textvariable=loanAmount)\nloanInterest.grid(row=2, column=1)\n\nlabel4 = Label(app, text='Term in Months')\nlabel4.grid(row=3)\n\nloanTerm = IntVar()\nloanTerm.set(\"\")\nloanTerm = Entry(app, textvariable=loanTerm)\nloanTerm.grid(row=3, column=1)\n\npaymentLabel = Label(app, text='Your monthly payment will be ')\npaymentLabel.grid(row=8, column=0)\n\nlabelText = StringVar()\nlabelText.set(\"Payment\")\nlabel1 = Label(app, textvariable=labelText, height=4)\nlabel1.grid(row=8, column=1)\n\n\ncalculateButton = Button(app, text=\"Click to calculate\", width=20, command=calculateLabel)\ncalculateButton.grid(row=10,column=2)\n\ncloseButton = Button(app, text=\"Close\", width=20, command=closeApp)\ncloseButton.grid(row=10,column=1)\n\napp.config(menu=menubar)\n\napp.mainloop()","sub_path":"backend/example functions/loan calculations/CalculateLoanPayment.py","file_name":"CalculateLoanPayment.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"213758322","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\ndef modelo(dr,n): \n \n \n \n pi=math.pi\n rho_c=5.9*10**17 #densidad central\n m_n= 1.68*10**(-27) #masa neutron\n h=6.62*10**-34 #constante de planck\n c=3*10**8 #velocidad luz\n G=6.67*10**(-11) #constante universal\n p_f=((3/(8*pi))*(rho_c/m_n))**(1.0/3) * h #momento de fermi\n x=p_f/(m_n*c) #x\n \n l_P = [] \n l_rho = [] \n l_m = [] \n l_r = [] \n \n rho = 5.9*10**17 \n \n nn=float(n)\n k=((8*pi*h**2)/(15*m_n))*((3)/(8*pi*m_n*c**2))**(5.0/3)\n P = k*rho**((nn+1)/nn) \n \n m=0 \n dm=0 \n dP=0 \n r=0 \n \n while P> 0:\n \n cons=4*pi*rho*(r**2) \n dm= cons*dr \n m=m + dm\n l_m.append(m)\n print(r)\n \n \n if r>=0.001: \n hidro = -(G*m*rho)/(r**2) \n \n if r < 0.001:\n hidro = -r\n \n \n dP = dr*hidro\n P = P + dP \n print(rho)\n \n if P>=0:\n \n l_P.append(P)\n rho = (P/k)**(1/((nn+1)/nn)) \n l_rho.append(rho) \n r = r + dr \n l_r.append(r) \n \n \n if P < 0:\n \n l_P.append(0) \n l_r.append(r)\n l_rho.append(0)\n \n \n for i in range (0,len(l_r)): \n a=l_r[i]\n \n if a>l_r[i-1]:\n maxr=l_r[i]\n \n \n \n return np.array(l_r) , np.array(l_m) \n\n\n\n\nr_32,rho_32 = modelo(1,5/3) \n#r_43,rho_43 = modelo(0.001,1.33333)\n#r_53,rho_53 = modelo(0.001,1.66667)\n#r_340,rho_340 = modelo(0.001,3.4)\n\n\nr_s, rho_s = np.loadtxt(\"sol.dat\",usecols=(0,3),unpack=True)\n\n\nfor i in range (0,len(rho_s)): \n a=rho_s[i] \n if a>rho_s[i-1]:\n maxrho=rho_s[i]\n \n#plt.plot(r_s,rho_s/maxrho,\"y\",label=\"sol\")\nplt.plot(r_32,rho_32,\"r\",label=\"n=3/2\")\n#plt.plot(r_43,rho_43,\"b\",label=\"n=4/3\")\n#plt.plot(r_53,rho_53,\"g\",label=\"n=5/3\")\n#plt.plot(r_340,rho_340,\"m\",label=\"n=3.4\")\n\n\nplt.legend()\nplt.ylabel(r\"$\\rho / \\rho_c$\")\nplt.xlabel(r\"$r / r_o$\")\nplt.title(\"$\\Delta$=0.001\")\n\nplt.show()\n","sub_path":"Estelar/6/ayuditaparaelproblemadelapolitropatarea/Politropas-1(2) (copy).py","file_name":"Politropas-1(2) (copy).py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"130870536","text":"import arcade\nimport imgui\nimport imgui.core\n\nfrom arcade_imgui import ArcadeRenderer\n\n\nclass MyGui:\n def __init__(self, window):\n self.window = window\n # Must create or set the context before instantiating the renderer\n imgui.create_context()\n self.renderer = ArcadeRenderer(window)\n self.values = 88.0, 42.0\n\n\n def draw(self):\n imgui.new_frame()\n\n imgui.set_next_window_position(16, 32, imgui.ONCE)\n imgui.set_next_window_size(512, 512, imgui.ONCE)\n\n imgui.begin(\"Example: drag float\")\n changed, self.values = imgui.drag_float2(\n \"Default\", *self.values\n )\n changed, self.values = imgui.drag_float2(\n \"Less precise\", *self.values, format=\"%.1f\"\n )\n imgui.text(\"Changed: %s, Values: %s\" % (changed, self.values))\n imgui.end()\n\n imgui.end_frame()\n\n imgui.render()\n\n self.renderer.render(imgui.get_draw_data())\n\n\nclass App(arcade.Window):\n def __init__(self):\n super().__init__(800, 600, \"Drag Float 2 Example\", resizable=True)\n self.gui = MyGui(self)\n\n def on_draw(self):\n arcade.start_render()\n self.gui.draw()\n\n\napp = App()\narcade.run()\n","sub_path":"imdemo/examples/dragfloat2.py","file_name":"dragfloat2.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"122772536","text":"from src.Hand_Pose import HandPose\nimport tensorflow as tf\nimport matplotlib\nmatplotlib.use('Agg') # plot to file\nimport matplotlib.pyplot as plt\nimport pickle\n\nTRAIN_BATCH = 0\nTEST_BATCH = 1\nMODEL_PATH = './train/train.ckpt'\n\ndef train():\n epochs = 250\n batchSize = 128\n weightDecay = 0.001\n learning_rate = 1e-3\n\n handPose = HandPose('../data/ICVL', weight_decay=weightDecay, batchSize=batchSize)\n # 加载训练数据\n handPose.loadData()\n handPose.create_embedding()\n\n shape = [handPose.batchSize]\n for i in range(1, len(handPose.train_data.shape)):\n shape.append(handPose.train_data.shape[i])\n print(shape)\n # 用于放置每一个batch的数据\n images = tf.placeholder(tf.float32, shape)\n y = handPose.predict(images)\n # 用于放置每一个batch的标记\n label = tf.placeholder(tf.float32, shape=[handPose.batchSize, 30])\n loss = handPose.loss(y, label)\n\n # 优化器使用RMSPropOptimizer,需要了解一下。。。\n train_step = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.9, epsilon=1e-2).minimize(loss)\n\n # 觉得要配置一下GPU显存的的使用量\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(config=config)\n\n #用于记录所有的损失\n costs = []\n\n ##################################################\n # 开始训练:\n\n # 保存器\n saver = tf.train.Saver()\n last_step = -1\n\n if tf.gfile.Exists('./train/train.ckpt.meta'):\n print('已有模型,继续训练')\n f = open('./train/last_data.pkl', 'rb')\n # 加载上一次训练步数\n last_step, costs = pickle.load(f)\n f.close()\n saver.restore(sess, MODEL_PATH)\n else:\n print('未找到模型,开始训练')\n sess.run(tf.global_variables_initializer())\n\n current_step = last_step + 1\n\n # 如果没有找到训��的结果,就重新训练\n for i in range(last_step + 1, epochs):\n print(\"epochs:{}\".format(i))\n current_step = i\n batch = handPose.getNextBatch(TRAIN_BATCH)\n while batch:\n train_images_batch = batch[0]\n train_gt3D_batch = batch[1]\n sess.run(train_step, feed_dict={images: train_images_batch, label: train_gt3D_batch})\n if (handPose.batch_index % 30 == 0):\n cost = loss.eval(feed_dict={images: train_images_batch, label: train_gt3D_batch}, session=sess)\n costs.append(cost)\n # if i== epochs-1:\n # predict_embed = y.eval(feed_dict={images: train_images_batch}, session=sess)\n # np.savetxt('train'+str(i)+','+str(handPose.batch_index)+'.csv', predict_embed, delimiter=',')\n print(\"epochs:{}, batches:{}, cost:{}\".format(i, handPose.batch_index, cost))\n batch = handPose.getNextBatch(TRAIN_BATCH)\n\n # 保存训练次数以及所有的损失\n f = open('./train/last_data.pkl', 'wb')\n pickle.dump((current_step, costs), f)\n f.close()\n # 保存训练好的模型\n saver.save(sess=sess,save_path=MODEL_PATH)\n\n # 画出损失函数变化图像\n fig = plt.figure()\n plt.semilogy(costs)\n plt.show(block=False)\n fig.savefig('./costs.png')\n\ndef temp():\n f = open('./train/last_data.pkl', 'rb')\n # 加载上一次训练步数\n last_step, costs = pickle.load(f)\n last_step = 149\n f.close()\n\n f = open('./train/last_data.pkl', 'wb')\n pickle.dump((last_step, costs), f)\n f.close()\n\n\nif __name__ =='__main__':\n train()\n # test()\n","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"138326324","text":"class Triangle:\n def __init__(self, raw_data):\n self.raw_data = raw_data.replace(' ', '').split(',')\n self.name = self.raw_data[0]\n\n def __str__(self):\n return '[Triangle {0}]: {1} cm'.format(self.name, self.calculate_square())\n\n def _set_triangle_sides(self):\n try:\n self.a = float(self.raw_data[1])\n self.b = float(self.raw_data[2])\n self.c = float(self.raw_data[3])\n except ValueError:\n raise ValueError('Invalid type')\n\n def calculate_square(self):\n self._set_triangle_sides()\n p = (self.a + self.b + self.c) / 2\n square = (p * (p - self.a) * (p - self.b) * (p - self.c)) ** 0.5\n return square\n\n\nif __name__ == '__main__':\n triangles = []\n while True:\n raw_data = input('Enter: , , , : ')\n try:\n Triangle(raw_data).calculate_square()\n triangles.append(Triangle(raw_data))\n except ValueError:\n print('Wrong data')\n\n is_break = True if input('\\nContinue? [y/n]: ') == 'n' else False\n if is_break:\n triangles.sort(key=lambda c: c.calculate_square(), reverse=True)\n print(f'{\"Triangles\":=^40}')\n for i in triangles:\n print(i)\n break\n","sub_path":"elementary_task/Task3/triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"548606644","text":"import itertools\nimport types\nfrom numbers import Number\nfrom itertools import groupby\nfrom functools import partial\nfrom contextlib import contextmanager\nfrom inspect import ArgSpec\n\nimport numpy as np\nimport param\n\nfrom . import traversal, util\nfrom .dimension import OrderedDict, Dimension, ViewableElement, redim\nfrom .layout import Layout, AdjointLayout, NdLayout\nfrom .ndmapping import UniformNdMapping, NdMapping, item_check\nfrom .overlay import Overlay, CompositeOverlay, NdOverlay, Overlayable\nfrom .options import Store, StoreOptions\nfrom ..streams import Stream\n\n\n\nclass HoloMap(UniformNdMapping, Overlayable):\n \"\"\"\n A HoloMap can hold any number of DataLayers indexed by a list of\n dimension values. It also has a number of properties, which can find\n the x- and y-dimension limits and labels.\n \"\"\"\n\n data_type = (ViewableElement, NdMapping, Layout)\n\n def overlay(self, dimensions=None, **kwargs):\n \"\"\"\n Splits the UniformNdMapping along a specified number of dimensions and\n overlays items in the split out Maps.\n\n Shows all HoloMap data When no dimensions are specified.\n \"\"\"\n dimensions = self._valid_dimensions(dimensions)\n if len(dimensions) == self.ndims:\n with item_check(False):\n return NdOverlay(self, **kwargs).reindex(dimensions)\n else:\n dims = [d for d in self.kdims if d not in dimensions]\n return self.groupby(dims, group_type=NdOverlay, **kwargs)\n\n\n def grid(self, dimensions=None, **kwargs):\n \"\"\"\n GridSpace takes a list of one or two dimensions, and lays out the containing\n Views along these axes in a GridSpace.\n\n Shows all HoloMap data When no dimensions are specified.\n \"\"\"\n dimensions = self._valid_dimensions(dimensions)\n if len(dimensions) == self.ndims:\n with item_check(False):\n return GridSpace(self, **kwargs).reindex(dimensions)\n return self.groupby(dimensions, container_type=GridSpace, **kwargs)\n\n\n def layout(self, dimensions=None, **kwargs):\n \"\"\"\n GridSpace takes a list of one or two dimensions, and lays out the containing\n Views along these axes in a GridSpace.\n\n Shows all HoloMap data When no dimensions are specified.\n \"\"\"\n dimensions = self._valid_dimensions(dimensions)\n if len(dimensions) == self.ndims:\n with item_check(False):\n return NdLayout(self, **kwargs).reindex(dimensions)\n return self.groupby(dimensions, container_type=NdLayout, **kwargs)\n\n\n def split_overlays(self):\n \"\"\"\n Given a UniformNdMapping of Overlays of N layers, split out the layers into\n N separate Maps.\n \"\"\"\n if not issubclass(self.type, CompositeOverlay):\n return None, self.clone()\n\n item_maps = OrderedDict()\n for k, overlay in self.data.items():\n for key, el in overlay.items():\n if key not in item_maps:\n item_maps[key] = [(k, el)]\n else:\n item_maps[key].append((k, el))\n\n maps, keys = [], []\n for k, layermap in item_maps.items():\n maps.append(self.clone(layermap))\n keys.append(k)\n return keys, maps\n\n\n def _dimension_keys(self):\n \"\"\"\n Helper for __mul__ that returns the list of keys together with\n the dimension labels.\n \"\"\"\n return [tuple(zip([d.name for d in self.kdims], [k] if self.ndims == 1 else k))\n for k in self.keys()]\n\n\n def _dynamic_mul(self, dimensions, other, keys):\n \"\"\"\n Implements dynamic version of overlaying operation overlaying\n DynamicMaps and HoloMaps where the key dimensions of one is\n a strict superset of the other.\n \"\"\"\n # If either is a HoloMap compute Dimension values\n if not isinstance(self, 
DynamicMap) or not isinstance(other, DynamicMap):\n keys = sorted((d, v) for k in keys for d, v in k)\n grouped = dict([(g, [v for _, v in group])\n for g, group in groupby(keys, lambda x: x[0])])\n dimensions = [d(values=grouped[d.name]) for d in dimensions]\n map_obj = None\n\n # Combine streams\n map_obj = self if isinstance(self, DynamicMap) else other\n if isinstance(self, DynamicMap) and isinstance(other, DynamicMap):\n self_streams = util.dimensioned_streams(self)\n other_streams = util.dimensioned_streams(other)\n streams = list(util.unique_iterator(self_streams+other_streams))\n else:\n streams = map_obj.streams\n\n def dynamic_mul(*key, **kwargs):\n key_map = {d.name: k for d, k in zip(dimensions, key)}\n layers = []\n try:\n self_el = self.select(HoloMap, **key_map) if self.kdims else self[()]\n layers.append(self_el)\n except KeyError:\n pass\n try:\n other_el = other.select(HoloMap, **key_map) if other.kdims else other[()]\n layers.append(other_el)\n except KeyError:\n pass\n return Overlay(layers)\n callback = Callable(dynamic_mul, inputs=[self, other])\n callback._is_overlay = True\n if map_obj:\n return map_obj.clone(callback=callback, shared_data=False,\n kdims=dimensions, streams=streams)\n else:\n return DynamicMap(callback=callback, kdims=dimensions,\n streams=streams)\n\n\n def __mul__(self, other):\n \"\"\"\n The mul (*) operator implements overlaying of different Views.\n This method tries to intelligently overlay Maps with differing\n keys. If the UniformNdMapping is mulled with a simple\n ViewableElement each element in the UniformNdMapping is\n overlaid with the ViewableElement. If the element the\n UniformNdMapping is mulled with is another UniformNdMapping it\n will try to match up the dimensions, making sure that items\n with completely different dimensions aren't overlaid.\n \"\"\"\n if isinstance(other, HoloMap):\n self_set = {d.name for d in self.kdims}\n other_set = {d.name for d in other.kdims}\n\n # Determine which is the subset, to generate list of keys and\n # dimension labels for the new view\n self_in_other = self_set.issubset(other_set)\n other_in_self = other_set.issubset(self_set)\n dims = [other.kdims, self.kdims] if self_in_other else [self.kdims, other.kdims]\n dimensions = util.merge_dimensions(dims)\n\n if self_in_other and other_in_self: # superset of each other\n keys = self._dimension_keys() + other._dimension_keys()\n super_keys = util.unique_iterator(keys)\n elif self_in_other: # self is superset\n dimensions = other.kdims\n super_keys = other._dimension_keys()\n elif other_in_self: # self is superset\n super_keys = self._dimension_keys()\n else: # neither is superset\n raise Exception('One set of keys needs to be a strict subset of the other.')\n\n if isinstance(self, DynamicMap) or isinstance(other, DynamicMap):\n return self._dynamic_mul(dimensions, other, super_keys)\n\n items = []\n for dim_keys in super_keys:\n # Generate keys for both subset and superset and sort them by the dimension index.\n self_key = tuple(k for p, k in sorted(\n [(self.get_dimension_index(dim), v) for dim, v in dim_keys\n if dim in self.kdims]))\n other_key = tuple(k for p, k in sorted(\n [(other.get_dimension_index(dim), v) for dim, v in dim_keys\n if dim in other.kdims]))\n new_key = self_key if other_in_self else other_key\n # Append SheetOverlay of combined items\n if (self_key in self) and (other_key in other):\n items.append((new_key, self[self_key] * other[other_key]))\n elif self_key in self:\n items.append((new_key, Overlay([self[self_key]])))\n 
else:\n items.append((new_key, Overlay([other[other_key]])))\n return self.clone(items, kdims=dimensions, label=self._label, group=self._group)\n elif isinstance(other, self.data_type):\n if isinstance(self, DynamicMap):\n def dynamic_mul(*args, **kwargs):\n element = self[args]\n return element * other\n callback = Callable(dynamic_mul, inputs=[self, other])\n callback._is_overlay = True\n return self.clone(shared_data=False, callback=callback,\n streams=[])\n items = [(k, v * other) for (k, v) in self.data.items()]\n return self.clone(items, label=self._label, group=self._group)\n else:\n return NotImplemented\n\n\n def __add__(self, obj):\n return Layout.from_values([self, obj])\n\n\n def __lshift__(self, other):\n if isinstance(other, (ViewableElement, UniformNdMapping)):\n return AdjointLayout([self, other])\n elif isinstance(other, AdjointLayout):\n return AdjointLayout(other.data+[self])\n else:\n raise TypeError('Cannot append {0} to a AdjointLayout'.format(type(other).__name__))\n\n\n def collate(self, merge_type=None, drop=[], drop_constant=False):\n \"\"\"\n Collation allows collapsing nested HoloMaps by merging\n their dimensions. In the simple case a HoloMap containing\n other HoloMaps can easily be joined in this way. However\n collation is particularly useful when the objects being\n joined are deeply nested, e.g. you want to join multiple\n Layouts recorded at different times, collation will return\n one Layout containing HoloMaps indexed by Time. Changing\n the merge_type will allow merging the outer Dimension\n into any other UniformNdMapping type.\n\n Specific dimensions may be dropped if they are redundant\n by supplying them in a list. Enabling drop_constant allows\n ignoring any non-varying dimensions during collation.\n \"\"\"\n from .element import Collator\n merge_type=merge_type if merge_type else self.__class__\n return Collator(self, merge_type=merge_type, drop=drop,\n drop_constant=drop_constant)()\n\n\n def collapse(self, dimensions=None, function=None, spreadfn=None, **kwargs):\n \"\"\"\n Allows collapsing one of any number of key dimensions\n on the HoloMap. Homogeneous Elements may be collapsed by\n supplying a function, inhomogeneous elements are merged.\n \"\"\"\n if not dimensions:\n dimensions = self.kdims\n if not isinstance(dimensions, list): dimensions = [dimensions]\n if self.ndims > 1 and len(dimensions) != self.ndims:\n groups = self.groupby([dim for dim in self.kdims\n if dim not in dimensions])\n elif all(d in self.kdims for d in dimensions):\n groups = HoloMap([(0, self)])\n else:\n raise KeyError(\"Supplied dimensions not found.\")\n\n collapsed = groups.clone(shared_data=False)\n for key, group in groups.items():\n group_data = [el.data for el in group]\n args = (group_data, function, group.last.kdims)\n if hasattr(group.last, 'interface'):\n col_data = group.type(group.table().aggregate(group.last.kdims, function, spreadfn, **kwargs))\n\n else:\n data = group.type.collapse_data(*args, **kwargs)\n col_data = group.last.clone(data)\n collapsed[key] = col_data\n return collapsed if self.ndims > 1 else collapsed.last\n\n\n def sample(self, samples=[], bounds=None, **sample_values):\n \"\"\"\n Sample each Element in the UniformNdMapping by passing either a list of\n samples or a tuple specifying the number of regularly spaced\n samples per dimension. Alternatively, a single sample may be\n requested using dimension-value pairs. 
Optionally, the bounds\n argument can be used to specify the bounding extent from which\n the coordinates are to regularly sampled. Regular sampling\n assumes homogeneous and regularly sampled data.\n\n For 1D sampling, the shape is simply as the desired number of\n samples (and not a tuple). The bounds format for 1D sampling\n is the tuple (lower, upper) and the tuple (left, bottom,\n right, top) for 2D sampling.\n \"\"\"\n dims = self.last.ndims\n if isinstance(samples, tuple) or np.isscalar(samples):\n if dims == 1:\n xlim = self.last.range(0)\n lower, upper = (xlim[0], xlim[1]) if bounds is None else bounds\n edges = np.linspace(lower, upper, samples+1)\n linsamples = [(l+u)/2.0 for l,u in zip(edges[:-1], edges[1:])]\n elif dims == 2:\n (rows, cols) = samples\n if bounds:\n (l,b,r,t) = bounds\n else:\n l, r = self.last.range(0)\n b, t = self.last.range(1)\n\n xedges = np.linspace(l, r, cols+1)\n yedges = np.linspace(b, t, rows+1)\n xsamples = [(lx+ux)/2.0 for lx,ux in zip(xedges[:-1], xedges[1:])]\n ysamples = [(ly+uy)/2.0 for ly,uy in zip(yedges[:-1], yedges[1:])]\n\n Y,X = np.meshgrid(ysamples, xsamples)\n linsamples = list(zip(X.flat, Y.flat))\n else:\n raise NotImplementedError(\"Regular sampling not implemented \"\n \"for high-dimensional Views.\")\n\n samples = list(util.unique_iterator(self.last.closest(linsamples)))\n\n sampled = self.clone([(k, view.sample(samples, closest=False,\n **sample_values))\n for k, view in self.data.items()])\n return sampled.table()\n\n\n def reduce(self, dimensions=None, function=None, **reduce_map):\n \"\"\"\n Reduce each Element in the HoloMap using a function supplied\n via the kwargs, where the keyword has to match a particular\n dimension in the Elements.\n \"\"\"\n from ..element import Table\n reduced_items = [(k, v.reduce(dimensions, function, **reduce_map))\n for k, v in self.items()]\n if not isinstance(reduced_items[0][1], Table):\n params = dict(util.get_param_values(self.last),\n kdims=self.kdims, vdims=self.last.vdims)\n return Table(reduced_items, **params)\n return self.clone(reduced_items).table()\n\n\n def relabel(self, label=None, group=None, depth=1):\n # Identical to standard relabel method except for default depth of 1\n return super(HoloMap, self).relabel(label=label, group=group, depth=depth)\n\n\n def hist(self, num_bins=20, bin_range=None, adjoin=True, individually=True, **kwargs):\n histmaps = [self.clone(shared_data=False) for _ in\n kwargs.get('dimension', range(1))]\n\n if individually:\n map_range = None\n else:\n if 'dimension' not in kwargs:\n raise Exception(\"Please supply the dimension to compute a histogram for.\")\n map_range = self.range(kwargs['dimension'])\n bin_range = map_range if bin_range is None else bin_range\n style_prefix = 'Custom[<' + self.name + '>]_'\n if issubclass(self.type, (NdOverlay, Overlay)) and 'index' not in kwargs:\n kwargs['index'] = 0\n for k, v in self.data.items():\n hists = v.hist(adjoin=False, bin_range=bin_range,\n individually=individually, num_bins=num_bins,\n style_prefix=style_prefix, **kwargs)\n if isinstance(hists, Layout):\n for i, hist in enumerate(hists):\n histmaps[i][k] = hist\n else:\n histmaps[0][k] = hists\n\n if adjoin:\n layout = self\n for hist in histmaps:\n layout = (layout << hist)\n if issubclass(self.type, (NdOverlay, Overlay)):\n layout.main_layer = kwargs['index']\n return layout\n else:\n if len(histmaps) > 1:\n return Layout.from_values(histmaps)\n else:\n return histmaps[0]\n\n\nclass Callable(param.Parameterized):\n \"\"\"\n Callable allows wrapping 
callbacks on one or more DynamicMaps\n allowing their inputs (and in future outputs) to be defined.\n This makes it possible to wrap DynamicMaps with streams and\n makes it possible to traverse the graph of operations applied\n to a DynamicMap.\n\n Additionally, if the memoize attribute is True, a Callable will\n memoize the last returned value based on the arguments to the\n function and the state of all streams on its inputs, to avoid\n calling the function unnecessarily. Note that because memoization\n includes the streams found on the inputs it may be disabled if the\n stream requires it and is triggering.\n\n A Callable may also specify a stream_mapping which specifies the\n objects that are associated with interactive (i.e linked) streams\n when composite objects such as Layouts are returned from the\n callback. This is required for building interactive, linked\n visualizations (for the backends that support them) when returning\n Layouts, NdLayouts or GridSpace objects. When chaining multiple\n DynamicMaps into a pipeline, the link_inputs parameter declares\n whether the visualization generated using this Callable will\n inherit the linked streams. This parameter is used as a hint by\n the applicable backend.\n\n The mapping should map from an appropriate key to a list of\n streams associated with the selected object. The appropriate key\n may be a type[.group][.label] specification for Layouts, an\n integer index or a suitable NdLayout/GridSpace key. For more\n information see the DynamicMap tutorial at holoviews.org.\n \"\"\"\n\n callable = param.Callable(default=None, constant=True, doc=\"\"\"\n The callable function being wrapped.\"\"\")\n\n inputs = param.List(default=[], constant=True, doc=\"\"\"\n The list of inputs the callable function is wrapping. Used\n to allow deep access to streams in chained Callables.\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n If the Callable wraps around other DynamicMaps in its inputs,\n determines whether linked streams attached to the inputs are\n transferred to the objects returned by the Callable.\n\n For example the Callable wraps a DynamicMap with an RangeXY\n stream, this switch determines whether the corresponding\n visualization should update this stream with range changes\n originating from the newly generated axes.\"\"\")\n\n memoize = param.Boolean(default=True, doc=\"\"\"\n Whether the return value of the callable should be memoized\n based on the call arguments and any streams attached to the\n inputs.\"\"\")\n\n stream_mapping = param.Dict(default={}, constant=True, doc=\"\"\"\n Defines how streams should be mapped to objects returned by\n the Callable, e.g. 
when it returns a Layout.\"\"\")\n\n def __init__(self, callable, **params):\n super(Callable, self).__init__(callable=callable,\n **dict(params, name=util.callable_name(callable)))\n self._memoized = {}\n self._is_overlay = False\n self.args = None\n self.kwargs = None\n self._stream_memoization = self.memoize\n\n @property\n def argspec(self):\n return util.argspec(self.callable)\n\n @property\n def noargs(self):\n \"Returns True if the callable takes no arguments\"\n noargs = ArgSpec(args=[], varargs=None, keywords=None, defaults=None)\n return self.argspec == noargs\n\n\n def clone(self, callable=None, **overrides):\n \"\"\"\n Allows making a copy of the Callable optionally overriding\n the callable and other parameters.\n \"\"\"\n old = {k: v for k, v in self.get_param_values()\n if k not in ['callable', 'name']}\n params = dict(old, **overrides)\n callable = self.callable if callable is None else callable\n return self.__class__(callable, **params)\n\n\n def __call__(self, *args, **kwargs):\n # Nothing to do for callbacks that accept no arguments\n kwarg_hash = kwargs.pop('memoization_hash', ())\n (self.args, self.kwargs) = (args, kwargs)\n if not args and not kwargs: return self.callable()\n inputs = [i for i in self.inputs if isinstance(i, DynamicMap)]\n streams = []\n for stream in [s for i in inputs for s in get_nested_streams(i)]:\n if stream not in streams: streams.append(stream)\n\n memoize = self._stream_memoization and not any(s.transient and s._triggering for s in streams)\n values = tuple(tuple(sorted(s.hashkey.items())) for s in streams)\n key = args + kwarg_hash + values\n\n hashed_key = util.deephash(key) if self.memoize else None\n if hashed_key is not None and memoize and hashed_key in self._memoized:\n return self._memoized[hashed_key]\n\n if self.argspec.varargs is not None:\n # Missing information on positional argument names, cannot promote to keywords\n pass\n elif len(args) != 0: # Turn positional arguments into keyword arguments\n pos_kwargs = {k:v for k,v in zip(self.argspec.args, args)}\n ignored = range(len(self.argspec.args),len(args))\n if len(ignored):\n self.warning('Ignoring extra positional argument %s'\n % ', '.join('%s' % i for i in ignored))\n clashes = set(pos_kwargs.keys()) & set(kwargs.keys())\n if clashes:\n self.warning('Positional arguments %r overriden by keywords'\n % list(clashes))\n args, kwargs = (), dict(pos_kwargs, **kwargs)\n\n try:\n ret = self.callable(*args, **kwargs)\n except KeyError:\n # KeyError is caught separately because it is used to signal\n # invalid keys on DynamicMap and should not warn\n raise\n except:\n posstr = ', '.join(['%r' % el for el in self.args]) if self.args else ''\n kwstr = ', '.join('%s=%r' % (k,v) for k,v in self.kwargs.items())\n argstr = ', '.join([el for el in [posstr, kwstr] if el])\n message = (\"Exception raised in callable '{name}' of type '{ctype}'.\\n\"\n \"Invoked as {name}({argstr})\")\n self.warning(message.format(name=self.name,\n ctype = type(self.callable).__name__,\n argstr=argstr))\n raise\n\n if hashed_key is not None:\n self._memoized = {hashed_key : ret}\n return ret\n\n\n\nclass Generator(Callable):\n \"\"\"\n Generators are considered a special case of Callable that accept no\n arguments and never memoize.\n \"\"\"\n\n callable = param.ClassSelector(default=None, class_ = types.GeneratorType,\n constant=True, doc=\"\"\"\n The generator that is wrapped by this Generator.\"\"\")\n\n @property\n def argspec(self):\n return ArgSpec(args=[], varargs=None, keywords=None, defaults=None)\n\n 
def __call__(self):\n try:\n return next(self.callable)\n except StopIteration:\n raise\n except Exception:\n msg = 'Generator {name} raised the following exception:'\n self.warning(msg.format(name=self.name))\n raise\n\n\ndef get_nested_dmaps(dmap):\n \"\"\"\n Get all DynamicMaps referenced by the supplied DynamicMap's callback.\n \"\"\"\n dmaps = [dmap]\n for o in dmap.callback.inputs:\n if isinstance(o, DynamicMap):\n dmaps.extend(get_nested_dmaps(o))\n return list(set(dmaps))\n\n\ndef get_nested_streams(dmap):\n \"\"\"\n Get all (potentially nested) streams from DynamicMap with Callable\n callback.\n \"\"\"\n return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})\n\n\n@contextmanager\ndef dynamicmap_memoization(callable_obj, streams):\n \"\"\"\n Determine whether the Callable should have memoization enabled\n based on the supplied streams (typically by a\n DynamicMap). Memoization is disabled if any of the streams require\n it it and are currently in a triggered state.\n \"\"\"\n memoization_state = bool(callable_obj._stream_memoization)\n callable_obj._stream_memoization &= not any(s.transient and s._triggering for s in streams)\n try:\n yield\n except:\n raise\n finally:\n callable_obj._stream_memoization = memoization_state\n\n\n\nclass periodic(object):\n \"\"\"\n Implements the utility of the same name on DynamicMap.\n\n Used to defined periodic event updates that can be started and\n stopped.\n \"\"\"\n _periodic_util = util.periodic\n\n def __init__(self, dmap):\n self.dmap = dmap\n self.instance = None\n\n def __call__(self, period, count=None, param_fn=None, timeout=None, block=True):\n \"\"\"\n Run a non-blocking loop that updates the stream parameters using\n the event method. Runs count times with the specified period. If\n count is None, runs indefinitely.\n\n If param_fn is not specified, the event method is called without\n arguments. If it is specified, it must be a callable accepting a\n single argument (the iteration count, starting at 1) that\n returns a dictionary of the new stream values to be passed to\n the event method.\n \"\"\"\n\n if self.instance is not None and not self.instance.completed:\n raise RuntimeError('Periodic process already running. '\n 'Wait until it completes or call '\n 'stop() before running a new periodic process')\n def inner(i):\n kwargs = {} if param_fn is None else param_fn(i)\n self.dmap.event(**kwargs)\n\n instance = self._periodic_util(period, count, inner,\n timeout=timeout, block=block)\n instance.start()\n self.instance= instance\n\n def stop(self):\n \"Stop the periodic process.\"\n self.instance.stop()\n\n def __str__(self):\n return \"\"\n\n\n\nclass DynamicMap(HoloMap):\n \"\"\"\n A DynamicMap is a type of HoloMap where the elements are dynamically\n generated by a callable. The callable is invoked with values\n associated with the key dimensions or with values supplied by stream\n parameters.\n \"\"\"\n\n # Declare that callback is a positional parameter (used in clone)\n __pos_params = ['callback']\n\n kdims = param.List(default=[], constant=True, doc=\"\"\"\n The key dimensions of a DynamicMap map to the arguments of the\n callback. This mapping can be by position or by name.\"\"\")\n\n callback = param.ClassSelector(class_=Callable, constant=True, doc=\"\"\"\n The callable used to generate the elements. 
The arguments to the\n callable includes any number of declared key dimensions as well\n as any number of stream parameters defined on the input streams.\n\n If the callable is an instance of Callable it will be used\n directly, otherwise it will be automatically wrapped in one.\"\"\")\n\n streams = param.List(default=[], constant=True, doc=\"\"\"\n List of Stream instances to associate with the DynamicMap. The\n set of parameter values across these streams will be supplied as\n keyword arguments to the callback when the events are received,\n updating the streams.\"\"\" )\n\n cache_size = param.Integer(default=500, doc=\"\"\"\n The number of entries to cache for fast access. This is an LRU\n cache where the least recently used item is overwritten once\n the cache is full.\"\"\")\n\n def __init__(self, callback, initial_items=None, **params):\n\n if isinstance(callback, types.GeneratorType):\n callback = Generator(callback)\n elif not isinstance(callback, Callable):\n callback = Callable(callback)\n\n if 'sampled' in params:\n self.warning('DynamicMap sampled parameter is deprecated '\n 'and no longer needs to be specified.')\n del params['sampled']\n\n super(DynamicMap, self).__init__(initial_items, callback=callback, **params)\n invalid = [s for s in self.streams if not isinstance(s, Stream)]\n if invalid:\n msg = ('The supplied streams list contains objects that '\n 'are not Stream instances: {objs}')\n raise TypeError(msg.format(objs = ', '.join('%r' % el for el in invalid)))\n\n\n if self.callback.noargs:\n prefix = 'DynamicMaps using generators (or callables without arguments)'\n if self.kdims:\n raise Exception(prefix + ' must be declared without key dimensions')\n if len(self.streams)> 1:\n raise Exception(prefix + ' must have either streams=[] or a single, '\n + 'stream instance without any stream parameters')\n if util.stream_parameters(self.streams) != []:\n raise Exception(prefix + ' cannot accept any stream parameters')\n\n self._posarg_keys = util.validate_dynamic_argspec(self.callback,\n self.kdims,\n self.streams)\n # Set source to self if not already specified\n for stream in self.streams:\n if stream.source is None:\n stream.source = self\n self.redim = redim(self, mode='dynamic')\n self.periodic = periodic(self)\n\n @property\n def unbounded(self):\n \"\"\"\n Returns a list of key dimensions that are unbounded, excluding\n stream parameters. 
If any of theses key dimensions are\n unbounded, the DynamicMap as a whole is also unbounded.\n \"\"\"\n unbounded_dims = []\n # Dimensioned streams do not need to be bounded\n stream_params = set(util.stream_parameters(self.streams))\n for kdim in self.kdims:\n if str(kdim) in stream_params:\n continue\n if kdim.values:\n continue\n if None in kdim.range:\n unbounded_dims.append(str(kdim))\n return unbounded_dims\n\n def _initial_key(self):\n \"\"\"\n Construct an initial key for based on the lower range bounds or\n values on the key dimensions.\n \"\"\"\n key = []\n undefined = []\n stream_params = set(util.stream_parameters(self.streams))\n for kdim in self.kdims:\n if str(kdim) in stream_params:\n key.append(None)\n elif kdim.values:\n key.append(kdim.values[0])\n elif kdim.range[0] is not None:\n key.append(kdim.range[0])\n else:\n undefined.append(kdim)\n if undefined:\n msg = ('Dimension(s) {undefined_dims} do not specify range or values needed '\n 'to generate initial key')\n undefined_dims = ', '.join(['%r' % str(dim) for dim in undefined])\n raise KeyError(msg.format(undefined_dims=undefined_dims))\n\n return tuple(key)\n\n\n def _validate_key(self, key):\n \"\"\"\n Make sure the supplied key values are within the bounds\n specified by the corresponding dimension range and soft_range.\n \"\"\"\n if key == () and len(self.kdims) == 0: return ()\n key = util.wrap_tuple(key)\n assert len(key) == len(self.kdims)\n for ind, val in enumerate(key):\n kdim = self.kdims[ind]\n low, high = util.max_range([kdim.range, kdim.soft_range])\n if low is not np.NaN:\n if val < low:\n raise KeyError(\"Key value %s below lower bound %s\"\n % (val, low))\n if high is not np.NaN:\n if val > high:\n raise KeyError(\"Key value %s above upper bound %s\"\n % (val, high))\n\n def event(self, **kwargs):\n \"\"\"\n This method allows any of the available stream parameters\n (renamed as appropriate) to be updated in an event.\n \"\"\"\n if self.callback.noargs and self.streams == []:\n self.warning('No streams declared. 
To update a DynamicMaps using '\n 'generators (or callables without arguments) use streams=[Next()]')\n return\n if self.streams == []:\n self.warning('No streams on DynamicMap, calling event will have no effect')\n return\n\n stream_params = set(util.stream_parameters(self.streams))\n invalid = [k for k in kwargs.keys() if k not in stream_params]\n if invalid:\n msg = 'Key(s) {invalid} do not correspond to stream parameters'\n raise KeyError(msg.format(invalid = ', '.join('%r' % i for i in invalid)))\n\n for stream in self.streams:\n applicable_kws = {k:v for k,v in kwargs.items()\n if k in set(stream.contents.keys())}\n rkwargs = util.rename_stream_kwargs(stream, applicable_kws, reverse=True)\n stream.update(**rkwargs)\n\n Stream.trigger(self.streams)\n\n def _style(self, retval):\n \"\"\"\n Use any applicable OptionTree of the DynamicMap to apply options\n to the return values of the callback.\n \"\"\"\n if self.id not in Store.custom_options():\n return retval\n spec = StoreOptions.tree_to_dict(Store.custom_options()[self.id])\n return retval.opts(spec)\n\n\n def _execute_callback(self, *args):\n \"\"\"\n Execute the callback, validating both the input key and output\n key where applicable.\n \"\"\"\n self._validate_key(args) # Validate input key\n\n # Additional validation needed to ensure kwargs don't clash\n kdims = [kdim.name for kdim in self.kdims]\n kwarg_items = [s.contents.items() for s in self.streams]\n hash_items = tuple(tuple(sorted(s.hashkey.items())) for s in self.streams)+args\n flattened = [(k,v) for kws in kwarg_items for (k,v) in kws\n if k not in kdims]\n\n if self._posarg_keys:\n kwargs = dict(flattened, **dict(zip(self._posarg_keys, args)))\n args = ()\n else:\n kwargs = dict(flattened)\n if not isinstance(self.callback, Generator):\n kwargs['memoization_hash'] = hash_items\n\n with dynamicmap_memoization(self.callback, self.streams):\n retval = self.callback(*args, **kwargs)\n return self._style(retval)\n\n\n def opts(self, options=None, **kwargs):\n \"\"\"\n Apply the supplied options to a clone of the DynamicMap which is\n then returned. Note that if no options are supplied at all,\n all ids are reset.\n \"\"\"\n from ..util import Dynamic\n dmap = Dynamic(self, operation=lambda obj, **dynkwargs: obj.opts(options, **kwargs),\n streams=self.streams, link_inputs=True)\n dmap.data = OrderedDict([(k, v.opts(options, **kwargs))\n for k, v in self.data.items()])\n return dmap\n\n\n def clone(self, data=None, shared_data=True, new_type=None, link_inputs=True,\n *args, **overrides):\n \"\"\"\n Clone method to adapt the slightly different signature of\n DynamicMap that also overrides Dimensioned clone to avoid\n checking items if data is unchanged.\n \"\"\"\n if data is None and shared_data:\n data = self.data\n overrides['plot_id'] = self._plot_id\n clone = super(UniformNdMapping, self).clone(overrides.pop('callback', self.callback),\n shared_data, new_type,\n *(data,) + args, **overrides)\n\n # Ensure the clone references this object to ensure\n # stream sources are inherited\n if clone.callback is self.callback:\n with util.disable_constant(clone):\n clone.callback = clone.callback.clone(inputs=[self],\n link_inputs=link_inputs)\n return clone\n\n\n def reset(self):\n \"\"\"\n Return a cleared dynamic map with a cleared cached\n \"\"\"\n self.data = OrderedDict()\n return self\n\n\n def _cross_product(self, tuple_key, cache, data_slice):\n \"\"\"\n Returns a new DynamicMap if the key (tuple form) expresses a\n cross product, otherwise returns None. 
The cache argument is a\n dictionary (key:element pairs) of all the data found in the\n cache for this key.\n\n Each key inside the cross product is looked up in the cache\n (self.data) to check if the appropriate element is\n available. Otherwise the element is computed accordingly.\n\n The data_slice may specify slices into each value in the\n the cross-product.\n \"\"\"\n if not any(isinstance(el, (list, set)) for el in tuple_key):\n return None\n if len(tuple_key)==1:\n product = tuple_key[0]\n else:\n args = [set(el) if isinstance(el, (list,set))\n else set([el]) for el in tuple_key]\n product = itertools.product(*args)\n\n data = []\n for inner_key in product:\n key = util.wrap_tuple(inner_key)\n if key in cache:\n val = cache[key]\n else:\n val = self._execute_callback(*key)\n if data_slice:\n val = self._dataslice(val, data_slice)\n data.append((key, val))\n product = self.clone(data)\n\n if data_slice:\n from ..util import Dynamic\n dmap = Dynamic(self, operation=lambda obj, **dynkwargs: obj[data_slice],\n streams=self.streams)\n dmap.data = product.data\n return dmap\n return product\n\n\n def _slice_bounded(self, tuple_key, data_slice):\n \"\"\"\n Slices bounded DynamicMaps by setting the soft_ranges on\n key dimensions and applies data slice to cached and dynamic\n values.\n \"\"\"\n slices = [el for el in tuple_key if isinstance(el, slice)]\n if any(el.step for el in slices):\n raise Exception(\"DynamicMap slices cannot have a step argument\")\n elif len(slices) not in [0, len(tuple_key)]:\n raise Exception(\"Slices must be used exclusively or not at all\")\n elif not slices:\n return None\n\n sliced = self.clone(self)\n for i, slc in enumerate(tuple_key):\n (start, stop) = slc.start, slc.stop\n if start is not None and start < sliced.kdims[i].range[0]:\n raise Exception(\"Requested slice below defined dimension range.\")\n if stop is not None and stop > sliced.kdims[i].range[1]:\n raise Exception(\"Requested slice above defined dimension range.\")\n sliced.kdims[i].soft_range = (start, stop)\n if data_slice:\n if not isinstance(sliced, DynamicMap):\n return self._dataslice(sliced, data_slice)\n else:\n from ..util import Dynamic\n if len(self):\n slices = [slice(None) for _ in range(self.ndims)] + list(data_slice)\n sliced = super(DynamicMap, sliced).__getitem__(tuple(slices))\n dmap = Dynamic(self, operation=lambda obj, **dynkwargs: obj[data_slice],\n streams=self.streams)\n dmap.data = sliced.data\n return dmap\n return sliced\n\n\n def __getitem__(self, key):\n \"\"\"\n Return an element for any key chosen key. 
Also allows for usual\n deep slicing semantics by slicing values in the cache and\n applying the deep slice to newly generated values.\n \"\"\"\n # Split key dimensions and data slices\n sample = False\n if key is Ellipsis:\n return self\n elif isinstance(key, (list, set)) and all(isinstance(v, tuple) for v in key):\n map_slice, data_slice = key, ()\n sample = True\n else:\n map_slice, data_slice = self._split_index(key)\n tuple_key = util.wrap_tuple_streams(map_slice, self.kdims, self.streams)\n\n # Validation\n if not sample:\n sliced = self._slice_bounded(tuple_key, data_slice)\n if sliced is not None:\n return sliced\n\n # Cache lookup\n try:\n dimensionless = util.dimensionless_contents(get_nested_streams(self),\n self.kdims, no_duplicates=False)\n empty = util.stream_parameters(self.streams) == [] and self.kdims==[]\n if dimensionless or empty:\n raise KeyError('Using dimensionless streams disables DynamicMap cache')\n cache = super(DynamicMap,self).__getitem__(key)\n except KeyError:\n cache = None\n\n # If the key expresses a cross product, compute the elements and return\n product = self._cross_product(tuple_key, cache.data if cache else {}, data_slice)\n if product is not None:\n return product\n\n # Not a cross product and nothing cached so compute element.\n if cache is not None: return cache\n val = self._execute_callback(*tuple_key)\n if data_slice:\n val = self._dataslice(val, data_slice)\n self._cache(tuple_key, val)\n return val\n\n\n def select(self, selection_specs=None, **kwargs):\n \"\"\"\n Allows slicing or indexing into the DynamicMap objects by\n supplying the dimension and index/slice as key value\n pairs. Select descends recursively through the data structure\n applying the key dimension selection and applies to dynamically\n generated items by wrapping the callback.\n\n The selection may also be selectively applied to specific\n objects by supplying the selection_specs as an iterable of\n type.group.label specs, types or functions.\n \"\"\"\n if selection_specs is not None and not isinstance(selection_specs, (list, tuple)):\n selection_specs = [selection_specs]\n selection = super(DynamicMap, self).select(selection_specs, **kwargs)\n def dynamic_select(obj, **dynkwargs):\n if selection_specs is not None:\n matches = any(obj.matches(spec) for spec in selection_specs)\n else:\n matches = True\n if matches:\n return obj.select(**kwargs)\n return obj\n\n if not isinstance(selection, DynamicMap):\n return dynamic_select(selection)\n else:\n from ..util import Dynamic\n dmap = Dynamic(self, operation=dynamic_select, streams=self.streams)\n dmap.data = selection.data\n return dmap\n \n\n\n def _cache(self, key, val):\n \"\"\"\n Request that a key/value pair be considered for caching.\n \"\"\"\n cache_size = (1 if util.dimensionless_contents(self.streams, self.kdims)\n else self.cache_size)\n if len(self) >= cache_size:\n first_key = next(k for k in self.data)\n self.data.pop(first_key)\n self[key] = val\n\n\n def map(self, map_fn, specs=None, clone=True, link_inputs=True):\n \"\"\"\n Recursively replaces elements using a map function when the\n specification applies. Extends regular map with functionality\n to dynamically apply functions. 
By default all streams are\n still linked to the mapped object, to disable linked streams\n set linked_inputs=False.\n \"\"\"\n deep_mapped = super(DynamicMap, self).map(map_fn, specs, clone)\n if isinstance(deep_mapped, type(self)):\n from ..util import Dynamic\n def apply_map(obj, **dynkwargs):\n return obj.map(map_fn, specs, clone)\n dmap = Dynamic(self, operation=apply_map, streams=self.streams,\n link_inputs=link_inputs)\n dmap.data = deep_mapped.data\n return dmap\n return deep_mapped\n\n\n def relabel(self, label=None, group=None, depth=1):\n \"\"\"\n Assign a new label and/or group to an existing LabelledData\n object, creating a clone of the object with the new settings.\n \"\"\"\n relabelled = super(DynamicMap, self).relabel(label, group, depth)\n if depth > 0:\n from ..util import Dynamic\n def dynamic_relabel(obj, **dynkwargs):\n return obj.relabel(group=group, label=label, depth=depth-1)\n dmap = Dynamic(self, streams=self.streams, operation=dynamic_relabel)\n dmap.data = relabelled.data\n with util.disable_constant(dmap):\n dmap.group = relabelled.group\n dmap.label = relabelled.label\n return dmap\n return relabelled\n\n\n def collate(self):\n \"\"\"\n Collation allows reorganizing DynamicMaps with invalid nesting\n hierarchies. This is particularly useful when defining\n DynamicMaps returning an (Nd)Layout or GridSpace\n types. Collating will split the DynamicMap into individual\n DynamicMaps for each item in the container. Note that the\n composite object has to be of consistent length and types for\n this to work correctly.\n \"\"\"\n # Initialize\n if self.last is not None:\n initialized = self\n else:\n initialized = self.clone()\n initialized[initialized._initial_key()]\n\n if not isinstance(initialized.last, (Layout, NdLayout, GridSpace)):\n return self\n\n container = initialized.last.clone(shared_data=False)\n\n # Get stream mapping from callback\n remapped_streams = []\n streams = self.callback.stream_mapping\n for i, (k, v) in enumerate(initialized.last.data.items()):\n vstreams = streams.get(i, [])\n if not vstreams:\n if isinstance(initialized.last, Layout):\n for l in range(len(k)):\n path = '.'.join(k[:l])\n if path in streams:\n vstreams = streams[path]\n break\n else:\n vstreams = streams.get(k, [])\n if any(s in remapped_streams for s in vstreams):\n raise ValueError(\n \"The stream_mapping supplied on the Callable \"\n \"is ambiguous please supply more specific Layout \"\n \"path specs.\")\n remapped_streams += vstreams\n\n # Define collation callback\n def collation_cb(*args, **kwargs):\n return self[args][kwargs['selection_key']]\n callback = Callable(partial(collation_cb, selection_key=k),\n inputs=[self])\n vdmap = self.clone(callback=callback, shared_data=False,\n streams=vstreams)\n\n # Remap source of streams\n for stream in vstreams:\n if stream.source is self:\n stream.source = vdmap\n container[k] = vdmap\n\n unmapped_streams = [repr(stream) for stream in self.streams\n if (stream.source is self) and\n (stream not in remapped_streams)\n and stream.linked]\n if unmapped_streams:\n raise ValueError(\n 'The following streams are set to be automatically '\n 'linked to a plot, but no stream_mapping specifying '\n 'which item in the (Nd)Layout to link it to was found:\\n%s'\n % ', '.join(unmapped_streams)\n )\n return container\n\n\n def groupby(self, dimensions=None, container_type=None, group_type=None, **kwargs):\n \"\"\"\n Implements a dynamic version of a groupby, which will\n intelligently expand either the inner or outer dimensions\n depending 
on whether the container_type or group_type is dynamic.\n\n To apply a groupby to a DynamicMap the dimensions, which are\n expanded into a non-dynamic type must define a fixed sampling\n via the values attribute.\n\n Using the dynamic groupby makes it incredibly easy to generate\n dynamic views into a high-dimensional space while taking\n advantage of the capabilities of NdOverlay, GridSpace and\n NdLayout types to visualize more than one Element at a time.\n \"\"\"\n if dimensions is None:\n dimensions = self.kdims\n if not isinstance(dimensions, (list, tuple)):\n dimensions = [dimensions]\n\n container_type = container_type if container_type else type(self)\n group_type = group_type if group_type else type(self)\n\n outer_kdims = [self.get_dimension(d) for d in dimensions]\n inner_kdims = [d for d in self.kdims if not d in outer_kdims]\n\n outer_dynamic = issubclass(container_type, DynamicMap)\n inner_dynamic = issubclass(group_type, DynamicMap)\n\n if ((not outer_dynamic and any(not d.values for d in outer_kdims)) or\n (not inner_dynamic and any(not d.values for d in inner_kdims))):\n raise Exception('Dimensions must specify sampling via '\n 'values to apply a groupby')\n\n if outer_dynamic:\n def outer_fn(*outer_key, **dynkwargs):\n if inner_dynamic:\n def inner_fn(*inner_key, **dynkwargs):\n outer_vals = zip(outer_kdims, util.wrap_tuple(outer_key))\n inner_vals = zip(inner_kdims, util.wrap_tuple(inner_key))\n inner_sel = [(k.name, v) for k, v in inner_vals]\n outer_sel = [(k.name, v) for k, v in outer_vals]\n return self.select(**dict(inner_sel+outer_sel))\n return self.clone([], callback=inner_fn, kdims=inner_kdims)\n else:\n dim_vals = [(d.name, d.values) for d in inner_kdims]\n dim_vals += [(d.name, [v]) for d, v in\n zip(outer_kdims, util.wrap_tuple(outer_key))]\n with item_check(False):\n selected = HoloMap(self.select(**dict(dim_vals)))\n return group_type(selected.reindex(inner_kdims))\n if outer_kdims:\n return self.clone([], callback=outer_fn, kdims=outer_kdims)\n else:\n return outer_fn(())\n else:\n outer_product = itertools.product(*[self.get_dimension(d).values\n for d in dimensions])\n groups = []\n for outer in outer_product:\n outer_vals = [(d.name, [o]) for d, o in zip(outer_kdims, outer)]\n if inner_dynamic or not inner_kdims:\n def inner_fn(outer_vals, *key, **dynkwargs):\n inner_dims = zip(inner_kdims, util.wrap_tuple(key))\n inner_vals = [(d.name, k) for d, k in inner_dims]\n return self.select(**dict(outer_vals+inner_vals)).last\n if inner_kdims:\n group = self.clone(callback=partial(inner_fn, outer_vals),\n kdims=inner_kdims)\n else:\n group = inner_fn(outer_vals, ())\n groups.append((outer, group))\n else:\n inner_vals = [(d.name, self.get_dimension(d).values)\n for d in inner_kdims]\n with item_check(False):\n selected = HoloMap(self.select(**dict(outer_vals+inner_vals)))\n group = group_type(selected.reindex(inner_kdims))\n groups.append((outer, group))\n return container_type(groups, kdims=outer_kdims)\n\n\n def grid(self, dimensions=None, **kwargs):\n return self.groupby(dimensions, container_type=GridSpace, **kwargs)\n\n\n def layout(self, dimensions=None, **kwargs):\n return self.groupby(dimensions, container_type=NdLayout, **kwargs)\n\n\n def overlay(self, dimensions=None, **kwargs):\n if dimensions is None:\n dimensions = self.kdims\n else:\n if not isinstance(dimensions, (list, tuple)):\n dimensions = [dimensions]\n dimensions = [self.get_dimension(d, strict=True)\n for d in dimensions]\n dims = [d for d in self.kdims if d not in dimensions]\n return 
self.groupby(dims, group_type=NdOverlay)\n\n\n    def hist(self, num_bins=20, bin_range=None, adjoin=True, individually=True, **kwargs):\n        \"\"\"\n        Computes a histogram from the object and adjoins it by\n        default. By default the histogram is computed for the bottom\n        layer, which can be overridden by supplying an ``index``, and\n        for the first value dimension, which may be overridden by\n        supplying an explicit ``dimension``.\n        \"\"\"\n        def dynamic_hist(obj, **dynkwargs):\n            if isinstance(obj, (NdOverlay, Overlay)):\n                index = kwargs.get('index', 0)\n                obj = obj.get(index)\n            return obj.hist(num_bins=num_bins, bin_range=bin_range,\n                            adjoin=False, **kwargs)\n\n        from ..util import Dynamic\n        hist = Dynamic(self, streams=self.streams, link_inputs=False,\n                       operation=dynamic_hist)\n        if adjoin:\n            return self << hist\n        else:\n            return hist\n\n\n    def reindex(self, kdims=[], force=False):\n        \"\"\"\n        Reindexing a DynamicMap allows reordering the dimensions but\n        not dropping an individual dimension. The force argument, which\n        usually allows dropping non-constant dimensions, is therefore\n        ignored and kept only for API consistency.\n        \"\"\"\n        kdims = [self.get_dimension(kd, strict=True) for kd in kdims]\n        dropped = [kd for kd in self.kdims if kd not in kdims]\n        if dropped:\n            raise ValueError(\"DynamicMap does not allow dropping dimensions, \"\n                             \"reindex may only be used to reorder dimensions.\")\n        return super(DynamicMap, self).reindex(kdims, force)\n\n\n    def drop_dimension(self, dimensions):\n        raise NotImplementedError('Cannot drop dimensions from a DynamicMap, '\n                                  'cast to a HoloMap first.')\n\n    def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):\n        raise NotImplementedError('Cannot add dimensions to a DynamicMap, '\n                                  'cast to a HoloMap first.')\n\n    def next(self):\n        if self.callback.noargs:\n            return self[()]\n        else:\n            raise Exception('The next method can only be used for DynamicMaps using '\n                            'generators (or callables without arguments)')\n\n    # For Python 2 and 3 compatibility\n    __next__ = next\n\n\n\nclass GridSpace(UniformNdMapping):\n    \"\"\"\n    Grids are distinct from Layouts as they ensure that all contained\n    elements are of the same type. Unlike Layouts, which have\n    integer keys, Grids usually have floating point keys, which\n    correspond to a grid sampling in some two-dimensional space. This\n    two-dimensional space may correspond to arbitrary dimensions, e.g. 
for\n 2D parameter spaces.\n \"\"\"\n\n kdims = param.List(default=[Dimension(\"X\"), Dimension(\"Y\")], bounds=(1,2))\n\n def __init__(self, initial_items=None, kdims=None, **params):\n super(GridSpace, self).__init__(initial_items, kdims=kdims, **params)\n if self.ndims > 2:\n raise Exception('Grids can have no more than two dimensions.')\n\n\n def __mul__(self, other):\n if isinstance(other, GridSpace):\n if set(self.keys()) != set(other.keys()):\n raise KeyError(\"Can only overlay two ParameterGrids if their keys match\")\n zipped = zip(self.keys(), self.values(), other.values())\n overlayed_items = [(k, el1 * el2) for (k, el1, el2) in zipped]\n return self.clone(overlayed_items)\n elif isinstance(other, UniformNdMapping) and len(other) == 1:\n view = other.last\n elif isinstance(other, UniformNdMapping) and len(other) != 1:\n raise Exception(\"Can only overlay with HoloMap of length 1\")\n else:\n view = other\n\n overlayed_items = [(k, el * view) for k, el in self.items()]\n return self.clone(overlayed_items)\n\n\n def __lshift__(self, other):\n if isinstance(other, (ViewableElement, UniformNdMapping)):\n return AdjointLayout([self, other])\n elif isinstance(other, AdjointLayout):\n return AdjointLayout(other.data+[self])\n else:\n raise TypeError('Cannot append {0} to a AdjointLayout'.format(type(other).__name__))\n\n\n def _transform_indices(self, key):\n \"\"\"\n Transforms indices by snapping to the closest value if\n values are numeric, otherwise applies no transformation.\n \"\"\"\n ndims = self.ndims\n if all(not (isinstance(el, slice) or callable(el)) for el in key):\n dim_inds = []\n for dim in self.kdims:\n dim_type = self.get_dimension_type(dim)\n if isinstance(dim_type, type) and issubclass(dim_type, Number):\n dim_inds.append(self.get_dimension_index(dim))\n str_keys = iter(key[i] for i in range(self.ndims)\n if i not in dim_inds)\n num_keys = []\n if len(dim_inds):\n keys = list({tuple(k[i] if ndims > 1 else k for i in dim_inds)\n for k in self.keys()})\n q = np.array([tuple(key[i] if ndims > 1 else key for i in dim_inds)])\n idx = np.argmin([np.inner(q - np.array(x), q - np.array(x))\n if len(dim_inds) == 2 else np.abs(q-x)\n for x in keys])\n num_keys = iter(keys[idx])\n key = tuple(next(num_keys) if i in dim_inds else next(str_keys)\n for i in range(self.ndims))\n elif any(not (isinstance(el, slice) or callable(el)) for el in key):\n index_inds = [idx for idx, el in enumerate(key)\n if not isinstance(el, (slice, str))]\n if len(index_inds):\n index_ind = index_inds[0]\n dim_keys = np.array([k[index_ind] for k in self.keys()])\n snapped_val = dim_keys[np.argmin(np.abs(dim_keys-key[index_ind]))]\n key = list(key)\n key[index_ind] = snapped_val\n key = tuple(key)\n return key\n\n\n def keys(self, full_grid=False):\n \"\"\"\n Returns a complete set of keys on a GridSpace, even when GridSpace isn't fully\n populated. This makes it easier to identify missing elements in the\n GridSpace.\n \"\"\"\n keys = super(GridSpace, self).keys()\n if self.ndims == 1 or not full_grid:\n return keys\n dim1_keys = sorted(set(k[0] for k in keys))\n dim2_keys = sorted(set(k[1] for k in keys))\n return [(d1, d2) for d1 in dim1_keys for d2 in dim2_keys]\n\n\n @property\n def last(self):\n \"\"\"\n The last of a GridSpace is another GridSpace\n constituted of the last of the individual elements. 
To access\n        the elements by their X,Y position, either index the position\n        directly or use the items() method.\n        \"\"\"\n        if self.type == HoloMap:\n            last_items = [(k, v.last if isinstance(v, HoloMap) else v)\n                          for (k, v) in self.data.items()]\n        else:\n            last_items = self.data\n        return self.clone(last_items)\n\n\n    def __len__(self):\n        \"\"\"\n        The maximum depth of all the elements. Matches the semantics\n        of __len__ used by Maps. For the total number of elements,\n        count the full set of keys.\n        \"\"\"\n        return max([(len(v) if hasattr(v, '__len__') else 1) for v in self.values()] + [0])\n\n\n    def __add__(self, obj):\n        return Layout.from_values([self, obj])\n\n\n    @property\n    def shape(self):\n        keys = self.keys()\n        if self.ndims == 1:\n            return (len(keys), 1)\n        return len(set(k[0] for k in keys)), len(set(k[1] for k in keys))\n\n\n\nclass GridMatrix(GridSpace):\n    \"\"\"\n    GridMatrix is a container type for heterogeneous Element types\n    laid out in a grid. Unlike a GridSpace, the axes of the Grid\n    need not represent an actual coordinate space, but may be used\n    to plot various dimensions against each other. The GridMatrix\n    is usually constructed using the gridmatrix operation, which\n    will generate a GridMatrix plotting each dimension in an\n    Element against each other.\n    \"\"\"\n\n\n    def _item_check(self, dim_vals, data):\n        if not traversal.uniform(NdMapping([(0, self), (1, data)])):\n            raise ValueError(\"HoloMaps dimensions must be consistent in %s.\" %\n                             type(self).__name__)\n        NdMapping._item_check(self, dim_vals, data)\n","sub_path":"examples/sites/holoviews/holoviews/core/spaces.py","file_name":"spaces.py","file_ext":"py","file_size_in_byte":62572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"523429532","text":"import base64\nimport logging\nimport uuid\nfrom dataclasses import dataclass\nfrom typing import Optional, Callable, Any\n\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.http import HttpRequest, HttpResponse, QueryDict\n\nlog = logging.getLogger(__name__)\n\n\n@dataclass\nclass RequestContext:\n request_id: Any\n user: Any\n user_session_id: Any\n\n\nclass AccessLogMiddleware:\n def __init__(self, get_response: Callable[[HttpRequest], HttpResponse]) -> None:\n self.get_response = get_response\n\n def __call__(self, request: HttpRequest) -> HttpResponse:\n request_context = self.log_request(request)\n response: HttpResponse = self.get_response(request)\n self.log_response(response, request_context)\n return response\n\n def log_request(self, request: HttpRequest) -> RequestContext:\n request_context = RequestContext(\n request_id=self.get_request_id(request),\n user=self.get_user(request),\n user_session_id=self.get_user_session_id(request),\n )\n log.info(\n \"RX\",\n extra=dict(\n http_method=request.method,\n path=request.path,\n query_params=self.get_query_params(request),\n user_agent=self.get_user_agent(request),\n ip=self.get_ip(request),\n x_forwarded_for=self.get_x_forwarded_for(request),\n request_id=request_context.request_id,\n user=request_context.user,\n user_session_id=request_context.user_session_id,\n ),\n )\n return request_context\n\n @staticmethod\n def get_x_forwarded_for(request: HttpRequest) -> Optional[str]:\n return request.META.get(\"HTTP_X_FORWARDED_FOR\")\n\n @staticmethod\n def get_ip(request: HttpRequest) -> Optional[str]:\n return request.META.get(\"REMOTE_ADDR\")\n\n @staticmethod\n def get_user_agent(request: HttpRequest) -> Optional[str]:\n return request.META.get(\"HTTP_USER_AGENT\")\n\n @staticmethod\n def get_query_params(request: HttpRequest) -> Optional[QueryDict]:\n return request.GET if request.GET else None\n\n @staticmethod\n def get_user(request: HttpRequest) -> Optional[str]:\n return (\n str(request.user) if not str(request.user) == str(AnonymousUser()) else None\n )\n\n @staticmethod\n def get_request_id(request: HttpRequest) -> str:\n return request.META.get(\"HTTP_X_REQUEST_ID\") or base64.urlsafe_b64encode(\n uuid.uuid4().bytes\n ).rstrip(b\"=\").decode(\"ascii\")\n\n @staticmethod\n def get_user_session_id(request: HttpRequest) -> Optional[str]:\n session = getattr(request, \"session\", None)\n session_id = getattr(session, \"session_key\", None)\n return session_id\n\n @staticmethod\n def log_response(response: HttpResponse, request_context: RequestContext) -> None:\n log.info(\n \"TX\",\n extra=dict(\n status_code=response.status_code,\n cookies=response.cookies if response.cookies else None,\n request_id=request_context.request_id,\n user=request_context.user,\n user_session_id=request_context.user_session_id,\n ),\n )\n","sub_path":"django_json_logging/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"125230085","text":"import os.path\nimport sys\n\nPROJECT_ROOT = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(PROJECT_ROOT, 'src'))\n\nimport filework\n\ndbData = filework.getFileData('tmp/database.txt')\n\nlength = len(dbData)\ni = 0\n\nwhile i < length:\n count = dbData.count(dbData[i])\n removeElem = dbData[i]\n\n if (count > 1):\n for j in range(1, count):\n dbData.remove(removeElem)\n length -= 1\n\n i += 1\n\ndbFile = open('tmp/database.txt', 'w')\n\ndbFile.write('\\n'.join(map(str, dbData)))\n\ndbFile.close()","sub_path":"removeDuplicates.py","file_name":"removeDuplicates.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"551791970","text":"import json\nimport logging\nimport traceback\n\nfrom django.contrib import admin\nfrom django.contrib.admin import ListFilter\nfrom django.db.models import Q\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.urls import path\n\n\nclass AjaxAdmin(admin.ModelAdmin):\n \"\"\"\n This class is used to add ajax functionality to the admin interface.\n \"\"\"\n\n def _get_and_clear_simple_filter(self, request):\n\n sfs = []\n _filter = {}\n if \"_filter\" in request.POST:\n _filter = json.loads(request.POST.get(\"_filter\"))\n lls = self.get_list_filter(request)\n if lls:\n for ll in lls:\n try:\n name = ll.parameter_name\n if issubclass(ll, ListFilter):\n val = _filter[name]\n\n def value(*args, **kwargs):\n return val\n\n # 用lambda表达式,重写value 返回val\n ll.value = value\n\n if name in _filter:\n # 删除\n del _filter[name]\n sfs.append(ll)\n except Exception as e:\n pass\n return _filter, sfs\n\n def _get_queryset(self, request):\n post = request.POST\n action = post.get(\"_action\")\n selected = post.get(\"_selected\")\n select_across = post.get(\"select_across\")\n if hasattr(self, action):\n # 过滤simplefilter的key\n _filter, sfs = self._get_and_clear_simple_filter(request)\n # 这里的queryset 会有数据过滤,只包含选中的数据\n queryset = self.get_changelist_instance(request).get_queryset(request)\n\n # 没有选择全部的时候才过滤数据\n if select_across == \"0\":\n if selected and selected.split(\",\"):\n queryset = queryset.filter(pk__in=selected.split(\",\"))\n else:\n # 过滤搜索条件,自simpleui-2022.3.13版本开始,支持搜索条件过滤,simplepro需要3.4.2及以上版本\n # 只有选择全部的时候才过滤数据\n # 字段为_search和_filter 是为了防止命名冲突\n\n # search\n if \"_search\" in post:\n search_fields = self.get_search_fields(request)\n\n if search_fields:\n search_value = post.get(\"_search\")\n if search_value:\n q = Q()\n for s in search_fields:\n q = q | Q(**{s + \"__icontains\": search_value})\n try:\n queryset = queryset.filter(q)\n except Exception as e:\n traceback.print_exc()\n raise e\n\n # filter条件过滤\n if \"_filter\" in post:\n if _filter:\n new_filter = self.__clean_filter(_filter)\n # 过滤simplefilter\n if sfs:\n for sf in sfs:\n queryset = sf.queryset.__call__(sf, request, queryset)\n queryset = queryset.filter(**new_filter)\n return queryset\n else:\n raise Exception(\"action not found\")\n\n def __clean_filter(self, _filter):\n new_filter = {}\n for k, v in _filter.items():\n if \"__exact\" in k and isinstance(v, list):\n new_filter[k.replace('__exact', '__in')] = v[0]\n else:\n new_filter[k] = v\n\n return new_filter\n\n def __handler_simple_filter(self, queryset, _filter):\n # 判断_filter里面的字段是否是simplefilter\n # 如果是simplefilter,就调用simplefilter的过滤方法\n # 如果不是simplefilter,就返回原来的queryset\n\n sf = self.get_list_filter()\n if sf:\n for f in sf:\n if f.__class__.__name__ == \"SimpleListFilter\":\n if f.parameter_name in _filter:\n return f.queryset(request=None, queryset=queryset, value=_filter[f.parameter_name])\n\n # for k, v in _filter.items():\n\n return queryset\n\n def callback(self, request):\n \"\"\"\n This method is used to handle ajax requests.\n \"\"\"\n post = request.POST\n action = post.get(\"_action\")\n\n # call admin\n if hasattr(self, action):\n func, action, description = self.get_action(action)\n qs = self._get_queryset(request)\n r = func(self, request, qs)\n if r is None:\n return JsonResponse(data={\n \"status\": \"success\",\n \"msg\": \"Success!\"\n })\n if isinstance(r, HttpResponseRedirect):\n return JsonResponse(data={\n \"status\": \"redirect\",\n \"url\": r.url\n })\n elif isinstance(r, JsonResponse):\n return 
r\n            elif isinstance(r, dict):\n                return JsonResponse(data=r)\n            else:\n                logging.warning(f\"action {action} return type is {type(r)}\")\n                return JsonResponse(data={\"status\": \"error\", \"msg\": r})\n\n    def get_layer(self, request):\n        \"\"\"\n        This method is used to get the layer of the admin interface.\n        \"\"\"\n        _action = request.POST.get(\"_action\")\n        if hasattr(self, _action):\n            func, action, description = self.get_action(_action)\n            if hasattr(func, \"layer\"):\n                arg_count = func.layer.__code__.co_argcount\n                if arg_count == 2:\n                    result = func.layer(self, request)\n                elif arg_count == 3:\n                    # Kept for backwards compatibility with older versions\n                    qs = self._get_queryset(request)\n                    result = func.layer(self, request, qs)\n\n                return JsonResponse(data=result, safe=False)\n        else:\n            raise Exception(f'action \"{_action}\" not found')\n\n    def get_urls(self):\n        \"\"\"\n        This method is used to add ajax functionality to the admin interface.\n        \"\"\"\n        info = self.model._meta.app_label, self.model._meta.model_name\n\n        return super().get_urls() + [\n            path(\"ajax\", self.callback, name=\"%s_%s_ajax\" % info),\n            path(\"layer\", self.get_layer, name=\"%s_%s_layer\" % info)\n        ]\n","sub_path":"simpleui/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"18320538","text":"# Derived from ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py\n\n# Copyright (c) 2017 Netronome Systems Pty. Ltd.\n# Copyright (c) 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron.plugins.ml2.drivers.openvswitch.mech_driver \\\n import mech_openvswitch\nfrom neutron_lib.api.definitions import portbindings\nfrom neutron_lib.api.definitions.portbindings import VNIC_TYPE\nfrom neutron_lib.api.definitions.portbindings import VNIC_VIRTIO_FORWARDER\nfrom oslo_config import cfg\n\nfrom networking_netronome.plugins.ml2.drivers import agilio_ovs_conf\n\n\nCONF = cfg.CONF\n\nagilio_ovs_conf.register_agilio_ovs_opts()\nCONF.import_group('AGILIO_OVS',\n 'networking_netronome.plugins.ml2.drivers.agilio_ovs_conf')\n\n\nclass AgilioOvsMechanismDriver(mech_openvswitch.OpenvswitchMechanismDriver):\n \"\"\"Extend the Openvswitch Mechanism Driver to support Agilio OVS NICs\n\n This mechanism driver introduces extended functionality into the\n Openvswitch driver in order to support accelerated SR-IOV and\n vhost-user VNIC types.\n \"\"\"\n\n def __init__(self):\n super(AgilioOvsMechanismDriver, self).__init__()\n self.supported_vnic_types += [portbindings.VNIC_DIRECT,\n portbindings.VNIC_VIRTIO_FORWARDER]\n self.vif_type = 'agilio_ovs'\n\n def _get_vhost_mode_for_hypervisor(self):\n # This function returns the vhost mode for the hypervisor.\n # If virtio-forwarder is set to client, the hypervisor must be\n # server, and vice versa.\n if cfg.CONF.AGILIO_OVS.virtio_forwarder_mode == \\\n portbindings.VHOST_USER_MODE_CLIENT:\n return portbindings.VHOST_USER_MODE_SERVER\n return portbindings.VHOST_USER_MODE_CLIENT\n\n def _pre_get_vif_details(self, agent, context):\n vif_details = super(\n AgilioOvsMechanismDriver,\n self)._pre_get_vif_details(agent, context)\n if context.current[VNIC_TYPE] == VNIC_VIRTIO_FORWARDER:\n sock_path = self.agent_vhu_sockpath(agent, context.current['id'])\n vif_details[portbindings.VHOST_USER_SOCKET] = sock_path\n vif_details[portbindings.VHOST_USER_MODE] = \\\n self._get_vhost_mode_for_hypervisor()\n vif_details[portbindings.VHOST_USER_OVS_PLUG] = True\n return vif_details\n","sub_path":"networking_netronome/plugins/ml2/drivers/agilio_ovs/mech_driver/mech_agilio_ovs.py","file_name":"mech_agilio_ovs.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"628715130","text":"from tensorflow import keras as krs\nimport numpy as np\n\nfrom src import JsonControl as jc\nfrom src import Collector as collector\n\n# load preprocessed data\n# parameter: preprocessed train data path, preprocessed test data path\n# return: train_x, train_y, test_x, test_y\ndef load_data(ptr_path, pte_path):\n print('Opening preprocessed train json...')\n train = jc.open_json(ptr_path)\n train_x = train['x']\n train_y = train['y']\n\n print('Opening preprocessed test json...')\n test = jc.open_json(pte_path)\n test_x = test['x']\n test_y = test['y']\n\n return train_x, train_y, test_x, test_y\n\n# pick which reaction to use from sentiments\ndef pick_y_binary(sentiments, i):\n return 1 if max(sentiments) == sentiments[i] else 0\n # return 1 if max(sentiments[0], sentiments[1], sentiments[2], sentiments[3], sentiments[4]) == sentiments[i] else 0\n\n# train a model for binary output\ndef train_binary(train_x, train_y):\n model = krs.models.Sequential()\n model.add(krs.layers.Dense(64, activation='relu', input_shape=(10000,)))\n model.add(krs.layers.Dropout(0.7))\n model.add(krs.layers.Dense(64, activation='relu'))\n model.add(krs.layers.Dropout(0.7))\n model.add(krs.layers.Dense(1, activation='sigmoid'))\n\n model.compile(optimizer=krs.optimizers.RMSprop(lr=0.001),\n loss=krs.losses.binary_crossentropy,\n metrics=[krs.metrics.binary_accuracy])\n\n print(\"Training...\")\n model.fit(train_x, train_y, epochs=10, batch_size=512, verbose=False)\n\n return model\n\n# run new analysis from newly built model(W,b)\ndef run_new_train(ptr_path, pte_path):\n # load data\n train_x, train_y, test_x, test_y = load_data(ptr_path, pte_path)\n\n # train model\n models = []\n train_x = np.asarray(train_x).astype('float32')\n\n for i in range(5):\n train_y2 = [pick_y_binary(y, i) for y in train_y]\n train_y2 = np.asarray(train_y2).astype('float32')\n\n model = [train_binary(train_x, train_y2)]\n\n models += model\n\n # throw sample tests\n reaction_index = 0\n reactions = ['like', 'warm', 'sad', 'angry', 'want']\n\n # bring urls from urls.json (last 30)\n urls = jc.open_json('data\\\\urls.json')['urls'][-30:]\n pool = [word for word, count in jc.open_json(\"data\\\\wordpool.json\")]\n\n # for each url(news)\n for url in urls:\n print(\">>>\", url)\n test = make_test_from_url(url, pool)\n # for each model (like, warm, sad, angry, want)\n for model in models:\n prob = model.predict(np.asarray(test['x']))[0][0]\n print(' {}: {:.2f}%'.format(reactions[reaction_index % 5], prob*100), end='')\n reaction_index += 1\n print()\n\n# given url, produce test data\ndef make_test_from_url(url, pool):\n anews = collector.get_news_contents(url)\n\n test_data = {'x': [], 'y': []}\n x = [anews['title'].split(' ') + anews['body'].split(' ')]\n\n x = [[words.count(word) for word in pool] for words in x]\n test_data['x'] = np.asarray(x).astype('float32')\n\n return test_data\n","sub_path":"src/DNN.py","file_name":"DNN.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"344078912","text":"import time\nimport math\n\nfrom wpilib import SmartDashboard\nfrom wpilib.command import Command\n\nimport robotmap\nimport subsystems\n\n\nclass TurnToHeading(Command):\n \"\"\"\n This command implements a PID loop to turn the robot to a heading\n\n target and tolerance are both given in Yaw (-180 to +180)\n\n It collects samples of the turn rate to determine when turn is finished via measuring how steady the robot is\n Good pid values for Stronghold were 0.05, 0.001, 0.1 (PID)\n\n ScaleSpeed is how much to reduce the PID output value by to apply to each wheel (eg.. 0.5 will apply half to each\n wheel which is equivilent to the intended output vs no scaling which effectively doubles the turn speed)\n \"\"\"\n def __init__(self, target=0.0, p=0.0, i=0.0, d=0.0, tolerance=0.0, minSpeed=0.0, numSamples=4, steadyRate=2.0, scaleSpeed=0.5, useDashboardValues=False):\n super().__init__('TurnToHeading')\n self.requires(subsystems.driveline)\n self.setInterruptible(True)\n self.setRunWhenDisabled(False)\n self.logCounter = 0\n self.useSmartDashboardValues = useDashboardValues\n self.target = target\n self.kP = p\n self.kI = i\n self.kD = d\n self.tolerance = tolerance\n self.minSpeed = minSpeed\n self.scaleSpeed = scaleSpeed\n\n self.sampleToUpdate = 0 # index for which sample are we updating each pass of execute\n self.ratePasses = 0 # prevent skew when calculating average before we've filled each sample\n self.turnRateSamples = None # hold the samples\n self.numSamples = numSamples\n self.avgRate = 0.0 # track the current avg turn rate to know when steady\n self.steadyRate = steadyRate\n\n\n def initialize(self):\n #print(\"CMD TurnToHeading initialize called\")\n if self.useSmartDashboardValues:\n self.target = SmartDashboard.getNumber(\"Turn Target\", 0.0)\n self.tolerance = SmartDashboard.getNumber(\"Turn Tolerance\", 500.0)\n self.tolerance = SmartDashboard.getNumber(\"Turn minSpeed\", 0.0)\n self.scaleSpeed = SmartDashboard.getNumber(\"Turn scaleSpeed\", 0.5)\n self.kP = SmartDashboard.getNumber(\"Turn P\", 0.05)\n self.kI = SmartDashboard.getNumber(\"Turn I\", 0.001)\n self.kD = SmartDashboard.getNumber(\"Turn D\", 0.01)\n\n subsystems.driveline.turnPID.minSpeed = self.minSpeed\n subsystems.driveline.turnPID.scaleSpeed = self.scaleSpeed\n subsystems.driveline.pidTurnController.setOutputRange(-1.0, 1.0)\n subsystems.driveline.pidTurnController.setInputRange(-180, 180)\n subsystems.driveline.pidTurnController.setContinuous(True)\n subsystems.driveline.pidTurnController.setPID(self.kP, self.kI, self.kD)\n subsystems.driveline.pidTurnController.setSetpoint(self.target)\n subsystems.driveline.pidTurnController.setAbsoluteTolerance(self.tolerance)\n\n\n self.turnRateSamples = [self.steadyRate] * self.numSamples # biasing to neutral to begin\n\n\n print(\"CMD TurnToHeading Starting({}) - target: {}, current: {}, P: {}, I: {}, D: {}, tolerance: {}, steadyRate: {}, scaleSpeed: {}\".format(\n int(round(time.time() * 1000)), self.target, robotmap.sensors.ahrs.getYaw(),\n self.kP, self.kI, self.kD, self.tolerance, self.steadyRate, self.scaleSpeed))\n\n def execute(self):\n if not subsystems.driveline.pidTurnController.isEnabled():\n subsystems.driveline.pidTurnController.enable()\n\n if self.numSamples > 0:\n self.turnRateSamples[self.sampleToUpdate] = math.fabs(robotmap.sensors.ahrs.getRate())\n self.sampleToUpdate += 1\n if self.ratePasses <= self.numSamples:\n self.ratePasses += 1\n if self.sampleToUpdate == self.numSamples:\n self.sampleToUpdate = 0\n\n\n def 
isFinished(self):\n if self.numSamples > 0:\n avgRate = sum(self.turnRateSamples) / self.ratePasses\n\n self.logCounter += 1\n if self.logCounter > 25:\n self.logCounter = 0\n if self.numSamples > 0:\n print(\"CMD TurnToHeading isFinished - target: {}, current: {}, tolerance: {}, avgRate: {}, steadyRate: {}\".format(\n self.target, robotmap.sensors.ahrs.getYaw(), self.tolerance, avgRate, self.steadyRate))\n else:\n print(\"CMD TurnToHeading isFinished - target: {}, current: {}, tolerance: {}\".format(\n self.target, robotmap.sensors.ahrs.getYaw(), self.tolerance))\n\n if self.numSamples > 0:\n if self.useSmartDashboardValues:\n SmartDashboard.putNumber(\"AvgRateYawDPS\", avgRate)\n if avgRate < self.steadyRate:\n if math.fabs(self.target - robotmap.sensors.ahrs.getYaw()) < self.tolerance:\n # print(\"CMD TurnToHeading isFinished is True\")\n return True\n else:\n if math.fabs(self.target - robotmap.sensors.ahrs.getYaw()) < self.tolerance:\n # print(\"CMD TurnToHeading isFinished is True\")\n return True\n return False\n\n def end(self):\n print(\"CMD TurnToHeading Ended({}) - target: {}, current: {}\".format(\n int(round(time.time() * 1000)), self.target, robotmap.sensors.ahrs.getYaw()))\n subsystems.driveline.pidTurnController.reset()\n subsystems.driveline.stop()\n\n def interrupted(self):\n print(\"CMD TurnToHeading Interrupted({}) - target: {}, current: {}\".format(\n int(round(time.time() * 1000)), self.target, robotmap.sensors.ahrs.getYaw()))\n subsystems.driveline.pidTurnController.reset()\n subsystems.driveline.stop()\n\n","sub_path":"commands/turntoheading.py","file_name":"turntoheading.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"336405963","text":"import pandas as pd\r\nimport argparse\r\nimport time\r\nimport torchvision\r\nimport torch.nn as nn\r\nfrom tqdm import tqdm_notebook as tqdm\r\n\r\nfrom PIL import Image, ImageFile\r\nfrom torch.utils.data import Dataset\r\nimport torch\r\nimport torch.optim as optim\r\nfrom torchvision import transforms\r\nfrom torch.optim import lr_scheduler\r\nimport os\r\n\r\ndevice = torch.device(\"cuda:0\")\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\n# parser = argparse.ArgumentParser(description='Train a simple classifier for blindness detection')\r\n# parser.add_argument('--train_dir', default='',\r\n# help='where you store the train img, e.g./input/aptos2019-blindness-detection/train_images')\r\n# parser.add_argument('--wd', type=float, default=0.0001, help='weight decay')\r\n# parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')\r\n# parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')\r\n# args = parser.parse_args()\r\noutput_features = 1 # 分级数据,0-4级总共5级\r\n\r\n\r\n# 首先搞一个dataset\r\nclass RetinopathyDatasetTrain(Dataset):\r\n\r\n def __init__(self, csv_file):\r\n\r\n self.data = pd.read_csv(csv_file)\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n def __getitem__(self, idx):\r\n img_name = os.path.join('./dataset/train', self.data.loc[idx, 'image'] + '.jpeg') # todo: 修正路径\r\n image = Image.open(img_name)\r\n image = image.resize((224, 224), resample=Image.BILINEAR)\r\n label = torch.tensor(self.data.loc[idx, 'level'])\r\n return {'image': transforms.ToTensor()(image),\r\n 'labels': label\r\n }\r\n\r\n\r\n# 设定好model\r\nprint('load model structure')\r\nmodel = torchvision.models.resnet50(pretrained=False) # todo:如果不想用官方模型,自己写的话需要替换torchvision.models.resnet101()\r\nprint('load pretrained model')\r\nmodel.load_state_dict(torch.load(\"./pretrained_models/resnet50-19c8e357.pth\")) # todo:修正路径\r\nnum_features = model.fc.in_features\r\nmodel.fc = nn.Linear(num_features, output_features)\r\nprint('load model to gpu')\r\nmodel = model.to(device)\r\n\r\n# 将dataset设定好\r\nprint('load dataset')\r\ntrain_dataset = RetinopathyDatasetTrain(csv_file='./dataset/trainLabels_cropped.csv') # todo: 修正路径\r\ndata_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=0) # windows情况下需要设成0\r\n\r\nplist = [\r\n {'params': model.layer4.parameters(), 'lr': 1e-4, 'weight': 0.001},\r\n {'params': model.fc.parameters(), 'lr': 1e-3}\r\n ]\r\n# 设定优化器\r\nprint('set optimizer')\r\noptimizer = optim.Adam(plist, lr=0.001)\r\nscheduler = lr_scheduler.StepLR(optimizer, step_size=10)\r\n\r\n\r\n# 开始训练\r\nprint('start training')\r\nsince = time.time()\r\ncriterion = nn.MSELoss()\r\nnum_epochs = 10\r\nfor epoch in range(num_epochs):\r\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\r\n print('-' * 10)\r\n scheduler.step()\r\n model.train()\r\n running_loss = 0.0\r\n tk0 = tqdm(data_loader, total=int(len(data_loader)))\r\n counter = 0\r\n for bi, d in enumerate(tk0):\r\n inputs = d[\"image\"]\r\n labels = d[\"labels\"].view(-1, 1)\r\n inputs = inputs.to(device, dtype=torch.float)\r\n labels = labels.to(device, dtype=torch.float)\r\n optimizer.zero_grad()\r\n with torch.set_grad_enabled(True):\r\n outputs = model(inputs)\r\n loss = criterion(outputs, labels)\r\n loss.backward()\r\n optimizer.step()\r\n running_loss += loss.item() * inputs.size(0)\r\n counter += 1\r\n tk0.set_postfix(loss=(running_loss / (counter * data_loader.batch_size)))\r\n if counter % 10 == 
0:\r\n print('step{}: '.format(counter) +\r\n 'training loss: {:.4f}'.format(running_loss / (counter * data_loader.batch_size)))\r\n epoch_loss = running_loss / len(data_loader)\r\n print('Training Loss: {:.4f}'.format(epoch_loss))\r\n time_elapsed_epoch = time.time() - since\r\n print('Epoch completed in {:.0f}m {:.0f}s'.format(time_elapsed_epoch // 60, time_elapsed_epoch % 60))\r\ntime_elapsed = time.time() - since\r\nprint('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\r\ntorch.save(model.state_dict(), \"inception_v3_10.pth\")\r\n","sub_path":"original_train.py","file_name":"original_train.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"180712634","text":"from __future__ import division\nfrom builtins import range\nimport numpy as np\nfrom numpy import newaxis as na\nnp.seterr(invalid='raise')\nimport copy\n\nfrom autoregressive_mini.distributions import Multinomial\nfrom autoregressive_mini.util import sample_crp_tablecounts, count_transitions\n\n# TODO separate out bayesian and nonbayesian versions?\n\nclass WeakLimitStickyHDPHMMTransitions():\n\n def __init__(self,kappa=None,gamma=None,alpha=None,num_states=None,beta=None,trans_matrix=None):\n self.N = num_states\n self.alpha = alpha\n self.kappa = kappa\n self.beta_obj = Multinomial(alpha_0=gamma,K=self.N,weights=beta)\n\n alphav = alpha*self.beta\n self._row_distns = [Multinomial(alpha_0=alpha,K=self.N,alphav_0=alphav) for n in range(self.N)]\n\n @property\n def beta(self):\n return self.beta_obj.weights\n\n @beta.setter\n def beta(self,weights):\n self.beta_obj.weights = weights\n self.alphav = self.alpha * self.beta\n\n @property\n def gamma(self):\n return self.beta_obj.alpha_0\n\n @gamma.setter\n def gamma(self,val):\n self.beta_obj.alpha_0 = val\n\n @property\n def alpha(self):\n return self._alpha\n\n @alpha.setter\n def alpha(self,val):\n self._alpha = val\n\n @property\n def trans_matrix(self):\n return np.array([d.weights for d in self._row_distns])\n\n @trans_matrix.setter\n def trans_matrix(self,trans_matrix):\n N = self.N = trans_matrix.shape[0]\n self._row_distns = \\\n [Multinomial(alpha_0=self.alpha,K=N,alphav_0=self.alphav,weights=row)\n for row in trans_matrix]\n\n @property\n def alphav(self):\n return self._row_distns[0].alphav_0\n\n @alphav.setter\n def alphav(self,weights):\n for distn, delta_ij in zip(self._row_distns,np.eye(self.N)):\n distn.alphav_0 = weights + self.kappa * delta_ij\n\n def _get_m(self,trans_counts):\n # NOTE: this thins the m's\n\n if not (0 == trans_counts).all():\n m = sample_crp_tablecounts(float(self.alpha),trans_counts,self.beta)\n else:\n m = np.zeros_like(trans_counts)\n self.m = m\n\n newms = m.copy()\n if m.sum() > 0:\n indices = np.nonzero(newms.flat[::m.shape[0]+1])\n newms.flat[::m.shape[0]+1][indices] = np.array(np.random.binomial(\n m.flat[::m.shape[0]+1][indices],\n self.beta[indices]*self.alpha/(self.beta[indices]*self.alpha + self.kappa)),\n dtype=np.int32)\n return newms\n\n def resample(self,stateseqs=[],trans_counts=None,ms=None):\n trans_counts = self._count_transitions(stateseqs) if trans_counts is None \\\n else trans_counts\n ms = self._get_m(trans_counts) if ms is None else ms\n\n self._resample_beta(ms)\n\n trans_counts = self._count_transitions(stateseqs) if trans_counts is None \\\n else trans_counts\n for distn, counts in zip(self._row_distns,trans_counts):\n distn.resample(counts)\n return self\n\n def _resample_beta(self,ms):\n self.beta_obj.resample(ms)\n self.alphav = self.alpha * self.beta\n\n def _count_transitions(self,stateseqs):\n assert isinstance(stateseqs,list) and all(isinstance(s,np.ndarray) for s in stateseqs)\n return sum((count_transitions(s,num_states=self.N) for s in stateseqs),\n np.zeros((self.N,self.N),dtype=np.int32))\n\n","sub_path":"internals/transitions.py","file_name":"transitions.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"462860951","text":"import threading\nimport os\nimport requests\n\nfrom Send_message import send_message\nfrom Errorlog import errorlog\n\n\ndef load_bsmessage(FOLDER):\n global bsmessagestr; global backseating; global folder\n backseating = False\n folder = FOLDER\n\n try:\n with open(f'{os.path.dirname(os.path.dirname(__file__))}/{folder}/files/Backseatmessage.txt', 'r') as f:\n bsmessagestr = f.read()\n except:\n with open(f'{os.path.dirname(os.path.dirname(__file__))}/{folder}/files/Backseatmessage.txt', 'w') as f:\n bsmessagestr = \"/me Please don't backseat. This is a blind playthrough!\"\n f.write(bsmessagestr)\n\n\ndef bsmessage(s):\n global bstimer\n if backseating:\n try:\n bstimer = threading.Timer(900, bsmessage, [s])\n bstimer.start()\n send_message(s, bsmessagestr)\n except Exception as errormsg:\n errorlog(errormsg, \"Backseatmessage()\", '')\n\n\ndef backseatmessage(s, message):\n global bstimer; global bsmessagestr; global backseating\n messageparts = message.split(\" \")\n if messageparts[1] == \"on\":\n if not backseating:\n backseating = True\n bstimer = threading.Timer(900, bsmessage, [s])\n bstimer.start()\n send_message(s, \"Backseating message enabled.\")\n else:\n send_message(s, \"BSM already enabled.\")\n elif messageparts[1] == \"off\":\n if backseating:\n backseating = False\n bstimer.cancel()\n send_message(s, \"Backseating message disabled.\")\n else:\n send_message(s, \"BSM already off.\")\n elif messageparts[1] == \"set\":\n try:\n newbsmessage = \" \".join(messageparts[2:])\n bsmessagestr = newbsmessage\n with open(f'{os.path.dirname(os.path.dirname(__file__))}/{folder}/files/Backseatmessage.txt', 'w') as f:\n f.write(bsmessagestr)\n send_message(s, \"Backseat message changed.\")\n except Exception as errormsg:\n errorlog(errormsg, 'backseatmessage/set()', message)\n send_message(s, \"There was an error chaning the backseatmessage. Please try again.\")\n\n\ndef bsmcheck(channel_id, client_id):\n global backseating\n url = 'https://api.twitch.tv/helix/streams?user_id=%s' % channel_id\n headers = {'Client-ID': client_id, 'Accept': 'application/vnd.twitchtv.v5+json'}\n r = requests.get(url, headers=headers).json()\n response = r[\"data\"]\n try:\n if response[0][\"type\"] == \"live\":\n pass\n except:\n backseating = False\n","sub_path":"Modules/Backseatmessage.py","file_name":"Backseatmessage.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"182062316","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 25 12:11:58 2019\nScript for automating LISA Analyses\n\nI used geopandas to read an existing shapefile and perform a spatial measure\nPySAL will be used to perform the spatial measure (LISA)\n@author: dahaynes\n\"\"\"\n\n\n\nimport pysal, geopandas\n\nshpFilePath = r\"E:\\git\\GIS5578\\GIS5578-FOSS\\datasets\\mn_tracts_2010.shp\"\ncountiesDF = geopandas.read_file(shpFilePath)\n\n\n #Generate once per loop\nweights = pysal.weights.Queen.from_dataframe(countiesDF)\ncountiesDF.keys()\n\nnationalDeprivationScores = countiesDF['national_a']\npercentMinority = countiesDF['percent_mi']\n \nbivariateLisa = pysal.pysal.Moran_Local_BV(nationalDeprivationScores, percentMinority, weights, permutations=999)\nfor recordName, recordValues in zip(['lisa','p_value', 'quadrant'], [bivariateLisa.Is, bivariateLisa.p_sim, bivariateLisa.q] ):\n countiesDF.insert(len(countiesDF.keys()), recordName, recordValues)\n \n\ndel countiesDF['geometry']\n\ncountiesDF.to_csv(r\"E:\\work\\bivariate_list.csv\", sep=',')\nprint(\"Finished\")","sub_path":"geopandasLISA.py","file_name":"geopandasLISA.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"8277740","text":"import tunneling.solve as solve\nimport numpy as np\nimport cmath\nimport matplotlib.pyplot as plt\n\n\nclass SolvingProblemArray(solve.SolvingProblem):\n\n def __init__(self, thickness: float, area: float, arr, eff_m, start: float = 0, end=0):\n super().__init__(thickness, area, start, end, eff_m)\n self.dense = 30\n self.N = (len(arr) - 1) * self.dense + 2\n self.potential = np.array([0.3])\n self.reduced_mass = cmath.sqrt(0.511 * eff_m) * 1e+3 / 299792458 # eV ** 0.5 * s / m\n self.area = area\n self.dx = thickness / self.N\n self.arr = arr\n\n def gen_pot(self, v=0.):\n self.potential = np.zeros(self.N)\n for i in range(len(self.arr) - 1):\n self.potential[self.dense * i + 1: self.dense * (i + 1) + 1] \\\n = np.linspace(self.arr[i] + self.Ef, self.arr[i + 1] + self.Ef, self.dense)\n self.potential += np.linspace(0, v, self.N)\n self.potential.put(0, self.Ef)\n self.potential.put(-1, self.Ef + v)\n# plt.show()\n","sub_path":"tunneling/solve_with_array.py","file_name":"solve_with_array.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"39320974","text":"# Original code: https://github.com/prehensile/waveshare-clock\n# Modifications: https://github.com/pskowronek/epaper-clock-and-more, Apache 2 license\n\nfrom acquire import Acquire\n\nimport json\nimport logging\nimport requests\nfrom collections import namedtuple\n\n\nGMapsTuple = namedtuple('Gmaps', ['provider', 'time_to_dest', 'time_to_dest_in_traffic', 'distance', 'origin_address', 'destination_address' ])\n\n\nclass GMaps(Acquire):\n\n\n DEFAULT = GMapsTuple(provider='Google Maps', time_to_dest=-1, time_to_dest_in_traffic=-1, distance=-1, origin_address='n/a', destination_address='n/a') \n\n\n def __init__(self, key, home_lat, home_lon, dest_lat, dest_lon, units, name, cache_ttl):\n self.key = key\n self.home_lat = home_lat\n self.home_lon = home_lon\n self.dest_lat = dest_lat\n self.dest_lon = dest_lon\n self.units = units\n self.name = name\n self.cache_ttl = cache_ttl\n\n\n def cache_name(self):\n return \"gmaps-{}.json\".format(self.name)\n\n\n def ttl(self):\n return self.cache_ttl\n\n\n def error_found(self, status_code, response_text):\n result = False\n if super(GMaps, self).error_found(status_code, response_text):\n result = True\n else:\n response_parsed = json.loads(response_text)\n if 'error_message' in response_parsed:\n logging.warn(\"GMaps API returned the following error: %s\" % response_parsed['error_message'])\n result = True\n elif 'duration_in_traffic' not in response_text:\n logging.warn(\"GMaps API returned no 'duration_in_traffic' data - probably empty or wrong api key /what a strange API that is/\")\n result = True\n\n return result\n\n\n def acquire(self):\n logging.info(\"Getting time to get to dest: {} from the internet...\".format(self.name))\n\n try:\n r = requests.get(\n \"https://maps.googleapis.com/maps/api/distancematrix/json?units={}&departure_time=now&origins={},{}&destinations={},{}&key={}\".format(\n self.units,\n self.home_lat,\n self.home_lon,\n self.dest_lat,\n self.dest_lon,\n self.key\n ),\n )\n return r.status_code, r.text\n except Exception as e:\n logging.exception(e)\n\n return (None, None)\n\n\n def get(self):\n try:\n gmaps_data = self.load()\n if gmaps_data is None:\n return self.DEFAULT\n\n return GMapsTuple(\n provider='Google Maps',\n time_to_dest=gmaps_data['rows'][0]['elements'][0]['duration']['value'], # in seconds\n time_to_dest_in_traffic=gmaps_data['rows'][0]['elements'][0]['duration_in_traffic']['value'], # in seconds\n distance=gmaps_data['rows'][0]['elements'][0]['distance']['text'], # in km, string with km\n origin_address=gmaps_data['origin_addresses'][0],\n destination_address=gmaps_data['destination_addresses'][0]\n )\n except Exception as e:\n logging.exception(e)\n return self.DEFAULT\n\n\n","sub_path":"providers/gmaps.py","file_name":"gmaps.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"9"}
+{"seq_id":"351577066","text":"# -*- coding: utf-8 -*-\nimport json_lines\n\n__author__ = 'drumcap'\n\nimport scrapy\n\nfrom vanilla_scrap.items import MovieCommentItem\nfrom urllib.parse import urlparse, parse_qs\nfrom datetime import datetime\nimport re\nimport random\nimport time\nimport json\n\nextract_nums = lambda s: re.search('\\d+', s).group(0)\nsanitize_str = lambda s: s.strip()\nrand_sleep = lambda max: time.sleep(int(random.randrange(1, max)))\n\nNAVER_BASEURL = 'http://movie.naver.com/movie/point/af/list.nhn'\nNAVER_RATINGURL = NAVER_BASEURL + '?&page=%s'\nNAVER_MOVIEURL = NAVER_BASEURL + '?st=mcode&target=after&sword=%s&page=%s'\n\nNAVER_MOVIE_RANK = 'https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&tg=%s&page=%s'\n\nclass MovieCommentSpider(scrapy.Spider):\n name = \"movie-comment\"\n\n def extract_nums(self, s): return re.search('\\d+', s).group(0)\n\n def start_requests(self):\n filename = 'movie-info-items.jl'\n\n with json_lines.open(filename) as f:\n for item in f:\n yield scrapy.Request(NAVER_MOVIEURL % (item.get('movie_id'), 1), self.parse_naver_cmt)\n\n def parse_naver_cmt(self, response):\n dtnow = datetime.now()\n for sel in response.css('#old_content > table > tbody > tr'):\n item = MovieCommentItem()\n item['source'] = 'naver'\n item['review_id'] = sel.xpath('./td[@class=\"ac num\"]/text()').extract_first()\n item['rating'] = sel.xpath('./td[@class=\"point\"]/text()').extract_first()\n item['movie_id'] = extract_nums(sel.xpath('./td[@class=\"title\"]/a/@href').extract_first())\n item['movie_name'] = sel.xpath('./td[@class=\"title\"]/a/text()').extract_first()\n item['review_txt'] = ' '.join(sel.xpath('./td[@class=\"title\"]/text()').extract()).strip()\n item['author'] = sel.xpath('./td[@class=\"num\"]/a/text()').extract_first()\n item['date'] = datetime.strptime(sel.xpath('./td[@class=\"num\"]/text()').extract_first(),'%y.%m.%d').astimezone().isoformat()\n yield item\n\n next_page = response.css('.paging .pg_next::attr(href)').extract_first()\n next_page_n = parse_qs(urlparse(next_page).query).get('page')\n next_page_num = int(next_page_n[0]) if next_page_n is not None else 0\n if next_page is not None and next_page_num < 1000:\n print(\"2 ######## go next page {}\".format(next_page))\n yield response.follow(next_page, callback=self.parse_naver_cmt)","sub_path":"vanilla_scrap/spiders/movie_comment_spider.py","file_name":"movie_comment_spider.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"157118219","text":"import copy\nfrom pyiso.base import BaseClient\nimport pytz\n\n\nclass MISOClient(BaseClient):\n NAME = 'MISO'\n\n base_url = 'https://www.misoenergy.org/ria/'\n\n fuels = {\n 'Coal': 'coal',\n 'Natural Gas': 'natgas',\n 'Nuclear': 'nuclear',\n 'Other': 'other',\n 'Wind': 'wind',\n }\n\n TZ_NAME = 'America/New_York'\n\n def utcify(self, local_ts, **kwargs):\n # MISO is always on Eastern Standard Time, even during DST\n # ie UTC offset = -5 always\n utc_ts = super(MISOClient, self).utcify(local_ts, is_dst=False)\n utc_ts += utc_ts.astimezone(pytz.timezone(self.TZ_NAME)).dst() # adjust for EST\n return utc_ts\n\n def get_generation(self, latest=False, **kwargs):\n # set args\n self.handle_options(data='gen', latest=latest, **kwargs)\n\n # process args\n request_urls = []\n if latest:\n request_urls.append('FuelMix.aspx?CSV=True')\n\n else:\n raise ValueError('Latest must be True.')\n\n # set up storage\n raw_data = []\n parsed_data = []\n\n # collect raw data\n for request_url in request_urls:\n # set up request\n url = copy.deepcopy(self.base_url)\n url += request_url\n\n # carry out request\n response = self.request(url)\n if not response:\n return parsed_data\n\n # test for valid content\n if 'The page cannot be displayed' in response.text:\n self.logger.error('MISO: Error in source data for generation')\n return parsed_data\n\n # preliminary parsing\n rows = response.text.split('\\n')\n header = self.parse_row(rows[0])\n for row in rows[1:]:\n raw_data.append(dict(zip(header, self.parse_row(row))))\n\n # parse data\n for raw_dp in raw_data:\n # process timestamp\n aware_utc_timestamp = self.utcify(raw_dp['INTERVALEST'])\n\n # set up storage\n parsed_dp = {}\n\n # add values\n try:\n parsed_dp['timestamp'] = aware_utc_timestamp\n parsed_dp['gen_MW'] = float(raw_dp['ACT'])\n parsed_dp['fuel_name'] = self.fuels[raw_dp['CATEGORY']]\n parsed_dp['ba_name'] = self.NAME\n parsed_dp['market'] = self.MARKET_CHOICES.fivemin\n parsed_dp['freq'] = self.FREQUENCY_CHOICES.fivemin\n except KeyError: # blank last line\n continue\n\n # add to full storage\n parsed_data.append(parsed_dp)\n\n return parsed_data\n","sub_path":"pyiso/miso.py","file_name":"miso.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"605039968","text":"#!/usr/bin/python3\n\n# Usage:\n# ./python_std_sort.py list|array|np_array\n\nimport time, array, numpy, sys\n\ndef getDataStructure(size):\n structure = sys.argv[1]\n data = []\n for _ in range(size):\n data.append(int(input_data.readline()))\n if structure == 'list': return data\n if structure == 'array': return array.array('i', data)\n if structure == 'np_array': return numpy.array(data, dtype=int)\n\ninput_data = open('../input_data.txt', 'r')\n\nsize = int(input_data.readline())\n\ninput_data.close\n\ndata = getDataStructure(size)\n\nprint('{:,} integers have been loaded into {}'.format(len(data), type(data)))\n\ntic = round(time.time() * 1000, 2)\nsorted(data)\ntoc = round(time.time() * 1000, 2)\n\nprint(\"\\n==>%.2f\" % (toc - tic))","sub_path":"python/python_list_std_sort.py","file_name":"python_list_std_sort.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"430972471","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom math import log\n\n# os.chdir('data/')\n\n# --------------------------------------------\n# ********** READ DATA INTO PANDAS **********\n# --------------------------------------------\n\n\ntrain = pd.read_csv('west_nile/input/train.csv')\nspray = pd.read_csv('west_nile/input/spray.csv')\nweather = pd.read_csv('west_nile/input/weather.csv')\ntest = pd.read_csv('west_nile/input/test.csv')\n\nspray\n\nlen(test)\ntest.columns\n# ----------------------\n# ***** CLEAN DATA *****\n# ----------------------\n\n# Convert date to datetime and drop Address / Street columns\ntrain['Date'] = pd.to_datetime(train['Date'])\n\n# test\ntest['Date'] = pd.to_datetime(test['Date'])\n\ntrain.drop([c for c in train.columns if 'Address' in c or 'Street' in c], axis=1, inplace=True)\nlen(test)\n\ntest.drop([c for c in test.columns if 'Address' in c or 'Street' in c], axis=1, inplace=True)\nlen(test)\n\nspray['Date'] = pd.to_datetime(spray['Date']) # spray not used\n\nweather['Date'] = pd.to_datetime(weather['Date'])\n\n# Clean weather data\nweather = weather.replace({'M': np.nan})\nweather.fillna(method='ffill', inplace=True)\n\nweather = weather.replace({'-': np.nan})\nweather.fillna(method='ffill', inplace=True)\n\nweather.dropna(how='all')\nweather.drop('Water1', axis=1, inplace=True)\n\nweather.PrecipTotal = weather.PrecipTotal.str.replace('T', '0').astype(float)\n\nweather_numcols = ['Date', u'Tmax', u'Tmin', u'Tavg', u'Depart', u'DewPoint',\n u'WetBulb', u'Heat', u'Cool', u'Sunrise', u'Sunset',\n u'Depth', u'PrecipTotal', u'StnPressure', u'SeaLevel',\n u'ResultSpeed', u'ResultDir', u'AvgSpeed']\n\nweather_to_join = weather[weather_numcols]\nweather_to_join[[c for c in weather_to_join.columns if c != \"Date\"]] = \\\n\tweather_to_join[[c for c in weather_to_join.columns if c != \"Date\"]].astype(float)\n\n# create weather_avg_df which takes avg of values for the two stations\nweather_avg_df = weather_to_join.groupby('Date').agg(np.mean).reset_index()\n\n# Sum the WnvPresent counts and WnvPresent values and join species for the same trap on the same day\ntrain['Trap'] = train.Trap.apply(lambda x: x[:4])\n\ntrain['Year'] = train.Date.dt.year\ntrain['Month'] = train.Date.dt.month\ntrain['Day'] = train.Date.dt.day\ntrain['Week'] = train.Date.dt.week\n\ntrain\ntest['Year'] = test.Date.dt.year\ntest['Month'] = test.Date.dt.month\ntest['Week'] = test.Date.dt.week\ntest['Day'] = test.Date.dt.day\ntest['Trap'] = test.Trap.apply(lambda x: x[:4])\nlen(test.Trap.unique())\n\ntrain_group_trap = train.groupby(['Date', 'Block', 'Trap', 'Latitude', 'Longitude'])['NumMosquitos',\n 'WnvPresent', 'Species'].agg(\n\t{'NumMosquitos': sum, 'WnvPresent': sum,\n\t 'Species': lambda x: ', '.join(x), 'Day': 'count'})\n\ntrain_group_trap['LeakCount'] = train_group_trap['Day']\ntrain_group_trap.reset_index(inplace=True)\n# train_group_trap = train_group_trap.reset_index()[['Date', 'Trap', 'LeakCount']]\ntrain_group_trap\ntrain_group_trap_cum = train.groupby(['Trap'])['NumMosquitos', 'WnvPresent', 'Species'].agg({'NumMosquitos':\n\t sum,\n 'WnvPresent': sum,\n 'Species': lambda\n\t x: ', '.join(x)})\n\ntrain_group_trap_cum\ntrain_group_trap_cum.reset_index(inplace=True)\ntrain_group_trap_cum\n\ntest\ntest_group_trap = test.groupby(['Date', 'Trap'])['Species'].agg({'Species': lambda x: ', '.join(x), 'Day': 'count'})\n\ntest_group_trap\ntest_group_trap['LeakCount'] = test_group_trap['Day']\ntest_group_trap = test_group_trap.reset_index()[['Date', 
'Trap', 'LeakCount']]\ntest_group_trap\ntest\nlen(test)\nlen(test_group_trap)\n# assert set(train.columns.tolist()) == set(train_group_trap.columns.tolist())\n\n\n\n# Add Year, Month, Day columns\n\n# train_group_trap['Year'] = train_group_trap.Date.dt.year\n# train_group_trap['Month'] = train_group_trap.Date.dt.month\n# train_group_trap['Day'] = train_group_trap.Date.dt.day\n\n\ntrain = train_group_trap\ntrain['Year'] = train.Date.dt.year\ntrain['Month'] = train.Date.dt.month\ntrain['Day'] = train.Date.dt.day\ntrain['Week'] = train.Date.dt.week\n\ntest = pd.concat([test, pd.get_dummies(test['Month'], prefix='Month')], axis=1)\ntest = pd.concat([test, pd.get_dummies(test['Week'], prefix='Week')], axis=1)\ntest = pd.concat([test, pd.get_dummies(test['Trap'], prefix='Trap')], axis=1)\ntest = pd.concat([test, pd.get_dummies(test['Block'], prefix='Block')], axis=1)\ntrain = pd.concat([train, pd.get_dummies(train['Month'], prefix='Month')], axis=1)\ntrain = pd.concat([train, pd.get_dummies(train['Week'], prefix='Week')], axis=1)\ntrain = pd.concat([train, pd.get_dummies(train['Trap'], prefix='Trap')], axis=1)\ntrain = pd.concat([train, pd.get_dummies(train['Block'], prefix='Block')], axis=1)\n# join train and weather dfs\ntest.columns\nset(train.Trap.unique().tolist()).difference(set(test.Trap.unique().tolist()))\nlen(test.Trap.unique())\nlen(train.Trap.unique())\nlen(train.Block.unique())\nlen(test.Block.unique())\n# add species data\ntrain.Trap.unique()\ntrain[train.Trap.str.startswith('T054')]\ntrain.head()\ntrain.Species.unique()\ntest.Species.unique()\n\n\ndef add_species_variable(df):\n\tdf['spec_pipiens'] = df.Species.str.contains('CULEX PIPIENS$')\n\tdf['spec_restuans'] = df.Species.str.contains('CULEX RESTUANS')\n\tdf['spec_pip_rest'] = df.Species.str.contains('CULEX PIPIENS/RESTUANS')\n\treturn df\n\n\ntrain = add_species_variable(train)\ntest = add_species_variable(test)\ntest.head()\n\ntrain_weather = pd.merge(train, weather_avg_df, how='left', left_on='Date', right_on='Date')\ntrain_weather.head(50)\nlen(train)\nlen(train_weather)\n# train_weather = pd.merge(train_weather, train_group_trap_cum[['Trap', 'Year',\n# 'NumMosquitos', 'WnvPresent']], how='left',\n# left_on=['Trap', 'Year'],\n# right_on=['Trap', 'Year'])\ntrain_weather = pd.merge(train_weather, train_group_trap_cum[['Trap',\n 'NumMosquitos', 'WnvPresent']], how='left',\n left_on=['Trap'],\n right_on=['Trap'])\ntrain_weather[['Trap', 'Month', 'NumMosquitos_y']].sort_values(['Trap'])\n\ntrain_weather['WnvPresent_y'] = train_weather['WnvPresent_y'] > 0\ntrain_weather['WnvPresent_y']\n# train_weather = pd.merge(train_weather, train_group_trap[['Trap',\n# 'Date', 'LeakCount']], how='left', left_on=['Trap', 'Date'],\n# right_on=['Trap', 'Date'])\ntrain_group_trap.dtypes\nlen(train_weather)\ntrain_weather['NumMosquitos_y'] = train_weather['NumMosquitos_y'].apply(lambda x: 1 / log(x + 1))\nfor c in train_weather.columns:\n\tprint('\"{}\",'.format(c))\n\n\ntest_weather = pd.merge(test, weather_avg_df, how='left', left_on='Date', right_on='Date')\nlen(test_weather)\ntest_weather = pd.merge(test_weather, train_group_trap_cum[['Trap',\n 'NumMosquitos', 'WnvPresent']], how='left',\n left_on=['Trap'],\n right_on=['Trap'])\nlen(test_weather)\ntrain_weather\ntest_group_trap\npd.merge(test_weather, test_group_trap, how='left', left_on=['Date', 'Trap'], right_on=['Date', 'Trap'])\ntest_weather.columns\ntest_weather.index\ntest_group_trap[['Trap', 'Date', 
'LeakCount']]\ntest_group_trap.dtypes\ntest_weather.dtypes\nlen(test_group_trap)\ntest_weather = pd.merge(test_weather, test_group_trap, how='left', left_on=['Date', 'Trap'], right_on=['Date', 'Trap'])\n\ntest_weather\n\ntest_weather.head(50)\nlen(test_weather)\nlen(test)\n\ntest_weather = test_weather.rename(columns={'NumMosquitos': 'NumMosquitos_y', 'WnvPresent': 'WnvPresent_y'})\ntest_weather['NumMosquitos_y'] = test_weather['NumMosquitos_y'].apply(lambda x: 1 / log(x + 1))\ntest_weather['WnvPresent_y'] = test_weather['WnvPresent_y'] > 0\ntest_weather\nnull_mask = test_weather.NumMosquitos_y.isnull()\nnull_mask\ntest_weather.loc[null_mask, 'NumMosquitos_y'] = 34\ntest_weather.loc[null_mask, 'WnvPresent_y'] = 0\ntest_weather[test_weather.NumMosquitos_y.isnull()]\ntrain[train.Trap.str.startswith('T23')]['NumMosquitos']\n[c for c in train.columns if c.startswith('Num')]\ntrain_weather[train_weather.Block == 34]\ntrain[train.Latitude.apply(lambda x: round(x, 3) == 41.942)]\ntrain[train.Longitude.apply(lambda x: round(x, 3) == -87.761)]\n# 41.942285 -87.761726\n\n# num mosquitos for test set\n\n\n\n# --------------------------------\n# ******* TRAIN ESTIMATOR *******\n# --------------------------------\n## create dict to record highest auc scores\nhighest_average_dict = {}\n##\n\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, LogisticRegressionCV\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, BaggingClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_auc_score\nfrom sklearn.cross_validation import StratifiedKFold\ntrain_weather['WnvPresent_x'] = train_weather['WnvPresent_x'] > 0\ny_actual = 'WnvPresent_x'\nfeature_cols = [\n\t# 'Tavg',\n\t'Depart',\n\t# 'PrecipTotal',\n\t'DewPoint',\n\t# 'Month',\n\t'Year',\n\t'spec_pipiens',\n\t'spec_restuans',\n\t# 'spec_pip_rest',\n\t# 'Latitude',\n\t'Longitude',\n\t'Month_6', 'Month_7', 'Month_8', 'Month_9', 'Month_10',\n\t# \"Date\",\n\t# \"Species\",\n\t# \"Block\",\n\t# \"Trap\",\n\t# \"Latitude\",\n\t# \"Longitude\",\n\t# \"NumMosquitos_x\",\n\t# \"WnvPresent\",\n\t# \"Year\",\n\t# \"Month\",\n\t# \"Day\",\n\t# \"Month_5\",\n\t# 'Week_23', 'Week_25', 'Week_24', 'Week_32',\n\t'Trap_T900',\n\t# 'Week_26',\n\t# 'Week_35',\n\t# 'Week_36',\n\t# 'Week_33',\n\t# 'Week_34',\n\t# \"Month_6\",\n\t# \"Month_7\",\n\t# \"Month_8\",\n\t# \"Month_9\",\n\t# \"Month_10\",\n\t\"Tmax\",\n\t\"Tmin\",\n\t\"WetBulb\",\n\t\"Heat\",\n\t\"Cool\",\n\t\"Sunrise\",\n\t# \"Sunset\",\n\t# \"Depth\",\n\t# \"StnPressure\",\n\t\"SeaLevel\",\n\t\"ResultSpeed\",\n\t# \"ResultDir\",\n\t# \"AvgSpeed\",\n\t#\n\t#\n\t'NumMosquitos_y',\n\t'LeakCount',\n\t'WnvPresent_y'\n]\n\n# week_columns = [c for c in test_weather.columns if 'Week_' in c]\n# trap_columns = [c for c in test_weather.columns if 'Trap_' in c and c!='Trap_T234']\n# block_columns = [c for c in test_weather.columns if 'Block_' in c and c != 'Block_26']\n# week_columns\n# feature_cols.extend(week_columns)\n# feature_cols.extend(trap_columns)\n# feature_cols.extend(block_columns)\n### functions to test different features\n# gbc = GradientBoostingClassifier()\nfrom sklearn.preprocessing import RobustScaler, StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\ngbc = GradientBoostingClassifier(subsample=.8)\ngbc = GradientBoostingClassifier()\nrfc = RandomForestClassifier(n_estimators=100, 
max_features='sqrt')\nbgbc = BaggingClassifier(gbc)\nlogit = LogisticRegression()\n\n\ndef run_model(model):\n\t# X_train, X_test, y_train, y_test = train_test_split(train_weather[feature_cols],\n\t# train_weather[y_actual])\n\t# rfc.fit(X_train, y_train)\n\t# X_train['rfc_pp'] = rfc.predict_proba(X_train)[:,1]\n\tX = train_weather[feature_cols]\n\t# X['rfc_pp'] = rfc.predict_proba(X)[:,1]\n\tcv = StratifiedKFold(train_weather[y_actual], n_folds=3, shuffle=True)\n\t## try scaling\n\trobust_scaler = RobustScaler()\n\tX = robust_scaler.fit_transform(X)\n\t# standard_scaler = StandardScaler()\n\t# X_train = robust_scaler.fit_transform(X_train)\n\t\n\t# X_test = standard_scaler.fit_transform(X_test)\n\t####\n\t# model.fit(X_train, y_train)\n\t# pred_probas = model.predict_proba(X_test)[:, 1]\n\t# scr = roc_auc_score(y_test, pred_probas)\n\t# scr = cross_val_score(model, X_train, y_train, cv=cv, n_jobs=-1, scoring='roc_auc').mean()\n\tscr = cross_val_score(model, X, train_weather[y_actual], cv=cv, n_jobs=-1, scoring='roc_auc').mean()\n\t# print('gbc', roc_auc_score(y_test, pred_probas))\n\t# print('score', gbc.score(X_test, y_test))\n\t# print('cv score', cross_val_score(gbc, X_train, y_train, cv=cv, n_jobs=-1, scoring='roc_auc').mean())\n\tprint('cv score:', scr)\n\treturn scr\n\n\ndef avg_score():\n\tn = 10\n\tscrtotal = 0\n\tfor i in range(n):\n\t\tscrtotal += run_model(bgbc)\n\tprint('avg', scrtotal / float(n))\n\tavg = scrtotal / float(n)\n\tkey = ''.join(feature_cols)\n\thighest_average_dict[key] = avg\n\n\navg_score()\n\n[x for x in feature_cols]\nhighest_average_dict\ntest_weather\n\n# --------------------------------------------------------------------------\n# ****************** APPLY TO FULL TRAIN SET AND PREDICT ******************\n# --------------------------------------------------------------------------\nX = train_weather[feature_cols]\n# rfc.fit(X, train_weather[y_actual])\n# X['rfc_pp'] = rfc.predict_proba(X)[:,1]\nX_TEST = test_weather[feature_cols]\n# X_TEST['rfc_pp'] = rfc.predict_proba(X_TEST)[:,1]\n\nbgbc.fit(X, train_weather[y_actual])\n\ncv = StratifiedKFold(train_weather[y_actual], n_folds=3, shuffle=True, random_state=41)\nscr = cross_val_score(bgbc, X, train_weather[y_actual], cv=cv, n_jobs=-1, scoring='roc_auc').mean()\n\nprint(scr)\n\nanswer_pred_probas = bgbc.predict_proba(X_TEST)\nlen(test_weather)\nlen(answer_pred_probas)\nlen(test)\n\nanswer_df = pd.DataFrame(answer_pred_probas)\n# answer_df = pd.DataFrame(answer_pred_probas_svc)\nanswer_df.reset_index(inplace=True)\nanswer_df['Id'] = answer_df['index'] + 1\nanswer_df.Id = answer_df.Id.astype(str)\nanswer_df['pred'] = answer_df.loc[:, 1]\nanswer_df\nprint(len(answer_df))\nimport csv\n\nout = csv.writer(open(\"wnvsubmission_NA_8_3_bgbc.csv\", \"w\"))\n\nout.writerow([\"Id\", \"WnvPresent\"])\nfor row, p in answer_df.iterrows():\n\tout.writerow([p['Id'], p['pred']])\n\n# --------------------------------------------------------------------------\n\n### one-off test\ngbc.fit(train_weather[feature_cols], train_weather[y_actual])  # fix: gbc was still unfitted at this point\ntrain_weather['gbc_pp'] = gbc.predict_proba(train_weather[feature_cols])[:, 1]\n\nfeature_cols.extend(['gbc_pp'])\nfeature_cols = ['gbc_pp', 'Tmin']\nX_train, X_test, y_train, y_test = train_test_split(train_weather[feature_cols],\n                                                    train_weather[y_actual])\n\nX_train.head()\nX_train = StandardScaler().fit_transform(X_train)  # fix: instantiate the scaler before fit_transform\nprint('total num observations:', len(train_weather))\nnum_pos = len(train_weather[train_weather.WnvPresent_x == 1])\nprint('Number of total observations with Wnv found:', num_pos)\nnum_neg = 
len(train_weather[train_weather.WnvPresent_x == 0])\nprint('Number of total observations with Wnv NOT found:', num_neg)\nassert (num_pos + num_neg) == len(train_weather)\n\nnum_neg / len(train_weather)\n\nprint('X-shape', X_train.shape)\nprint('y-shape', y_train.shape)\n\n## random forest\nrfc = RandomForestClassifier(n_estimators=100, max_features='sqrt')\n\nrfc.fit(X_train, y_train)\ny_pred = rfc.predict(X_test)\n\npred_probas = rfc.predict_proba(X_test)[:, 1]\npred_probas\nprint('rfc:', roc_auc_score(y_test, pred_probas))\n\n## gradient boost classifier\n\ngbc = GradientBoostingClassifier(max_depth=1, n_estimators=90, subsample=0.8,\n                                 max_features='sqrt')\ngbc.fit(X_train, y_train)\ny_pred = gbc.predict(X_test)  # fix: evaluate the gbc just fit above, not the earlier rfc\npred_probas = gbc.predict_proba(X_test)[:, 1]\nprint('gbc', roc_auc_score(y_test, pred_probas))\n\n# GSCV\nX_train, X_test, y_train, y_test = train_test_split(train_weather[feature_cols],\n                                                    train_weather[y_actual])\n\nX_train.head()\nstandard_scaler = StandardScaler()\nX_train = standard_scaler.fit_transform(X_train)\ngbc.fit(train_weather[feature_cols], train_weather[y_actual])\n\nfrom sklearn.grid_search import GridSearchCV\n\n# n_estimators = list(range(100, 150))\n# n_estimators = [50, 70, 100, 110, 120, 150, 200, 500, 1000]\nmax_depth = [1, 2, 3, 4, 5, 10, 20]\n# subsample = [0.8, 0.9, 1.0]\n# max_features = [None, \"auto\"]\nmin_samples_leaf = [1, 3, 5, 7, 10]\nmin_samples_split = [2, 5, 7]\n\n## grid search gradient boost classifier\ngbc = GradientBoostingClassifier()\ncv = StratifiedKFold(train_weather[y_actual], n_folds=3, shuffle=True, random_state=41)\n# param_grid = dict(n_estimators=n_estimators)\n# param_grid= dict(max_depth=max_depth,\n# min_samples_leaf=min_samples_leaf,\n# min_samples_split=min_samples_split)\nparam_grid = dict(subsample=[.3, .4, .5, .6, .7, .8, .9])\ngrid = GridSearchCV(gbc, param_grid, cv=cv, scoring='roc_auc')\nstandard_scaler = StandardScaler()\nX = standard_scaler.fit_transform(train_weather[feature_cols])\n# grid.fit(train_weather[feature_cols], train_weather[y_actual])\ngrid.fit(X, train_weather[y_actual])\ngrid.best_score_\ngrid.best_params_\n# ------------------------------------------------------------------------\n# ***************** EVALUATE ESTIMATOR (ACCURACY & AUC) *****************\n# ------------------------------------------------------------------------\n\n\n### Accuracy score will always be around 94% because of unbalanced classes\nprint('feature import:', rfc.feature_importances_)\nprint('accuracy:', accuracy_score(y_test, y_pred))\nprint('cross_val_score', cross_val_score(rfc, X_train, y_train, cv=5))\nprint(confusion_matrix(y_test, y_pred))\nprint('classification report', classification_report(y_test, y_pred))\n\n### ROC_AUC score around 79-80% ! 
--> This is the relevant metric\n\n\n\npred_probas = rfc.predict_proba(X_test)[:, 1]\nX_test['pred_proba'] = rfc.predict_proba(X_test)[:, 1]\nX_test['y_act'] = y_test\nX_test[X_test.y_act == 1]\nX_test[X_test.y_act == 0]\nlen(X_test[(X_test.pred_proba == 0) & (X_test.y_act == 1)])\n# print feature importances\n\nfidf = pd.DataFrame(rfc.feature_importances_)\npd.merge(pd.DataFrame(feature_cols), fidf, how='left', left_index=True, right_index=True)\n\nlen(X_test[X_test.pred_proba == 0])\nlen(y_test[y_test == 1])\nlen(y_test)\nprint(roc_auc_score(y_test, pred_probas))\n\nanswer_pred_probas = rfc.predict_proba(test_weather[feature_cols])\nanswer_pred_probas = bgbc.predict_proba(test_weather[feature_cols])\nanswer_pred_probas\ntest_weather\nlen(test_weather)\nlen(answer_pred_probas)\n\nanswer_df = pd.DataFrame(answer_pred_probas)\n# answer_df = pd.DataFrame(answer_pred_probas_svc)\nanswer_df.reset_index(inplace=True)\nanswer_df['Id'] = answer_df['index'] + 1\nanswer_df.Id = answer_df.Id.astype(str)\nanswer_df['pred'] = answer_df.loc[:, 1]\nanswer_df\nprint(len(answer_df))\nimport csv\n\nout = csv.writer(open(\"wnvsubmission_NA_8_3_bgbc.csv\", \"w\"))\n\nout.writerow([\"Id\", \"WnvPresent\"])\nfor row, p in answer_df.iterrows():\n\tout.writerow([p['Id'], p['pred']])\n\n[x for x in feature_cols]\n\n# ----------------------------------------------\n# *********** SUPPORT VECTOR MACHINE ***********\n# ----------------------------------------------\n\nfrom sklearn.svm import SVC\n\nsvc = SVC(C=15.0, gamma=0.001, probability=True)\n\nsvc.fit(X_train, y_train)\ny_pred = svc.predict(X_test)\n\n### Accuracy score will always be around 94% because of unbalanced classes\nprint('accuracy:', accuracy_score(y_test, y_pred))\nprint('cross_val_score', cross_val_score(svc, X_train, y_train, cv=5))\nprint(confusion_matrix(y_test, y_pred))\nprint('classification report', classification_report(y_test, y_pred))\n\n### ROC_AUC score around 79-80% ! 
--> This is the relevant metric\n\n# pred_probas = svc.fit(X_train, y_train).decision_function(X_test)\npred_probas = svc.predict_proba(X_test)[:, 1]\nprint(roc_auc_score(y_test, pred_probas))\n\nanswer_pred_probas_svc = svc.predict_proba(test_weather[feature_cols])\nanswer_pred_probas_svc\n\nanswer_df = pd.DataFrame(answer_pred_probas_svc)\nanswer_df\nanswer_df.reset_index(inplace=True)\nanswer_df['Id'] = answer_df['index'] + 1\nanswer_df.Id = answer_df.Id.astype(str)\nanswer_df['pred'] = answer_df.loc[:, 1]\nanswer_df\nprint(len(answer_df))\nimport csv\n\nout = csv.writer(open(\"wnvsubmission3.csv\", \"w\"))\n\nout.writerow([\"Id\", \"WnvPresent\"])\nfor row, p in answer_df.iterrows():\n\t# print(p['Id'], p['pred'])\n\tout.writerow([p['Id'], p['pred']])\n\nanswer_df.pred.unique()\n# Compute ROC curve and ROC area for each class\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics import roc_curve, auc\n\n# y_score = rfc.fit(X_train, y_train).predict_proba(X_test)[:,1]\ny_score = svc.fit(X_train, y_train).decision_function(X_test)\n\n# Compute ROC curve and ROC area for each class\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(0, 1):\n\tfpr[i], tpr[i], _ = roc_curve(y_test, y_score)\n\troc_auc[i] = auc(fpr[i], tpr[i])\n\n# Compute micro-average ROC curve and ROC area\nfpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\nroc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\nimport matplotlib.pyplot as plt\n\nplt.hist(X_test.pred_proba)\n\nplt.figure()\nlw = 2\nplt.plot(fpr[0], tpr[0], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])\nplt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n# ----------------------------------\n# ******** PREDICT TEST SET ********\n# ----------------------------------\n\n","sub_path":"data/eda3.py","file_name":"eda3.py","file_ext":"py","file_size_in_byte":21176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
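The eda3.py record above fits its scaler on the full feature matrix and only then cross-validates, which leaks fold statistics into the scaler; it also pins the pre-0.18 sklearn.cross_validation API. A minimal leak-free sketch of the same AUC evaluation, assuming sklearn >= 0.18 and reusing the script's names (train_weather, feature_cols, y_actual); the scaler is refit inside every fold because it lives in the same Pipeline as the classifier:

    # Leak-free cross-validated AUC: scaling happens per fold inside the pipeline.
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import RobustScaler
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.model_selection import StratifiedKFold, cross_val_score

    def cv_auc(df, feature_cols, y_col, n_splits=3, seed=41):
        # df / feature_cols / y_col mirror train_weather / feature_cols / y_actual above
        pipe = make_pipeline(RobustScaler(), GradientBoostingClassifier(subsample=0.8))
        cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
        scores = cross_val_score(pipe, df[feature_cols], df[y_col],
                                 cv=cv, scoring='roc_auc', n_jobs=-1)
        return scores.mean()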
+{"seq_id":"166973435","text":"import torch\nimport model as md\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\ndef loadData():\n # torchvision输出的是PILImage\n # 我们将其转化为tensor数据,并归一化 range [0, 255] -> [0.0,1.0]\n transform = transforms.Compose([transforms.ToTensor(), ])\n\n # 训练集,将相对目录./data下数据\n trainset = torchvision.datasets.MNIST(root='./data', train=True, download=False, transform=transform)\n testset = torchvision.datasets.MNIST(root='./data', train=False, download=False, transform=transform)\n # 将训练集的m张图片划分成(m/4)份,每份4张图,用于mini-batch输入。shffule=True在表示不同批次的数据遍历时,打乱顺序。num_workers=2表示使用两个子进程来加载数据\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=60, shuffle=True, num_workers=2)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=60, shuffle=True, num_workers=2)\n return train_loader,test_loader\n\ndef trainModel(epoch,train_loader,model,optimizer,criterion):\n # 把module设成training模式,对Dropout和BatchNorm有影响\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n # Variable类对Tensor对象进行封装,会保存该张量对应的梯度,\n # 以及对生成该张量的函数grad_fn的一个引用。如果该张量是用户创建的,\n # grad_fn是None,称这样的Variable为叶子Variable。\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n # 将output和labels使用叉熵计算损失\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n #len(data)=4;len(train_loader)=15000;len(train_loader.dataset)=60000\n if batch_idx % 1000 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data[0]))\n\n\ndef testModel(epoch,test_loader,model,optimizer,criterion):\n model.eval() # 把module设置为评估模式,只对Dropout和BatchNorm模块有影响\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n output = model(data)\n test_loss += criterion(output, target).data[0]\n # get the index of the max log-probability\n pred = output.data.max(1)[1]\n correct += pred.eq(target.data).cpu().sum()\n\n test_loss = test_loss\n # loss function already averages over batch size\n test_loss /= len(test_loader)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\ndef run():\n train_loader, test_loader = loadData()\n\n model = md.LeNet5()\n # 将所有的模型参数移动到GPU上\n if torch.cuda.is_available():\n model.cuda()\n # 随机梯度下降\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n # 叉熵损失函数\n criterion = nn.CrossEntropyLoss()\n\n for epoch in range(1, 11):\n trainModel(epoch, train_loader, model, optimizer, criterion)\n testModel(epoch, test_loader, model, optimizer, criterion)\n\n # 保存整个神经网络的结构和模型参数\n torch.save(model, 'net.pkl')\n # 加载ConvNet\n # model2 = torch.load('net.pkl')\n\n\n\nif __name__ == '__main__':\n run()","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"439683476","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-11-27 16:12:57\nimport sys\nimport allure\nimport pytest\n\nfrom base.helper import JsonHelper\nfrom bns.iot.iotUser.bns_api_iotUser import BnsApi\nfrom case import BaseCase\n\n_testData_list = BaseCase().csv_info(curr_file=__file__)\n\n# 获取api操作对象, 默认权限为平台管理员\napi_object_admin = BnsApi()\n\n\nclass TestIotuser(BaseCase):\n\n @pytest.mark.parametrize(\"test_data\", _testData_list)\n def test_field_iotUser_add(self, test_data):\n\n first_layer = test_data[\"first_layer\"]\n sencod_layer = test_data[\"sencod_layer\"]\n third_layer = test_data[\"third_layer\"]\n\n if first_layer:\n allure.dynamic.epic(first_layer)\n if sencod_layer:\n allure.dynamic.feature(sencod_layer)\n if third_layer:\n allure.dynamic.story(third_layer)\n \n module_key = sys._getframe().f_code.co_name.split(\"_\")[2]\n test_data = self.parse_csv_param(test_data, module_key)\n \n userEmail = test_data[\"userEmail\"]\n userName = test_data[\"userName\"]\n userPhone = test_data[\"userPhone\"]\n roleIdsList = test_data[\"roleIdsList\"]\n \n with allure.step(\"步骤: 请求接口\"):\n\n res_json = api_object_admin.bns_iotUser_add(\n userEmail=userEmail,\n\t\t\t\tuserName=userName,\n\t\t\t\tuserPhone=userPhone,\n\t\t\t\troleIdsList=roleIdsList,\n )\n\n with allure.step(\"步骤: 提取接口的业务状态码\"):\n \n actual_code = JsonHelper.parseJson_by_objectpath(res_json, \"$.response_data.code\")\n\n with allure.step(\"校验: 业务状态码是否正确\"):\n\n self.assert_actual_equal_expect(\"业务状态码\", actual_code, test_data[\"expect_code\"])\n\n if test_data[\"expect_msg\"]:\n\n with allure.step(\"步骤: 提取接口的提示信息\"):\n\n actual_msg = JsonHelper.parseJson_by_objectpath(res_json, \"$.response_data.message\")\n\n with allure.step(\"校验: 提示信息是否正确\"):\n\n self.assert_actual_contain_expect(\"提示信息\", actual_msg, test_data[\"expect_msg\"])\n\n if test_data[\"clean_data\"]:\n\n with allure.step(\"步骤: 数据清理操作\"):\n\n userInfo = api_object_admin.bns_iotUser_list(userName=userName)\n userId = JsonHelper.parseJson_by_objectpath(userInfo, \"$..*[@.userId]\", res_firstOne=True)\n\n api_object_admin.bns_iotUser_delete(userId=userId)\n \n generator_objs_list = test_data.get(\"generator_objs_list\")\n if generator_objs_list:\n for generator_obj in generator_objs_list:\n try:\n generator_obj.__next__()\n except StopIteration:\n pass\n","sub_path":"接口/demo/case/field_rank/iotUser/add/test_field_iotUser_add.py","file_name":"test_field_iotUser_add.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"90"}
+{"seq_id":"209254684","text":"# Written by Laurenz Mädje\r\n\r\noverlay = None\r\nconnect = None\r\nmyplayer = None\r\nscreen = None\r\nstate = None\r\ninterface = None\r\ntype = None\r\nconnection = (None, None)\r\n\r\nenemyhandlen = 5\r\nenemynachziehlen = 5\r\nenemyablagelen = 0\r\nenemyablagecard = None\r\nenemystats = (1,1,0)\r\n\r\nhintergrundcolor = (200,200,200)\r\nhintergrund = None\r\n","sub_path":"var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"78331153","text":"# encoding=utf-8\n\n# Perception v0.9:二类感知器算法\n# 每个神经节点的各个输入线性组合\n# 然后输入激活函数进行输出\n\n# 当激活函数TF使用sigmoid,误差函数使用sig_error时,就是多层前馈网络中后向传播的节点\nimport numpy as np\nfrom math import fabs, exp\nfrom random import randint\n\n\nclass Perception:\n '''\n 感知器算法\n '''\n\n def __init__(self, max_iter, TF, eps, min_error, EF):\n self.dataset = [] # 存储样本点的增广坐标,这里用列表初始化,但是在getData过后会转变成matrix类型\n self.label = [] # 以list形式存储\n self.w = [] # 存储判决方程的系数向量,w中的第一个元素是b,对应着,所有数据向量第一个位置都是1\n self.max_iter = max_iter # 最大迭代次数\n self.TF = TF # 节点的激活函数\n self.eps = eps # 学习速率\n self.min_error = min_error # 误差阈值,需要匹配激活函数\n self.EF = EF # 误差函数,实际上是通过梯度求出来的\n # print(\"欢迎���用感知器分类算法v0.9,只能处理二类分类问题,在data.txt中存放数据,用一个空行分割不同类,单行内的空格分割同一个样本的不同特征值\")\n pass\n\n def getData(self, file):\n #\n with open(file, \"r\") as f:\n raw_data = f.readlines()\n for line in raw_data:\n # 去掉结尾的'\\n'\n if line.endswith('\\n'):\n line = line[0:-1]\n\n # 在python3里面\n # map()的返回值已经不再是list,而是iterators\n # 所以想要使用,只用将iterator 转换成list 即可\n # 比如 list(map())\n if(line):\n line = list(map(lambda x: float(x), line.split()))\n data = [1] # 偏置量\n data.extend(line)\n self.dataset.append(data[:-1])\n self.label.append(data[-1])\n\n # 自动将连接权重赋值为零\n length = len(self.dataset[0])\n self.w = np.mat([0.0] * length).T\n\n self.dataset = np.mat(self.dataset)\n self.w = np.mat(self.w).T\n print(self.dataset)\n print(self.label)\n print(self.w)\n print()\n\n def changeW(self, w):\n self.w = np.mat(w).T\n\n def getW(self):\n return self.w\n\n # 需要改动\n def update(self, data, label):\n # 给出一组数据和该组数据的label,进行更新\n # 乘出来的结果是一个1行1列的矩阵,所以用[0,0]取其中元素\n y_hat = self.TF((data * self.w)[0, 0])\n error = self.EF(label, y_hat)\n '''\n print(self.w.T)\n print(data)\n print(\"yhat:\"+str(y_hat))\n print(\"label:\"+str(label))\n print(\"error:\"+str(error))\n '''\n if(fabs(error) > self.min_error):\n self.w += self.eps * error * data.T\n print(self.w.T)\n print()\n return True\n else:\n print()\n return False\n\n def compute(self):\n for i in range(self.max_iter):\n is_update = False\n\n for j in range(len(self.dataset)):\n is_update = self.update(self.dataset[j], self.label[j]) or is_update\n '''\n j=randint(0,3)\n is_update = self.update(self.dataset[j],self.label[j]) or is_update\n '''\n error = 0\n '''\n temp=0\n for i in self.w:\n temp += i[0,0] **2\n self.w = self.w/(temp**0.5)\n '''\n for j in range(len(self.dataset)):\n error += (self.dataset[j] * self.w - self.label[j]) ** 2\n print(\"***************%f**************\" % error)\n if(is_update == False):\n break\n\n\ndef hardlim(x):\n if(x > 0):\n return 1\n elif(x == 0):\n return 0\n else:\n return -1\n\n\ndef linear(x):\n return x\n\n\ndef sigmoid(x):\n return 1 / (1 + exp(-x))\n\n\ndef error1(label, y):\n return label - y\n\n\ndef sig_error(label, y):\n return y * (1 - y) * (label - y)\n\np = Perception(1,sigmoid,0.9,0,sig_error)\np.getData(\"data.txt\")\np.changeW([0.1,-0.3,-0.2])\np.compute()\n","sub_path":"perception-2.py","file_name":"perception-2.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"599199460","text":"from typing import Dict, List\n\nimport torch\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data import Field, DataArray, Token, Vocabulary\nfrom allennlp.data.vocabulary import DEFAULT_OOV_TOKEN\n\n\nclass CopyField(Field[torch.Tensor]):\n\n def __init__(self,\n source_tokens: List[Token]) -> None:\n self._source_tokens = source_tokens\n self._out: List[int] = []\n\n @staticmethod\n def generate_ids_out(vocab: Vocabulary, source_tokens: List[Token]):\n vocab_size = vocab.get_vocab_size()\n ids = {}\n out = []\n for token in source_tokens:\n text = token.text.lower()\n text_ids = vocab.get_token_index(text)\n if text_ids == vocab.get_token_index(DEFAULT_OOV_TOKEN):\n out.append(ids.setdefault(text, len(ids) + vocab_size))\n else:\n out.append(text_ids)\n return ids, out\n\n def index(self, vocab: Vocabulary):\n _, self._out = self.generate_ids_out(vocab, self._source_tokens)\n\n def get_padding_lengths(self) -> Dict[str, int]:\n return {\"num_tokens\": len(self._source_tokens)}\n\n def as_tensor(self, padding_lengths: Dict[str, int]) -> DataArray:\n desired_length = padding_lengths[\"num_tokens\"]\n padded_tokens = pad_sequence_to_length(self._out, desired_length)\n tensor = torch.LongTensor(padded_tokens)\n max_oov = tensor.max()\n return {\n 'ids': tensor,\n 'max_oov': max_oov\n }\n\n def batch_tensors(self, tensor_list: List[DataArray]) -> DataArray:\n ids = []\n max_oovs = []\n for t in tensor_list:\n ids.append(t['ids'])\n max_oovs.append(t['max_oov'])\n return {\n 'ids': torch.stack(ids),\n 'max_oov': torch.stack(max_oovs)\n }\n\n def empty_field(self) -> 'Field':\n return CopyField([])\n","sub_path":"pointer_generator_salience/reader/copy_field.py","file_name":"copy_field.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}
+{"seq_id":"379300260","text":"from selenium import webdriver\n# from selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import Select\n# from selenium.webdriver.support.ui import WebDriverWait\n# from selenium.webdriver.support import expected_conditions as EC\n\nimport pickle\nimport time\nimport random\n\nattempt = 0\nmax_attempt = 3\ndef powerball(n):\n group = []\n for a in range(n):\n drum1 = list(range(1, 70))\n drum2 = list(range(1, 27))\n balls = []\n for b in range(5):\n choice = random.SystemRandom().choice(drum1)\n drum1.pop(drum1.index(choice))\n balls.append(choice)\n balls.append(random.SystemRandom().choice(drum2))\n group.append(balls)\n return group\n\nmillion = powerball(3)\nopt = Options()\n# opt.add_argument('headless')\nopt.add_argument('window-size=1920,1080')\nopt.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36')\ndriver = webdriver.Chrome(chrome_options=opt)\ndriver.set_page_load_timeout(5)\ndriver.implicitly_wait(5)\n\nwhile attempt < max_attempt:\n try:\n try:\n driver.get(\"https://www.myillinoislottery.com/en-us/extendedplay/powerball.html\")\n except:\n pass\n try:\n usr = driver.find_element_by_id('userName')\n psw = driver.find_element_by_id('password')\n usr.send_keys(\"tongzeyun@gmail.com\")\n psw.send_keys(\"Make1decision\")\n psw.submit()\n except:\n pass\n success = driver.find_elements_by_css_selector(\"a[title='Welcome back,Zeyun']\")\n draws = Select(driver.find_element_by_id(\"numberOfGamePanels\"))\n draws.select_by_value(\"3\")\n red = driver.find_elements_by_css_selector(\"input[name='ball']\")\n for d in range(3):\n for i in range(5):\n box = driver.find_elements_by_id(\"inputTxt\" + str(i))\n box[d].send_keys(million[d][i])\n red[d].send_keys(million[d][5])\n add_to_cart = driver.find_element_by_id(\"addToCart\")\n add_to_cart.click()\n check_out = driver.find_element_by_css_selector(\".shopping-cart a#btnCheckOut\")\n check_out.click()\n driver.find_element_by_id(\"card0\").click()\n driver.find_element_by_id(\"ageVerify\").click()\n submit = driver.find_element_by_id(\"submitWager\")\n # submit.click()\n attempt = max_attempt\n except Exception as e:\n attempt += 1\n print(e)\n # time.sleep(300)\n\ntime.sleep(10)\ndriver.quit()\n# title = driver.find_element_by_css_selector(\"div.col-lg-12.appointment-sec.center > div.rows.text-center > h1\")\n# print(title.text)\n# # print('successfully submited')\n# # time.sleep(25)\n# # print(driver.get_window_size())\n# # try:\n# # \tbalance = driver.find_element_by_id('accountAvailableBalanceValue')\n# # \toutput = balance.text\n# # \tprint(output)\n# # \tpickle.dump( driver.get_cookies() , open('cookies.pkl','wb'))\n# # except:\n# # \tx=driver.page_source.encode('utf-8')\n# # \tprint(x)\n# except:\n# pass\n# finally:\n# pass\n# driver.quit()\n# agent = driver.execute_script(\"return navigator.userAgent\")\n# print(agent)\nexit()\n\n","sub_path":"powerball.py","file_name":"powerball.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"89"}